Add Batch 789ba3d9-366a-4adc-9727-324a090d290a
This view is limited to 50 files because it contains too many changes. See raw diff.
- 2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/b7bf8615-4c9a-42fc-bd3e-0d822d42f1ab_content_list.json +3 -0
- 2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/b7bf8615-4c9a-42fc-bd3e-0d822d42f1ab_model.json +3 -0
- 2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/b7bf8615-4c9a-42fc-bd3e-0d822d42f1ab_origin.pdf +3 -0
- 2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/full.md +407 -0
- 2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/images.zip +3 -0
- 2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/layout.json +3 -0
- abayesianoptimizationapproachtomachinetranslationreranking/e6893c9d-09ab-4c0e-a429-b12ff05203f6_content_list.json +3 -0
- abayesianoptimizationapproachtomachinetranslationreranking/e6893c9d-09ab-4c0e-a429-b12ff05203f6_model.json +3 -0
- abayesianoptimizationapproachtomachinetranslationreranking/e6893c9d-09ab-4c0e-a429-b12ff05203f6_origin.pdf +3 -0
- abayesianoptimizationapproachtomachinetranslationreranking/full.md +356 -0
- abayesianoptimizationapproachtomachinetranslationreranking/images.zip +3 -0
- abayesianoptimizationapproachtomachinetranslationreranking/layout.json +3 -0
- accessabenchmarkforabstractcausaleventdiscoveryandreasoning/dba615fd-e933-40db-b521-4c7b6a606e65_content_list.json +3 -0
- accessabenchmarkforabstractcausaleventdiscoveryandreasoning/dba615fd-e933-40db-b521-4c7b6a606e65_model.json +3 -0
- accessabenchmarkforabstractcausaleventdiscoveryandreasoning/dba615fd-e933-40db-b521-4c7b6a606e65_origin.pdf +3 -0
- accessabenchmarkforabstractcausaleventdiscoveryandreasoning/full.md +556 -0
- accessabenchmarkforabstractcausaleventdiscoveryandreasoning/images.zip +3 -0
- accessabenchmarkforabstractcausaleventdiscoveryandreasoning/layout.json +3 -0
- accordclosingthecommonsensemeasurabilitygap/51b70a61-a300-4946-bd40-f5512a80d45c_content_list.json +3 -0
- accordclosingthecommonsensemeasurabilitygap/51b70a61-a300-4946-bd40-f5512a80d45c_model.json +3 -0
- accordclosingthecommonsensemeasurabilitygap/51b70a61-a300-4946-bd40-f5512a80d45c_origin.pdf +3 -0
- accordclosingthecommonsensemeasurabilitygap/full.md +0 -0
- accordclosingthecommonsensemeasurabilitygap/images.zip +3 -0
- accordclosingthecommonsensemeasurabilitygap/layout.json +3 -0
- acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/4d3753eb-da4e-4fdc-b38d-da331bf2b58e_content_list.json +3 -0
- acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/4d3753eb-da4e-4fdc-b38d-da331bf2b58e_model.json +3 -0
- acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/4d3753eb-da4e-4fdc-b38d-da331bf2b58e_origin.pdf +3 -0
- acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/full.md +558 -0
- acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/images.zip +3 -0
- acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/layout.json +3 -0
- activefewshotlearningfortextclassification/1f71a233-7606-4149-981f-d07f210615f6_content_list.json +3 -0
- activefewshotlearningfortextclassification/1f71a233-7606-4149-981f-d07f210615f6_model.json +3 -0
- activefewshotlearningfortextclassification/1f71a233-7606-4149-981f-d07f210615f6_origin.pdf +3 -0
- activefewshotlearningfortextclassification/full.md +512 -0
- activefewshotlearningfortextclassification/images.zip +3 -0
- activefewshotlearningfortextclassification/layout.json +3 -0
- adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/ccbfbf24-6776-43af-9715-49834a214263_content_list.json +3 -0
- adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/ccbfbf24-6776-43af-9715-49834a214263_model.json +3 -0
- adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/ccbfbf24-6776-43af-9715-49834a214263_origin.pdf +3 -0
- adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/full.md +471 -0
- adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/images.zip +3 -0
- adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/layout.json +3 -0
- adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/8b39b034-d377-424a-a0d3-f7fefaf33c8d_content_list.json +3 -0
- adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/8b39b034-d377-424a-a0d3-f7fefaf33c8d_model.json +3 -0
- adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/8b39b034-d377-424a-a0d3-f7fefaf33c8d_origin.pdf +3 -0
- adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/full.md +382 -0
- adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/images.zip +3 -0
- adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/layout.json +3 -0
- adaptingsentencelevelautomaticmetricsfordocumentlevelsimplificationevaluation/62862604-1763-4a1b-a361-a1046b1ed4b2_content_list.json +3 -0
- adaptingsentencelevelautomaticmetricsfordocumentlevelsimplificationevaluation/62862604-1763-4a1b-a361-a1046b1ed4b2_model.json +3 -0
2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/b7bf8615-4c9a-42fc-bd3e-0d822d42f1ab_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d69bccfa2733c6af47b46569b09cd4c45d1c13fb3989dc2d0ea9ef5d25597549
size 78067
2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/b7bf8615-4c9a-42fc-bd3e-0d822d42f1ab_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:018dd9268d2989e5d4ffd93126b4b0d5a3f75034d88efe4865fd6f77d6c3d22a
size 89467
2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/b7bf8615-4c9a-42fc-bd3e-0d822d42f1ab_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb9071f2b3218539d317710d79dae37ddaf9a3dad2fd7402b497aede5435a9dd
size 1668373
2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/full.md
ADDED
@@ -0,0 +1,407 @@
# $\mathcal{S}^2\mathrm{IT}$: Stepwise Syntax Integration Tuning for Large Language Models in Aspect Sentiment Quad Prediction

Bingfeng Chen$^{1,2}$, Chenjie Qiu$^{1}$, Yifeng Xie$^{1}$, Boyan Xu$^{1*}$, Ruichu Cai$^{1,3}$, Zhifeng Hao$^{1,4}$

$^{1}$ School of Computer Science, Guangdong University of Technology
$^{2}$ Guangdong Laboratory of Artificial Intelligence and Digital Economy (SZ)
$^{3}$ Peng Cheng Laboratory
$^{4}$ College of Science, Shantou University

chenbf@gdut.edu.cn
{yausankit, evfxie, hpakyim, cairuichu}@gmail.com
haozhifeng@stu.edu.cn

# Abstract

Aspect Sentiment Quad Prediction (ASQP) has seen significant advancements, largely driven by the powerful semantic understanding and generative capabilities of large language models (LLMs). However, while syntactic structure information has proven effective in previous extractive paradigms, it remains underutilized in the generative paradigm of LLMs due to their limited reasoning capabilities. In this paper, we propose $S^2$IT, a novel Stepwise Syntax Integration Tuning framework that progressively integrates syntactic structure knowledge into LLMs through a multi-step tuning process. The training process is divided into three steps. $S^2$IT decomposes the quadruple generation task into two stages: 1) Global Syntax-guided Extraction and 2) Local Syntax-guided Classification, integrating both global and local syntactic structure information. Finally, Fine-grained Structural Tuning enhances the model's understanding of syntactic structures through the prediction of element links and node classification. Experiments demonstrate that $S^2$IT significantly improves state-of-the-art performance across multiple datasets. Our implementation will be open-sourced at https://github.com/DMIRLAB-Group/S2IT.
# 1 Introduction

Aspect Sentiment Quad Prediction (ASQP) focuses on predicting tuples of sentiment-related elements from a given text (Zhang et al., 2022). These elements consist of four components central to ASQP: aspect term (a), aspect category (c), opinion term (o), and sentiment polarity (s). For example, in the sentence "I really love sushi!", the corresponding elements are "sushi" (aspect term), "food quality" (aspect category), "love" (opinion term), and "positive" (sentiment polarity). Although ASQP is fundamentally an extractive task, the ability to generalize and generate responses has made fine-tuning models like T5 (Raffel et al., 2019) a mainstream approach, as demonstrated by Zhang et al. (2021).

Figure 1: An example of the ASQP task. The most notable feature that distinguishes $S^2$IT from previous work is that it decomposes the quadruples and injects syntactic information from different perspectives step by step into the large language model.

Recent approaches to improving language models' performance have incorporated syntactic structure knowledge and typically divide into encoder-only and encoder-decoder methods. For instance, encoder-only approaches (Liang et al., 2022; Li et al., 2023b; Chen et al., 2024) enhance the connections between elements at the representation level through Graph Neural Networks (GNNs). For encoder-decoder approaches, a key challenge is how to add syntactic structure knowledge to language models. Li et al. (2023a) and Yu et al. (2023) incorporate GNNs into the encoder layers of T5 and BART. These approaches are limited by their reliance on GNN integration, which does not extend well to decoder-only language models.

In this paper, we propose $S^2\mathrm{IT}$, a novel Stepwise Syntax Integration Tuning framework. Specifically, we decompose the complex task of generating sentiment quadruples into two stages: Global Syntax-guided Extraction and Local Syntax-guided Classification. First, Global Syntax-guided Extraction incorporates global syntactic knowledge to identify aspects and opinions. Second, Local Syntax-guided Classification classifies each aspect-opinion pair into a sentiment and category by utilizing the local syntactic relationships between aspects and opinions. As shown in Figure 1, the incorporation of syntactic information effectively helps $S^2$IT mitigate the influence of multiple aspect terms.

Figure 2: $S^2$IT framework illustration. (a) shows how we serialize the dependency tree into natural language. (b) shows the main part of our framework and the two structure instruction tuning tasks.

We also introduce Fine-grained Structural Tuning to improve LLMs' ability to understand and use structural knowledge for reasoning. Our main contributions are as follows: (1) We propose $S^2\mathrm{IT}$, a novel Stepwise Syntax Integration Tuning framework that progressively integrates syntactic structure knowledge into large language models (LLMs) through a multi-step tuning process. (2) $S^2\mathrm{IT}$ effectively learns sentiment element relationships by leveraging syntactic structure at both global and local granularities through three stages: Global Syntax-guided Extraction, Local Syntax-guided Classification, and Fine-grained Structural Tuning. (3) Experiments demonstrate that $S^2\mathrm{IT}$ achieves the state of the art on the Restaurant and Laptop datasets.
# 2 Methodology

We first formulate the ASQP task (§2.1). As shown in Figure 2, we then present our $S^2\mathrm{IT}$, which is enhanced with syntactic information. It includes the extraction task (§2.2), the classification task (§2.3), and structure instruction tuning (§2.4).

# 2.1 Problem Formulation

For a given sentence $x = \{x_{1}, x_{2}, \ldots, x_{n}\}$, ASQP aims to predict all the aspect-level sentiment quadruples $\{(a_{j}, o_{j}, c_{j}, s_{j})\}_{j=1}^{N}$, which represent the aspect term, opinion term, aspect category, and sentiment polarity, respectively. The aspect category $c$ belongs to a predefined set of categories, and the sentiment polarity $s$ falls into one of three classes: negative, positive, and neutral.

# 2.2 Global Syntax-guided Extraction

Although promising, current methods fall short of fully leveraging the additional syntactic features available. Our objective is to extract the potential aspect-opinion pairs $\mathcal{P} = \{\langle a_j, o_j\rangle\}_{j=1}^N$ from a sentence; introducing syntactic information helps the model discover relationships between the elements.

As an example, we consider injecting modifier relationships between elements into LLMs. We utilize the template $\mathcal{E}$ to delineate the type of relation found in the dependency tree. Assuming that $x_{h}$ serves as the head node of $x_{i}$, we describe this syntactic structure as follows:

$$
\mathcal{E}_{i \in \{1, \dots, n\}} = \{x_{i}\}\ \text{modify}\ \{x_{h}\} \tag{1}
$$
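To make the serialization in Eq. (1) concrete, the sketch below converts a dependency parse into the natural-language clauses used as $\mathcal{E}_1 | \dots | \mathcal{E}_n$. spaCy is used here as a stand-in parser and the clause wording follows the Appendix A.2 examples; the paper does not name its parser, so treat this as an illustrative assumption rather than the authors' exact pipeline.

```python
# A minimal sketch of the Eq. (1) serialization, assuming a spaCy parser.
import spacy

nlp = spacy.load("en_core_web_sm")

def serialize_dependencies(sentence: str) -> str:
    """Render each token's head relation as a clause '{x_i} modify {x_h}'
    and join the clauses with ' | ' in sentence order."""
    doc = nlp(sentence)
    clauses = []
    for token in doc:
        if token.head == token:  # the root token has no head
            clauses.append(f"root depend {token.text}")
        else:
            clauses.append(f"{token.text} modify {token.head.text}")
    return " | ".join(clauses)

# The serialized description is appended to the original sentence:
sentence = "service ok but unfriendly, filthy bathroom."
context = f"sentence: {sentence} dependency relation: {serialize_dependencies(sentence)}"
print(context)
```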
Afterward, we concatenate all pairs according to the order of their occurrence within the sentence to create a natural-language description of the global structure. This description is subsequently appended to the original sentence, enhancing the syntactic context. Furthermore, we implement instruction tuning using the instruction $\mathcal{I}_1$ and the output $\mathcal{O}_1$. The process is as follows:

$$
\mathcal{O}_{1, j \in \{1, \dots, N\}} = \text{aspect}: \{a_{j}\},\ \text{opinion}: \{o_{j}\} \tag{2}
$$

The objective of supervised fine-tuning is to minimize the loss defined as:

$$
\min_{M^{*}} \mathcal{L}\left(M^{*}\left(\mathcal{I}_{1}, x, \mathcal{E}_{1} | \dots | \mathcal{E}_{n}, \mathcal{O}_{1,1} | \dots | \mathcal{O}_{1,N}\right)\right) \tag{3}
$$
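As a sketch of how one Stage-1 training record could be assembled from Eq. (2) and Eq. (3): the instruction $\mathcal{I}_1$, the sentence $x$ with its serialized relations, and the target string $\mathcal{O}_{1,1} | \dots | \mathcal{O}_{1,N}$. The record layout and separators mirror the Appendix A.2 prompts, but the helper itself is our reconstruction, not the authors' exact format.

```python
# A sketch of a Stage-1 SFT record (prompt/target pair), under the
# assumption that training uses a simple prompt-completion layout.
INSTRUCTION_1 = (
    "Given a sentence and related dependency relations, extract aspect and "
    "opinion (both implicit and explicit) from the sentence and return "
    "pair(aspect, opinion)."
)

def build_stage1_example(sentence: str, dep_relations: str,
                         pairs: list[tuple[str, str]]) -> dict:
    """Build one (prompt, target) record for supervised fine-tuning."""
    prompt = (f"{INSTRUCTION_1}\n"
              f"sentence: {sentence} dependency relation: {dep_relations}")
    target = " | ".join(f"aspect: {a}, opinion: {o}" for a, o in pairs)
    return {"prompt": prompt, "target": target}

example = build_stage1_example(
    "service ok but unfriendly, filthy bathroom.",
    "root depend service | service modify ok | bathroom modify filthy",
    [("service", "ok"), ("bathroom", "filthy")],
)
```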
# 2.3 Local Syntax-guided Classification

Focusing not only on the global syntactic structure, which has been shown to enhance performance (Cheng et al., 2023; Xie et al., 2023), but also on the local syntactic structure is crucial (Liang et al., 2022; Chen et al., 2024). After obtaining the correct pairs $\mathcal{P}$, we treat these pairs as central nodes in the dependency tree and extract their neighboring words as subgraphs. For example, the description of a subgraph $\mathcal{S}$ centered on $a_{j}$, including at most one-hop neighbor details, is:

$$
\mathcal{S}_{a_{j \in \{1, \dots, N\}}}^{(1)} = \{a_{j}\}\ \text{is connected with}\ \{v \mid \mathcal{A}_{a_{j}v} = 1\}\ \text{within one hop.} \tag{4}
$$

where $\mathcal{A}$ represents the adjacency matrix and $\{v \mid \mathcal{A}_{a_{j}v} = 1\}$ denotes all one-hop neighbors of $a_{j}$ in the dependency tree. The formula also holds for $o_{j}$ and $\mathcal{S}_{o_{j}}^{(1)}$.

Subsequently, we also concatenate this local syntactic information with the original sentence, integrating the output $\mathcal{O}_2$ and the instruction $\mathcal{I}_2$. This integrated data is then processed and tuned to optimize model performance further:

$$
\mathcal{O}_{2, j \in \{1, \dots, N\}} = \text{aspect}: \{a_{j}\},\ \text{opinion}: \{o_{j}\},\ \text{category}: \{c_{j}\},\ \text{sentiment}: \{s_{j}\} \tag{5}
$$

We then fine-tune the LLMs while minimizing the following loss function:

$$
\min_{M^{*}} \mathcal{L}\left(M^{*}\left(\mathcal{I}_{2}, x, \mathcal{S}_{a_{1}}^{(1)}, \mathcal{S}_{o_{1}}^{(1)} | \dots | \mathcal{S}_{a_{N}}^{(1)}, \mathcal{S}_{o_{N}}^{(1)}, \mathcal{P}, \mathcal{O}_{2,1} | \dots | \mathcal{O}_{2,N}\right)\right) \tag{6}
$$
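The one-hop description in Eq. (4) can be generated directly from the dependency-tree adjacency matrix. The sketch below is a minimal illustration; the token list and adjacency are toy inputs, since the paper does not specify its internal data structures.

```python
# A minimal sketch of the Eq. (4) one-hop subgraph description,
# built from a dependency-tree adjacency matrix (illustrative inputs).
import numpy as np

def one_hop_description(center: str, tokens: list[str],
                        adjacency: np.ndarray) -> str:
    """Describe the one-hop neighborhood of `center` in natural language."""
    i = tokens.index(center)
    neighbors = [tokens[j] for j in range(len(tokens)) if adjacency[i, j] == 1]
    return f"{center}, which is connected to ({', '.join(neighbors)}) within one hop."

# Undirected adjacency for the running example (toy dependency edges):
tokens = ["service", "ok", "but", "unfriendly", "filthy", "bathroom"]
adjacency = np.zeros((6, 6), dtype=int)
for a, b in [(0, 1), (0, 5), (2, 5), (3, 5), (4, 5)]:
    adjacency[a, b] = adjacency[b, a] = 1

print(one_hop_description("bathroom", tokens, adjacency))
# -> bathroom, which is connected to (service, but, unfriendly, filthy) within one hop.
```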
# 2.4 Fine-grained Structure Instruction Tuning

Element Link Prediction. To adapt our model to specialized structured-knowledge tasks, we introduce an auxiliary element link prediction task. Specifically, we developed a structure-aware matching task that presents genuine sentiment elements (e.g., aspects) to LLMs and uses their corresponding sentiment elements (e.g., opinions) as labels. This guides the model to associate them accurately based on the provided sentences and syntactic information. Such alignment enhances not only the model's accuracy but also its capability to manage complex relationships within the data.

Node Classification. Building upon our method, we introduce an auxiliary task focused on sentiment element classification. This task is integral to our structure-aware strategy, directing the model to accurately assign labels to each sentiment element (e.g., sentiment polarity or other relevant categories) within the structural framework. Through this task, the model effectively learns semantic information and local syntactic structures, enhancing its capability to comprehend and analyze intricate language nuances.

These tasks are integral to our fine-grained structure instruction tuning method. All our instruction prompts are summarized in Appendix A.2.
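Both auxiliary signals can be derived mechanically from gold quadruples, as in the brief sketch below. The record format is illustrative only; the actual prompts used for these tasks are listed in Appendix A.2.

```python
# A sketch of deriving the two auxiliary supervision targets from gold
# quadruples. Field wording mirrors Appendix A.2; the helper is illustrative.
def auxiliary_targets(quads: list[dict]) -> tuple[str, str]:
    # Element link prediction: known aspects -> their linked opinions.
    link_target = " | ".join(
        f"aspect: {q['aspect']}, opinion: {q['opinion']}" for q in quads)
    # Node classification: each element -> its label (category here).
    node_target = " | ".join(
        f"aspect: {q['aspect']}, category: {q['category']}" for q in quads)
    return link_target, node_target

link, node = auxiliary_targets([
    {"aspect": "service", "opinion": "ok", "category": "service general"},
    {"aspect": "bathroom", "opinion": "filthy", "category": "ambience general"},
])
```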
# 3 Experiments

# 3.1 Datasets

We validate our method on the Restaurant and Laptop datasets (Cai et al., 2021a). There are 2,286 sentences in the Restaurant domain and 4,076 sentences in the Laptop domain. Following the setting of Cai et al. (2021a), we divide each original dataset into a training set, a validation set, and a test set.

# 3.2 Implementation Details

We used Qwen2.5-7B-Instruct<sup>1</sup> and Qwen2.5-32B-Instruct<sup>2</sup> as the base language models. In addition, to verify how well our framework works on different LLMs, we also conducted ablation experiments on Llama3-8B-Instruct<sup>3</sup> (§3.4). The model was trained with an initial learning rate of 5e-5 for 5 epochs. We set the batch size to 4 and used a gradient accumulation of 2. We applied LoRA (Hu et al., 2021) with a LoRA rank of 32 for efficient fine-tuning, allowing our 7B model to be trained and run for inference on a single NVIDIA GeForce RTX 3090 24G. All training details, including hyperparameters, are presented in Appendix A.1.

<table><tr><td rowspan="2">Model</td><td colspan="2">Restaurant</td><td colspan="2">Laptop</td></tr><tr><td>R</td><td>F1</td><td>R</td><td>F1</td></tr><tr><td colspan="5">• Extractive-based methods</td></tr><tr><td>TAS-BERT(Wan et al., 2020)</td><td>46.3</td><td>33.5</td><td>19.2</td><td>27.3</td></tr><tr><td>Extract-Classify(Cai et al., 2021b)</td><td>52.9</td><td>44.6</td><td>29.4</td><td>35.8</td></tr><tr><td>One-ASQP(Zhou et al., 2023)</td><td>56.2</td><td>60.6</td><td>39.5</td><td>41.5</td></tr><tr><td colspan="5">• Generative-based methods</td></tr><tr><td>Paraphrase(Zhang et al., 2021)</td><td>59.8</td><td>59.8</td><td>42.5</td><td>43.0</td></tr><tr><td>DLO(Hu et al., 2022)</td><td>59.8</td><td>59.9</td><td>43.8</td><td>43.6</td></tr><tr><td>+AToss(Seo et al., 2024)</td><td>59.9</td><td>60.5</td><td>43.9</td><td>44.5</td></tr><tr><td>MvP(Gou et al., 2023)</td><td>57.8</td><td>59.5</td><td>43.6</td><td>43.7</td></tr><tr><td>+AToss(Seo et al., 2024)</td><td>58.3</td><td>60.6</td><td>43.1</td><td>44.2</td></tr><tr><td>qwen2.5-7B</td><td>64.1</td><td>63.6</td><td>44.7</td><td>44.6</td></tr><tr><td>S²IT-7B</td><td>64.9</td><td>66.1</td><td>44.7</td><td>45.9</td></tr><tr><td>qwen2.5-32B</td><td>65.0</td><td>64.2</td><td>45.0</td><td>45.3</td></tr><tr><td>S²IT-32B</td><td>66.6</td><td>67.37</td><td>45.4</td><td>46.7</td></tr></table>

Table 1: Recall and F1 scores (%) on the Restaurant and Laptop datasets compared to other baselines.

# 3.3 Main Results

Our method significantly outperforms previous state-of-the-art baselines in terms of F1 score on both datasets in the supervised setting, as shown in Table 1, establishing a new state of the art. In particular, our performance on the Restaurant dataset of the challenging ACOS task exceeds the baseline model by 2.71 points. This improvement is attributed to our two-step framework, which effectively reduces the coupling in the tuple generation task, helping the model extract correct aspect-opinion pairs from the sentence and make accurate classifications from both syntactic and semantic perspectives.
# 3.4 Ablation Experiments

Effect Analysis of Syntax Integration. To verify the impact of syntax integration, we conducted additional experiments to evaluate the effectiveness of syntax information in our framework. Table 2 compares the base model (qwen2.5-7B), the full $S^2$IT framework, and $S^2$IT with syntax information entirely removed. The results highlight the importance of syntax information in achieving superior performance.

<table><tr><td rowspan="2">Model</td><td colspan="2">Restaurant</td><td colspan="2">Laptop</td></tr><tr><td>R</td><td>F1</td><td>R</td><td>F1</td></tr><tr><td>qwen2.5-7B</td><td>64.1</td><td>63.6</td><td>44.7</td><td>44.6</td></tr><tr><td>S²IT-7B</td><td>64.9</td><td>66.1</td><td>45.0</td><td>45.9</td></tr><tr><td>w/o syntax information</td><td>63.0</td><td>64.8</td><td>43.6</td><td>44.8</td></tr></table>

Table 2: The full $S^2\mathrm{IT}$ framework, with Fine-grained Structural Tuning, versus the same framework with syntax information completely excluded.

Effect Analysis of Syntax Information. Global structural information effectively helps the model understand the syntactic relationship between aspects and opinions in a sentence. As shown in Figure 3, incorporating global syntactic features significantly improved the model's performance. We observed that, compared to the model without syntactic injection, our approach increased the F1 score by $2.9\%$ on the Restaurant dataset and by $3.1\%$ on the Laptop dataset in Step 1, proving the importance of global structure in handling complex dependencies. Local structure helps the model capture fine-grained details by aggregating local syntactic information. In Step 2, incorporating local structure improved the F1 score by $1.4\%$ on the Restaurant dataset and $2.2\%$ on the Laptop dataset. Thus, understanding nearby sentiment elements significantly enhances the model's ability to perform accurate sentiment classification.

Figure 3: The impact of syntactic information at each stage on the Laptop and Restaurant datasets. Step 1 demonstrates the improvements that syntactic information brings to the extraction of $\mathcal{P}$; Step 2 shows how much the model depends on syntactic information when it has the correct $\mathcal{P}$.

Effect Analysis of Structure Instruction Tuning. To validate the effectiveness of structural instruction tuning, we conducted ablation experiments on two datasets using two different LLMs, focusing on the impact of element linking and node classification on the model's ability to understand structural information. As shown in Table 3, the effectiveness of our tasks was confirmed. We observed a significant performance drop across both models on different datasets after removing all structural instruction tuning tasks. This result indicates that the structural instruction tuning tasks effectively enhance the model's understanding of hierarchical structures, especially when handling tasks like complex sentiment classification.

<table><tr><td rowspan="2">Model</td><td colspan="2">Restaurant</td><td colspan="2">Laptop</td></tr><tr><td>R</td><td>F1</td><td>R</td><td>F1</td></tr><tr><td>S2IT+qwen2.5(7B)</td><td>64.9</td><td>66.1</td><td>44.7</td><td>45.9</td></tr><tr><td>w/o Element Link</td><td>63.7</td><td>65.3</td><td>43.4</td><td>44.7</td></tr><tr><td>w/o Node Classification</td><td>63.1</td><td>65.0</td><td>43.3</td><td>44.5</td></tr><tr><td>w/o both</td><td>62.7</td><td>64.3</td><td>42.9</td><td>44.5</td></tr><tr><td>S2IT+llama3(8B)</td><td>64.8</td><td>66.0</td><td>44.2</td><td>45.7</td></tr><tr><td>w/o Element Link</td><td>62.7</td><td>64.9</td><td>43.6</td><td>44.8</td></tr><tr><td>w/o Node Classification</td><td>63.5</td><td>65.8</td><td>42.3</td><td>43.9</td></tr><tr><td>w/o both</td><td>62.8</td><td>64.3</td><td>43.4</td><td>44.6</td></tr></table>

Table 3: The ablation experiments on Structure Instruction Tuning.

Effect of Structural Prompt Templates. To further validate our structural prompt templates, we designed an experiment comparing the overall impact of dependency tree templates composed of natural language (nl-syn) versus classical dependency tree templates composed of symbols (symbol-syn) on our method. The results in Table 4 reveal that symbol-based templates significantly degrade performance. We attribute this decline to the inconsistency between symbol templates and the pretraining objectives of LLMs, which adversely affects downstream task performance. This experiment demonstrates the rationale for converting syntactic information into natural language to better align with the capabilities of LLMs.

<table><tr><td rowspan="2">Model</td><td colspan="2">Restaurant</td><td colspan="2">Laptop</td></tr><tr><td>R</td><td>F1</td><td>R</td><td>F1</td></tr><tr><td>qwen2.5-7B</td><td>64.1</td><td>63.6</td><td>44.7</td><td>44.6</td></tr><tr><td>qwen2.5-7B w/ nl-syn</td><td>65.3</td><td>64.4</td><td>45.0</td><td>44.9</td></tr><tr><td>qwen2.5-7B w/ symbol-syn</td><td>61.9</td><td>61.0</td><td>42.8</td><td>42.9</td></tr></table>

Table 4: Performance comparison of different dependency tree templates on the Restaurant and Laptop datasets. Here, nl-syn refers to dependency tree syntactic information described using natural language templates, while symbol-syn refers to dependency tree syntactic information presented in the traditional bracketed tree format.
# 4 Related Work

Extractive Methods. Previous methods leverage language models such as BERT to generate word-level textual embeddings and train additional classification heads (Wan et al., 2020). Cai et al. (2021b) and Zhou et al. (2023) employ a joint multitask sequence labeling approach to extract the elements within tuples. However, their specially designed extraction frameworks exhibit poor generalization across various ABSA tasks. Consequently, unified generative methods have become the current mainstream.

Generative Methods. Recent approaches have favored designing a unified end-to-end framework for performing ABSA tasks using generative language models. In essence, these methods serialize target tuples into natural language and employ them as labels to fine-tune the language models (Zhang et al., 2021).

Depending on the task requirements, sentiment tuples can be transformed into various sequences for training. Yan et al. (2021) use the indices of the target sentiment elements within a sentence as labels to guide the model in generating the indices of aspects and opinions. Bao et al. (2022, 2023) introduced a tree generation template that guides the model in capturing the semantic relationships between sentiment elements and utilizes a generative model to extract linearized trees.

A key challenge is how to add syntactic structure knowledge to language models. Li et al. (2023a) and Yu et al. (2023) incorporate GNNs into the encoder layers of T5 and BART. These methods all face the challenge of incorporating syntactic structural information into decoder-only models. Our framework integrates syntactic knowledge into decoder-only models in a seamless and fluent manner, without requiring any structural modifications to the models.

# 5 Conclusion

In this paper, we presented $S^2\mathrm{IT}$, a novel framework designed to enhance Aspect Sentiment Quad Prediction (ASQP) by incorporating syntactic structure knowledge into large language models (LLMs) through a multi-step tuning process. The core idea of our framework is to decompose quadruple prediction and syntactic structure learning simultaneously during the multi-step tuning of language models. The significant performance gains and adaptability to different language models further validate the effectiveness of our fine-tuning framework.

# Limitations

While our framework achieved state-of-the-art results on the Restaurant and Laptop datasets, there is still room for improvement in constructing syntactic information and designing structural instruction tuning tasks. We have only implemented classic structure-aware tasks such as link prediction and node classification, yet these have significantly boosted performance. Nonetheless, enhancing large language models (LLMs) to better understand and reason about structural relationships remains an ongoing challenge.

# Acknowledgments

This research was financially supported by the Open Research Fund from Guangdong Laboratory of Artificial Intelligence and Digital Economy (SZ) under Grant No. GML-KF-24-23, the National Science and Technology Major Project (2021ZD0111501), the National Science Fund for Excellent Young Scholars (62122022), the Natural Science Foundation of China (62406078, 62476163, U24A20233), the major key project of PCL (PCL2021A12), the Guangdong Basic and Applied Basic Research Foundation (2023B1515120020), and the Collaborative Education Project of the Ministry of Education (202407). This research was enabled by the computational resources and support of the High Performance Computing Platform at the School of Computer Science, Guangdong University of Technology.
# References

Xiaoyi Bao, Zhongqing Wang, and Guodong Zhou. 2023. Exploring graph pre-training for aspect-based sentiment analysis. In The 2023 Conference on Empirical Methods in Natural Language Processing.

Xiaoyi Bao, Zhongqing Wang, Xiaotong Jiang, Rong Xiao, and Shoushan Li. 2022. Aspect-based sentiment analysis with opinion tree generation. In Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22, pages 4044-4050. International Joint Conferences on Artificial Intelligence Organization. Main Track.

Hongjie Cai, Rui Xia, and Jianfei Yu. 2021a. Aspect-category-opinion-sentiment quadruple extraction with implicit aspects and opinions. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 340-350, Online. Association for Computational Linguistics.

Hongjie Cai, Rui Xia, and Jianfei Yu. 2021b. Aspect-category-opinion-sentiment quadruple extraction with implicit aspects and opinions. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 340-350.

Bingfeng Chen, Qihan Ouyang, Yongqi Luo, Boyan Xu, Ruichu Cai, and Zhifeng Hao. 2024. S$^2$GSL: Incorporating segment to syntactic enhanced graph structure learning for aspect-based sentiment analysis. Preprint, arXiv:2406.02902.

Xuxin Cheng, Zhihong Zhu, Bowen Cao, Qichen Ye, and Yuexian Zou. 2023. MRRL: Modifying the reference via reinforcement learning for non-autoregressive joint multiple intent detection and slot filling. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 10495-10505, Singapore. Association for Computational Linguistics.

Zhibin Gou, Qingyan Guo, and Yujiu Yang. 2023. MvP: Multi-view prompting improves aspect sentiment tuple prediction. Preprint, arXiv:2305.12627.

Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. LoRA: Low-rank adaptation of large language models. Preprint, arXiv:2106.09685.

Mengting Hu, Yike Wu, Hang Gao, Yinhao Bai, and Shiwan Zhao. 2022. Improving aspect sentiment quad prediction via template-order data augmentation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 7889-7900, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.

Jinyang Li, Binyuan Hui, Reynold Cheng, Bowen Qin, Chenhao Ma, Nan Huo, Fei Huang, Wenyu Du, Luo Si, and Yongbin Li. 2023a. Graphix-T5: Mixing pre-trained transformers with graph-aware layers for text-to-SQL parsing. arXiv preprint arXiv:2301.07507.

Pan Li, Ping Li, and Kai Zhang. 2023b. Dual-channel span for aspect sentiment triplet extraction. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 248-261, Singapore. Association for Computational Linguistics.

Shuo Liang, Wei Wei, Xian-Ling Mao, Fei Wang, and Zhiyong He. 2022. BiSyn-GAT+: Bi-syntax aware graph attention network for aspect-based sentiment analysis. In Findings of the Association for Computational Linguistics: ACL 2022, pages 1835-1848, Dublin, Ireland. Association for Computational Linguistics.

Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2019. Exploring the limits of transfer learning with a unified text-to-text transformer. Preprint, arXiv:1910.10683.

Yongsik Seo, Sungwon Song, Ryang Heo, Jieyong Kim, and Dongha Lee. 2024. Make compound sentences simple to analyze: Learning to split sentences for aspect-based sentiment analysis. Preprint, arXiv:2410.02297.

Hai Wan, Yufei Yang, Jianfeng Du, Yanan Liu, Kunxun Qi, and Jeff Z. Pan. 2020. Target-aspect-sentiment joint detection for aspect-based sentiment analysis. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):9122-9129.

Yifeng Xie, Zhihong Zhu, Xuxin Cheng, Zhiqi Huang, and Dongsheng Chen. 2023. Syntax matters: Towards spoken language understanding via syntax-aware attention. In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore. Association for Computational Linguistics.

Xiaohan Xu, Chongyang Tao, Tao Shen, Can Xu, Hongbo Xu, Guodong Long, Jian-guang Lou, and Shuai Ma. 2024. Re-reading improves reasoning in large language models.

Hang Yan, Junqi Dai, Tuo Ji, Xipeng Qiu, and Zheng Zhang. 2021. A unified generative framework for aspect-based sentiment analysis. Preprint, arXiv:2106.04300.

Chengze Yu, Taiqiang Wu, Jiayi Li, Xingyu Bai, and Yujiu Yang. 2023. SynGen: A syntactic plug-and-play module for generative aspect-based sentiment analysis. Preprint, arXiv:2302.13032.

Wenxuan Zhang, Yang Deng, Xin Li, Yifei Yuan, Lidong Bing, and Wai Lam. 2021. Aspect sentiment quad prediction as paraphrase generation. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 9209-9219. Association for Computational Linguistics.

Wenxuan Zhang, Xin Li, Yang Deng, Lidong Bing, and Wai Lam. 2022. A survey on aspect-based sentiment analysis: Tasks, methods, and challenges. Preprint, arXiv:2203.01054.

Junxian Zhou, Haiqin Yang, Yuxuan He, Hao Mou, and Junbo Yang. 2023. A unified one-step solution for aspect sentiment quad prediction. Preprint, arXiv:2306.04152.

Shen Zhou and Tieyun Qian. 2023. On the strength of sequence labeling and generative models for aspect sentiment triplet extraction. In Findings of the Association for Computational Linguistics: ACL 2023, pages 12038-12050, Toronto, Canada. Association for Computational Linguistics.
# Appendix

# A Training

# A.1 Details of Hyperparameters

In this part, we outline the key hyperparameters used for fine-tuning the three different LLMs in our experiments: Qwen2.5-7B-Instruct, Qwen2.5-32B-Instruct, and Llama3-8B-Instruct. The 32B model was trained on an NVIDIA A100 80G. LoRA was applied to fine-tune the models, with ranks set to 16 or 32 and alpha set to 32. The dropout rate was set to 0.1 to mitigate overfitting.

<table><tr><td>Hyperparameter</td><td>Value</td></tr><tr><td>Learning Rate</td><td>5e-5</td></tr><tr><td>Num Train Epochs</td><td>5</td></tr><tr><td>LoRA Rank</td><td>32</td></tr><tr><td>LoRA Alpha</td><td>32</td></tr><tr><td>LoRA Dropout</td><td>0.1</td></tr><tr><td>Batch Size</td><td>4</td></tr><tr><td>Gradient Accumulation Steps</td><td>2</td></tr><tr><td>LR Scheduler Type</td><td>cosine</td></tr></table>

Table 5: Hyperparameters used for Qwen2.5-7B-Instruct.

<table><tr><td>Hyperparameter</td><td>Value</td></tr><tr><td>Learning Rate</td><td>5e-5</td></tr><tr><td>Num Train Epochs</td><td>5</td></tr><tr><td>LoRA Rank</td><td>32</td></tr><tr><td>LoRA Alpha</td><td>32</td></tr><tr><td>LoRA Dropout</td><td>0.1</td></tr><tr><td>Batch Size</td><td>2</td></tr><tr><td>Gradient Accumulation Steps</td><td>4</td></tr><tr><td>LR Scheduler Type</td><td>cosine</td></tr></table>

Table 6: Hyperparameters used for Qwen2.5-32B-Instruct.

<table><tr><td>Hyperparameter</td><td>Value</td></tr><tr><td>Learning Rate</td><td>5e-5</td></tr><tr><td>Num Train Epochs</td><td>5</td></tr><tr><td>LoRA Rank</td><td>16</td></tr><tr><td>LoRA Alpha</td><td>32</td></tr><tr><td>LoRA Dropout</td><td>0.1</td></tr><tr><td>Batch Size</td><td>4</td></tr><tr><td>Gradient Accumulation Steps</td><td>2</td></tr><tr><td>LR Scheduler Type</td><td>cosine</td></tr></table>

Table 7: Hyperparameters used for Llama3-8B-Instruct.
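For reference, the Table 5 settings map onto a standard PEFT/LoRA setup roughly as in the sketch below. The `target_modules` choice is an assumption, since the paper does not list which projection layers are adapted; the remaining values mirror the reported hyperparameters.

```python
# A sketch of the Table 5 configuration in a standard PEFT/LoRA setup.
from transformers import AutoModelForCausalLM, TrainingArguments
from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
lora_config = LoraConfig(
    r=32,                                  # LoRA rank (Table 5)
    lora_alpha=32,                         # LoRA alpha (Table 5)
    lora_dropout=0.1,                      # LoRA dropout (Table 5)
    target_modules=["q_proj", "v_proj"],   # assumption, not stated in the paper
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)

training_args = TrainingArguments(
    output_dir="s2it-qwen2.5-7b",          # illustrative path
    learning_rate=5e-5,
    num_train_epochs=5,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=2,
    lr_scheduler_type="cosine",
)
```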
# A.2 Details of Instruction Tuning

We provide comprehensive details of the instruction tuning process. Specifically, we present examples of the tasks used during Supervised Fine-Tuning (SFT). These examples illustrate the types of instructions and responses the model was trained on, highlighting the integration of domain-specific knowledge and structural information to align the model with the desired tasks.
# task: (extract aspect, opinion)

# Instruction:

Given a sentence and related dependency relations, extract aspect and opinion (both implicit and explicit) from the sentence and return pair(aspect, opinion). Pay attention to the one- or multi-hop dependency relationships between aspect and opinion.

# Input:

sentence: service ok but unfriendly, filthy bathroom. dependency relation: root depend service | service modify ok | bathroom depend but | bathroom modify unfriendly | bathroom modify filthy | service depend bathroom

# Output:

aspect: service, opinion: ok | aspect: service, opinion: unfriendly | aspect: bathroom, opinion: filthy

<|end_of_sentence|>

# task: (extract opinion, aspect)

# Instruction:

Given a sentence and related dependency relations, extract opinion and aspect (both implicit and explicit) from the sentence and return pair(opinion, aspect). Pay attention to the one- or multi-hop dependency relationships between aspect and opinion.

# Input:

sentence: service ok but unfriendly, filthy bathroom. dependency relation: root depend service | service modify ok | bathroom depend but | bathroom modify unfriendly | bathroom modify filthy | service depend bathroom

# Output:

opinion: ok, aspect: service | opinion: unfriendly, aspect: service | opinion: filthy, aspect: bathroom

<|end_of_sentence|>
# task: (linking aspect to opinion)

# Instruction:

Given a sentence, related dependency relations and known aspects, determine the opinion (both implicit and explicit) related to each aspect from the dependency relations and return the pair(aspect, opinion).

# Input:

sentence: service ok but unfriendly, filthy bathroom. dependency relation: root depend service | service modify ok | bathroom depend but | bathroom modify unfriendly | bathroom modify filthy | service depend bathroom candidates: aspect: service | aspect: service | aspect: bathroom

# Output:

aspect: service, opinion: ok | aspect: service, opinion: unfriendly | aspect: bathroom, opinion: filthy

<|end_of_sentence|>

# task: (linking opinion to aspect)

# Instruction:

Given a sentence, related dependency relations and known opinions, determine the aspect (both implicit and explicit) related to each opinion from the dependency relations and return the pair(opinion, aspect).

# Input:

sentence: service ok but unfriendly, filthy bathroom. dependency relation: root depend service | service modify ok | bathroom depend but | bathroom modify unfriendly | bathroom modify filthy | service depend bathroom candidates: opinion: ok | opinion: unfriendly | opinion: filthy

# Output:

opinion: ok, aspect: service | opinion: unfriendly, aspect: service | opinion: filthy, aspect: bathroom

<|end_of_sentence|>
# B Inference

In this section, we introduce the details of our model during the inference process. The unidirectional dependency of sentiment elements in generative models has been thoroughly discussed in MvP (Gou et al., 2023) and SLGM (Zhou and Qian, 2023). Due to the unidirectional attention mechanism in decoder-only LLMs, this issue becomes more pronounced (Xu et al., 2024). Therefore, in the Global Syntax-guided Extraction stage, we adopt a bidirectional generation approach for the $\langle a, o \rangle$ pair, similar to SLGM. Beam search is set to a size of 4.
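A minimal sketch of this bidirectional extraction, under our reading of the text: the model is prompted once for (aspect, opinion) and once for (opinion, aspect), and the two prediction sets are merged. `generate_pairs` is a hypothetical wrapper around the fine-tuned model, and the union strategy is an assumption rather than the authors' exact merging rule.

```python
# A sketch of bidirectional <a, o> pair extraction (assumed union merge).
def extract_pairs_bidirectional(sentence: str, deps: str, generate_pairs) -> set:
    forward = generate_pairs(sentence, deps, order="aspect_first")
    backward = generate_pairs(sentence, deps, order="opinion_first")
    # Normalize the reversed direction, then take the union of both passes.
    backward = {(a, o) for (o, a) in backward}
    return set(forward) | backward
```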
# task: (classification (aspect, opinion) to (category, sentiment))

# Instruction:

Given a sentence, related dependency relations (presented in the form of subgraphs) and (aspect, opinion) candidates, determine the category of the aspect and the sentiment (positive, neutral, negative) of the opinion and return the quadruple(aspect, opinion, category, sentiment).

# Input:

sentence: service ok but unfriendly, filthy bathroom.

subgraph: aspect: service, which is connected to (bathroom, ok) within one hop. opinion: ok, which is connected to (service) within one hop. | aspect: service, which is connected to (bathroom, ok) within one hop. opinion: unfriendly, which is connected to (bathroom) within one hop. | aspect: bathroom, which is connected to (unfriendly, filthy, service, but) within one hop. opinion: filthy, which is connected to (bathroom) within one hop.

candidate: aspect: service, opinion: ok | aspect: service, opinion: unfriendly | aspect: bathroom, opinion: filthy

# Output:

aspect: service, opinion: ok, category: service general, sentiment: negative | aspect: service, opinion: unfriendly, category: service general, sentiment: negative | aspect: bathroom, opinion: filthy, category: ambience general, sentiment: negative

<|end_of_sentence|>
# task: (classification aspect to category)

# Instruction:

Given a sentence, related dependency relations (presented in the form of subgraphs) and known aspects (both implicit and explicit), determine the category related to each aspect from the dependency relation and return the pair (aspect, category).

# Input:

sentence: service ok but unfriendly, filthy bathroom.

subgraph: aspect: service, which is connected to (bathroom, ok) within one hop. | aspect: service, which is connected to (bathroom, ok) within one hop. | aspect: bathroom, which is connected to (unfriendly, filthy, service, but) within one hop.

candidate aspect: service | service | bathroom

# Output:

aspect: service, category: service general | aspect: service, category: service general | aspect: bathroom, category: ambience general

<|end_of_sentence|>

# task: (classification aspect to sentiment)

# Instruction:

Given a sentence, related dependency relations (presented in the form of subgraphs) and known aspects (both implicit and explicit), determine the sentiment related to each aspect from the dependency relation and return the pair (aspect, sentiment).

# Input:

sentence: service ok but unfriendly, filthy bathroom.

subgraph: aspect: service, which is connected to (bathroom, ok) within one hop. | aspect: service, which is connected to (bathroom, ok) within one hop. | aspect: bathroom, which is connected to (unfriendly, filthy, service, but) within one hop.

candidate aspect: service | service | bathroom

# Output:

aspect: service, sentiment: negative | aspect: service, sentiment: negative | aspect: bathroom, sentiment: negative

<|end_of_sentence|>

# task: (classification opinion to category)

# Instruction:

Given a sentence, related dependency relations (presented in the form of subgraphs) and known opinions (both implicit and explicit), determine the category related to each opinion from the dependency relation and return the pair (opinion, category).

# Input:

sentence: service ok but unfriendly, filthy bathroom.

subgraph: opinion: ok, which is connected to (service) within one hop. | opinion: unfriendly, which is connected to (bathroom) within one hop. | opinion: filthy, which is connected to (bathroom) within one hop.

candidate opinion: ok | unfriendly | filthy

# Output:

opinion: ok, category: service general | opinion: unfriendly, category: service general | opinion: filthy, category: ambience general

<|end_of_sentence|>

# task: (classification opinion to sentiment)

# Instruction:

Given a sentence, related dependency relations (presented in the form of subgraphs) and known opinions (both implicit and explicit), determine the sentiment related to each opinion from the dependency relation and return the pair (opinion, sentiment).

# Input:

sentence: service ok but unfriendly, filthy bathroom.

subgraph: opinion: ok, which is connected to (service) within one hop. | opinion: unfriendly, which is connected to (bathroom) within one hop. | opinion: filthy, which is connected to (bathroom) within one hop.

candidate opinion: ok | unfriendly | filthy

# Output:

opinion: ok, sentiment: negative | opinion: unfriendly, sentiment: negative | opinion: filthy, sentiment: negative

<|end_of_sentence|>
2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c598b9fe44929a1f7f6733e57ba2a2148b227f43c9fa992db17c4224f285aabe
size 418640
2itstepwisesyntaxintegrationtuningforlargelanguagemodelsinaspectsentimentquadprediction/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6ad6549b7eff2460e29655da84ca13f5b2aef0e462879a842481273550bda5c8
size 349288
abayesianoptimizationapproachtomachinetranslationreranking/e6893c9d-09ab-4c0e-a429-b12ff05203f6_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0e4ef394ccf19b4c474f70399fb144b73e42a5db38fae5471acd090d5d23e497
size 97369
abayesianoptimizationapproachtomachinetranslationreranking/e6893c9d-09ab-4c0e-a429-b12ff05203f6_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:97e6517f93d3927970cf742fbcee3c3308a97e221891d9a7dcee4937d3c18680
size 116109
abayesianoptimizationapproachtomachinetranslationreranking/e6893c9d-09ab-4c0e-a429-b12ff05203f6_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:09377ff87ea7f05aedca4302290003746edbbdbe3648de939b74d9010db53da0
size 661778
abayesianoptimizationapproachtomachinetranslationreranking/full.md
ADDED
@@ -0,0 +1,356 @@
| 1 |
+
# A Bayesian Optimization Approach to Machine Translation Reranking
|
| 2 |
+
|
| 3 |
+
Julius Cheng<sup>1</sup> Maike Züflé<sup>2</sup> Vilém Zouhar<sup>3</sup> Andreas Vlachos<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
$^{1}$ University of Cambridge $^{2}$ Karlsruhe Institute of Technology $^{3}$ ETH Zürich {jncc3, av308}@cam.ac.uk maize.zuefle@kit.edu vzouhar@ethz.ch
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Reranking, or scoring a list of prediction candidates from a machine translation system with an external scoring model and returning the highest-scoring candidate, remains a simple and effective method for improving prediction quality. However, reranking with high-quality scoring models can add substantial computational cost to the translation pipeline, which we address in this work by framing list reranking as a Bayesian optimization (BayesOpt) problem over the candidate list, where unknown scores are modeled with a Gaussian process. This algorithm scores candidates iteratively, choosing the next candidates by balancing exploration, scoring candidates that differ from those already scored, against exploitation, scoring candidates that resemble high-scoring ones. This procedure finds high-scoring candidates while scoring only a fraction of the candidate list; given candidate lists of 200 random samples (before deduplication), our method matches, with only 70 scoring evaluations on average, the CometKiwi score obtained by scoring a random subset of 180 candidates. We also propose multi-fidelity BayesOpt for list reranking, where scores obtained from a noisier but cheaper proxy scoring model are incorporated into the search process. We show that well-trained distilled proxy scorers can further improve the performance of BayesOpt.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Reranking is a framework for prediction where a probabilistic generator model produces a list of candidates, and a separate evaluator or scoring model produces scores for each of the candidates, which are used to determine the final prediction. Reranking has a long history in natural language processing for sequential prediction problems such as dependency parsing (Collins and Koo, 2005; Charniak and Johnson, 2005) and language modeling problems such as summarization (Ravaut et al., 2022) and machine translation (MT; Fernandes et al., 2022).
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Figure 1: A machine translation system generates candidates Aa, Bb, Cc, Dd, and Ee. The goal of BayesOpt is to find the highest-scoring candidate with fewer scoring calls. An acquisition function repeatedly selects the next candidate to score until the budget is reached, and the candidate with the highest score so far is returned.
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
The quality of models for automatic MT evaluation has surged in recent years due to innovations in neural network architecture (Rei et al., 2020; Juraska et al., 2023; Sellam et al., 2020) as well as the abundance of training data (Freitag et al., 2023b; Kocmi et al., 2024a). These evaluation models are often repurposed for reranking to further improve the performance of an MT system. For instance, in the WMT 2024 shared task (Kocmi et al., 2024a), 5 out of 19 systems, including the overall best submission (Rei et al., 2024), use reranking with Comet models (Rei et al., 2020) and/or minimum Bayes risk decoding (MBR; Eikema and Aziz, 2020), which can be interpreted as a form of reranking. Prior to the application of automatic evaluation metrics to reranking, other scoring methods have been proposed, including discriminatively trained classifiers (Lee et al., 2021; Bhattacharyya et al., 2021) and noisy channel decoding (Yee et al., 2019).
|
| 25 |
+
|
| 26 |
+
So while LMs for MT generation have greatly improved in recent years, scoring models have seen a commensurate increase in quality (Zerva et al., 2022), and thus reranking remains a relevant method for improving translation quality. However, the scoring models have also grown dramatically in size, increasing the computational requirements for reranking.
|
| 27 |
+
|
| 28 |
+
In this work, we address the computational cost of reranking by framing it as a search problem over the list of candidates. The goal of search in this setting is to find high-scoring candidates in a small number of steps, thereby avoiding the cost of scoring the full list. Our proposed algorithm uses Gaussian processes to model uncertainty about unseen scores and Bayesian optimization (BayesOpt; Shahriari et al., 2016) to choose which candidates to score next.
|
| 29 |
+
|
| 30 |
+
GPs are flexible priors over functions which are able to model the complex and nonlinear relationship between each candidate and its score. GPs make very few assumptions about the distribution and base their predictions mostly on observed points, which enables them to easily adapt to different candidate lists across translation instances. BayesOpt is a sequential black-box optimization method that uses the posterior mean and variance of unobserved data points to decide which points to evaluate next.
|
| 31 |
+
|
| 32 |
+
We apply BayesOpt and GPs (BayesOpt+GP) to MT list reranking in a straightforward manner and show that it obtains close to the maximum achievable score with only a fraction of score evaluations. For example, the maximal obtainable score across 200 randomly sampled candidates on our test set is 0.8216 CometKiwi; our method achieves 0.8210 with 70 score evaluations on average, while scoring 70 random candidates attains 0.8149, a difference of 0.0061 which is likely to be human-detectable according to Kocmi et al. (2024b). We also propose a number of search-based baselines which outperform random selection, all of which are outperformed by BayesOpt+GP.
|
| 33 |
+
|
| 34 |
+
Then, building upon previous works that use a faster but noisier proxy scoring function to prune the candidate list (Fernandes et al., 2022; Eikema and Aziz, 2022), we propose a multi-fidelity extension to BayesOpt which incorporates proxy scores to improve estimation. This is related in motivation to coarse-to-fine methods (Petrov, 2011) and model cascading (Chen et al., 2023), where the use of a faster proxy model reduces the use of the main model. In our multi-fidelity experiments, we find that smaller proxy scoring models distilled from the main model can assist BayesOpt+GP in finding high-scoring candidates earlier.
|
| 37 |
+
|
| 38 |
+
# 2 Background
|
| 39 |
+
|
| 40 |
+
# 2.1 Translation generation and reranking
|
| 41 |
+
|
| 42 |
+
In a typical machine translation setting, a conditional language model (LM) is trained to model the probability of the next token $y_{t}$ given a source sentence $x$ and previous tokens: $p(y_{t}|x,y_{1},\dots,y_{t - 1})$ . These probabilities can be autoregressively combined to model a sequence probability $p(y|x)$ . Usually, beam search is used to search for a $y$ which maximizes log probability combined with a length normalization objective (Wu et al., 2016).
|
| 43 |
+
|
| 44 |
+
In a basic list reranking setting, given $x$ , the LM is used to generate a candidate list $\mathcal{C}_x = [y_1,\dots,y_n]$ with a decoding algorithm such as beam search or ancestral sampling. A scoring function $s(x,y_i)$ is then applied to each $y_{i}\in \mathcal{C}_{x}$ , and the best scoring sequence $\arg \max_{y_i\in \mathcal{C}_x}s(x,y_i)$ is returned. A common choice of scoring function is a quality estimation (QE) model which directly predicts a scalar value representing the quality.
|
| 45 |
+
|
| 46 |
+
Reranking with high-quality evaluation metrics has been shown to be highly effective at improving translation output (Freitag et al., 2022), though it can skew results when the same metric is also used for evaluation (Kocmi et al., 2024a). Reranking performance improves as the number of candidates increases (Vernikos and Popescu-Belis, 2024) and when multiple scoring metrics are combined to form a stronger prediction (Fernandes et al., 2022).
|
| 47 |
+
|
| 48 |
+
Reranking adds significant computational costs to prediction and may be prohibitive to use at test time, but it can be used to benefit LM training instead of test time prediction; high-quality predictions obtained from reranking can be used for knowledge distillation (Wang et al., 2024) and self-training (Finkelstein et al., 2024). Such methods can improve the performance of an MT system without additional costs during test time.
|
| 49 |
+
|
| 50 |
+
Previous work on efficient reranking for MT is relatively limited. Fernandes et al. (2022) and Eikema and Aziz (2022) perform a two-stage reranking by first pruning with a faster and noisier scoring function to a fixed size before evaluating the target score. There has been recent interest in efficient approximations for MBR (Cheng and Vlachos, 2023; Deguchi et al., 2024; Trabelsi et al., 2024; Vamvas and Sennrich, 2024), but these methods are not applicable to general scoring functions. Singhal et al. (2023) propose to represent the candidate space compactly in a lattice over which a token-level reranker can efficiently score many candidates. In this work, we attempt to address a more general setting: the reranking of candidate lists with arbitrary black-box scoring functions.
|
| 53 |
+
|
| 54 |
+
# 2.2 Bayesian optimization with Gaussian process prior
|
| 55 |
+
|
| 56 |
+
Bayesian optimization is a sequential algorithm for optimizing a black-box function $f$. $f$ is assumed to be drawn from a prior distribution over functions. The main loop of BayesOpt is as follows: given a set of (possibly noisy) observations $f(a_{1}),\dots,f(a_{i})$, the prior distribution over $f$ is updated to a posterior distribution with Bayes' theorem. An acquisition function determines a query point $a_{i + 1}$ at which to evaluate $f$ next. $f(a_{i + 1})$ is evaluated and added to the set of observations. This repeats until a stopping criterion is reached. The principal design choices in BayesOpt are the prior distribution of $f$ and the acquisition function.
|
| 57 |
+
|
| 58 |
+
A common choice of prior is the Gaussian process, which assumes that any subset of points $f(a_{1}), \ldots, f(a_{i})$ are drawn jointly from a multivariate Gaussian distribution $\mathcal{N}(\mu, \mathcal{K})$ , where $\mathcal{K}$ is the covariance matrix defined by a kernel function such as the radial basis function kernel (RBF). RBFs define the covariance of two points $a$ and $a'$ as:
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
\mathcal{K}_{\mathrm{RBF}}(a, a') = \exp\left(-\frac{\|a - a'\|^{2}}{2w^{2}}\right), \tag{1}
|
| 62 |
+
$$
|
| 63 |
+
|
| 64 |
+
where $w$ is the bandwidth hyperparameter which determines scaling. The choice of kernel dictates prior assumptions about the shape of $f$; with RBF, points that are closer in Euclidean space have larger covariance. RBFs are a popular choice of kernel due to their ability to adapt to complex nonlinear functions.
|
| 65 |
+
|
| 66 |
+
The assumption that $f(a_{1}), \ldots, f(a_{i})$ are jointly Gaussian gives rise to a convenient posterior distribution. Given a vector of observed data points $\mathbf{a}$ and their observed values $f(\mathbf{a})$, the posterior mean $\mu_{a}$ and variance $\sigma_{a}$ of a point $a$ are given by the conditional multivariate Gaussian distribution:
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
\mu_{a} = \mu + \mathcal{K}(a, \mathbf{a})\left(\mathcal{K}(\mathbf{a}, \mathbf{a}) + \sigma^{2} I\right)^{-1} f(\mathbf{a}) \tag{2}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
\sigma_{a} = \mathcal{K}(a, a) + \sigma^{2} - \mathcal{K}(a, \mathbf{a})\left(\mathcal{K}(\mathbf{a}, \mathbf{a}) + \sigma^{2} I\right)^{-1}\mathcal{K}(\mathbf{a}, a) \tag{3}
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
where $\mu$ is the unconditional mean of the distribution, $\sigma^2$ is a constant Gaussian noise on observations, $I$ is the identity matrix, and $\mathcal{K}$ here returns elementwise kernel values when given vector arguments.
|
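
For concreteness, the posterior computation can be written in a few lines of numpy. This is an illustrative sketch, not the paper's implementation; `rbf_kernel` and `gp_posterior` are hypothetical names, and observed values are assumed to be pre-normalized to zero mean as described later in Section 3.1.

```python
import numpy as np

def rbf_kernel(A, B, w=1.0):
    # Equation (1): pairwise RBF kernel between rows of A and rows of B.
    sq = np.sum(A**2, axis=1)[:, None] + np.sum(B**2, axis=1)[None, :] - 2 * A @ B.T
    return np.exp(-sq / (2 * w**2))

def gp_posterior(X_obs, f_obs, X_new, w=1.0, noise=0.0):
    # Equations (2) and (3) with unconditional mean 0 (scores are normalized).
    K_inv = np.linalg.inv(rbf_kernel(X_obs, X_obs, w) + noise * np.eye(len(X_obs)))
    K_no = rbf_kernel(X_new, X_obs, w)
    mean = K_no @ K_inv @ f_obs
    var = 1.0 + noise - np.sum((K_no @ K_inv) * K_no, axis=1)  # K(a, a) = 1 for RBF
    return mean, var
```
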
| 79 |
+
|
| 80 |
+
The acquisition function in BayesOpt is the strategy for selecting the next point to evaluate in the optimization process. Acquisition functions can seek the highest expected improvement (EI; Mockus, 1974), an upper confidence bound if the scores are noisy (Srinivas et al., 2009), or information gain (Hennig and Schuler, 2011). We use EI, defined as:
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
\alpha(a) = \mathbb{E}\left[\max\left(f(a) - f(a^{+}), 0\right)\right], \tag{4}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
where $a^+$ is the location of the current best observation. When $f$ is Gaussian and there is no observation noise, this has the following closed-form solution (Jones, 2001):
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
\alpha(a) = \sigma_{a}\left(z \cdot \mathrm{cdf}(z) + \mathrm{pdf}(z)\right), \tag{5}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
where $z = \frac{\mu_{a} - f(a^{+})}{\sigma_{a}}$, and cdf and pdf are the Gaussian cumulative distribution function and probability density function, respectively. EI encourages both exploration of uncertain points and exploitation of high-scoring points; the quantity in Equation 5 can be increased by increasing $\mu_{a}$ or $\sigma_{a}$.
|
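
Under these definitions, EI can be evaluated for all unobserved candidates at once; a short sketch, with the same caveats as the posterior sketch above:

```python
import numpy as np
from scipy.stats import norm

def expected_improvement(mean, var, f_best):
    # Equation (5); a small floor on the variance avoids division by zero.
    std = np.sqrt(np.maximum(var, 1e-12))
    z = (mean - f_best) / std
    return std * (z * norm.cdf(z) + norm.pdf(z))
```
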
| 93 |
+
|
| 94 |
+
The generality of BayesOpt and the modeling freedom enjoyed by GPs make them suitable for a great variety of tasks, including spatial monitoring (Krause et al., 2008) and hyperparameter optimization (Bergstra et al., 2011). GPs have been applied to text regression tasks (Beck et al., 2013, 2014; Beck and Cohn, 2017), but they are not as well studied in NLP as in many other domains.
|
| 95 |
+
|
| 96 |
+
# 3 Methods
|
| 97 |
+
|
| 98 |
+
# 3.1 MT reranking with Bayesian optimization
|
| 99 |
+
|
| 100 |
+
Our main algorithm is an adaptation of BayesOpt with GPs, as described in Section 2.2, to the reranking setting. Each source sentence $x$ and its associated candidate list is treated as a standalone BayesOpt problem, meaning that no observations are shared across different $x$. Thus, for brevity, we omit $x$ from the notation when discussing BayesOpt for a particular instance.
|
| 101 |
+
|
| 102 |
+
Inputs: main metric $s$, proxy metric $s'$, scoring budget $n$ for evaluating $s$, hypotheses $\mathcal{C}$, number of initial main scores $\alpha$, number of initial proxy scores $\beta$, batch size $k$, precomputed multi-fidelity kernel $\mathcal{K}_{\mathrm{mult}}$.
|
| 103 |
+
|
| 104 |
+
Output: hypothesis with the highest observed score arg $\max_{y\in \mathcal{C}_{\mathrm{obs}}}s(y)$
|
| 105 |
+
|
| 106 |
+
1: $\mathcal{C}_{\mathrm{obs}}^{\prime}\gets \binom {\mathcal{C}}{\min (\beta ,|\mathcal{C}|)},\mathcal{C}_{\mathrm{obs}}\gets \binom {\mathcal{C}_{\mathrm{obs}}^{\prime}}{\min (\alpha ,|\mathcal{C}|)}$ ▷ Sample initial subsets
|
| 107 |
+
2: $S_{\mathrm{obs}}\gets \{s(y)|y\in \mathcal{C}_{\mathrm{obs}}\}$ ▷ Compute scores for main scoring function
|
| 108 |
+
3: $S_{\mathrm{obs}}^{\prime}\gets \{s^{\prime}(y)|y\in \mathcal{C}_{\mathrm{obs}}^{\prime}\}$ ▷ Compute proxy scores
|
| 109 |
+
4: while $|\mathcal{C}_{\mathrm{obs}}| < n$ and $|\mathcal{C}_{\mathrm{obs}}| < |\mathcal{C}|$ do
|
| 110 |
+
5: $\bar{\mathcal{C}}_{\mathrm{obs}}\gets \mathcal{C}\setminus \mathcal{C}_{\mathrm{obs}}$ ▷ Get complement of $\mathcal{C}_{\mathrm{obs}}$
|
| 111 |
+
6: $\hat{S}\leftarrow \operatorname {Norm}(S_{\mathrm{obs}}),\hat{S}^{\prime}\leftarrow \operatorname {Norm}(S_{\mathrm{obs}}^{\prime})$ ▷Normalize observed scores to 0 mean, 1 variance
|
| 112 |
+
7: $y_{\mathrm{best}}\gets \arg \max_{y\in \mathcal{C}_{\mathrm{obs}}}\hat{S} (y)$ ▷Get best observed point
|
| 113 |
+
8: $\forall y\in \bar{\mathcal{C}}_{\mathrm{obs}}:\mu_y,\sigma_y\gets$ calculate posterior using $y,\mathcal{K}_{\mathrm{mult}},\hat{S},\hat{S}^{\prime}$ ▷GP posterior as in Equations (2) and (3)
|
| 114 |
+
9: $\forall y\in \bar{\mathcal{C}}_{\mathrm{obs}}:\gamma_y\gets \operatorname {EI}(y_{\mathrm{best}},\mu_y,\sigma_y)$ ▷Expected improvement as in Equation (5)
|
| 115 |
+
10: $\mathcal{C}_{\mathrm{top - k}}\gets \arg \operatorname {topk}_{y\in \bar{\mathcal{C}}_{\mathrm{obs}}}\gamma_y$ ▷Select $k$ best hypotheses based on EI
|
| 116 |
+
11: $S_{\mathrm{obs}}\gets S_{\mathrm{obs}}\cup \{s(y)|y\in \mathcal{C}_{\mathrm{top - k}}\}$ ▷Compute scores for selected hypotheses
|
| 117 |
+
12: $\mathcal{C}_{\mathrm{obs}}\gets \mathcal{C}_{\mathrm{obs}}\cup \mathcal{C}_{\mathrm{top-k}}$ ▷Update observed hypotheses
|
| 118 |
+
13: end while
|
| 119 |
+
14: return $\arg \max_{y\in \mathcal{C}_{\mathrm{obs}}} s(y)$
|
| 120 |
+
|
| 121 |
+
Algorithm 1: The BayesOpt+GP+P algorithm. BayesOpt+GP is a special case of this where $\beta = 0$ .
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
Let $s$ be the scoring function, an MT quality estimator. Let $\mathcal{C}$ be a set of candidates, $\mathcal{C}_{\mathrm{obs}} \subseteq \mathcal{C}$ the subset of candidates for which we have observed $s(y)$, and $\bar{\mathcal{C}}_{\mathrm{obs}}$ all other $y$ ($\bar{\mathcal{C}}_{\mathrm{obs}} = \mathcal{C} \setminus \mathcal{C}_{\mathrm{obs}}$). To perform reranking for an instance, we first generate candidates $\mathcal{C}$ and initialize the algorithm by scoring a random $\alpha$-sized subset of the list with $s$. In each iteration of the algorithm loop, we normalize the observed scores to mean 0 and variance 1 and assume an unconditional mean of 0. Then we compute the GP posterior of all $y \in \bar{\mathcal{C}}_{\mathrm{obs}}$ with Equations 2 and 3 given the scores of $\mathcal{C}_{\mathrm{obs}}$, which is then used to compute EI with Equation 5, assuming no observation noise. We score the $k$ candidates in $\bar{\mathcal{C}}_{\mathrm{obs}}$ with the highest EI, adding them to $\mathcal{C}_{\mathrm{obs}}$ (and removing them from $\bar{\mathcal{C}}_{\mathrm{obs}}$), and repeat the loop, terminating when a predefined budget of $n$ calls to $s$ is reached (or when all candidates have been evaluated, in the case that $|\mathcal{C}| \leq n$). Finally, we choose $\arg \max_{y \in \mathcal{C}_{\mathrm{obs}}} s(y)$ as the prediction.
|
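
Putting the pieces together, the loop can be sketched as follows, reusing the hypothetical `gp_posterior` and `expected_improvement` helpers from Section 2.2; `embs` holds the candidate representations described next, and `score_fn` stands in for the quality estimator.

```python
import numpy as np

def bayesopt_rerank(embs, score_fn, n, alpha=10, k=1, w=1.0, seed=0):
    # embs: (|C|, d) array of candidate embeddings; score_fn(i) -> s(y_i).
    rng = np.random.default_rng(seed)
    init = rng.choice(len(embs), size=min(alpha, len(embs)), replace=False)
    scores = {int(i): score_fn(int(i)) for i in init}  # initial random subset
    while len(scores) < min(n, len(embs)):
        rest = [i for i in range(len(embs)) if i not in scores]
        obs = list(scores)
        vals = np.array([scores[i] for i in obs])
        vals = (vals - vals.mean()) / (vals.std() + 1e-9)  # renormalize each iteration
        mean, var = gp_posterior(embs[obs], vals, embs[rest], w=w)
        ei = expected_improvement(mean, var, vals.max())
        for i in np.asarray(rest)[np.argsort(-ei)[:k]]:  # score the top-k EI candidates
            scores[int(i)] = score_fn(int(i))
    return max(scores, key=scores.get)  # index of the best observed candidate
```
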
| 126 |
+
|
| 127 |
+
We now describe our choice of GP kernel. $y\in \mathcal{C}$ are strings, and we seek a representation that is fast to compute and to compare, since $|\mathcal{C}|$ representations are generated, and computing the GP covariance matrix requires $|\mathcal{C}|^2$ comparisons. Our kernel is $\mathcal{K}_{\mathrm{MT}}(y_i,y_j) = \mathcal{K}_{\mathrm{RBF}}(\mathrm{emb}(y_i),\mathrm{emb}(y_j))$, where emb returns the mean-pooled token-level outputs of the final decoder layer when generating $y$, normalized to the unit norm after pooling. emb uses representations produced automatically during candidate list generation, so the additional cost to compute it is negligible. Also, the covariance matrix is fast to compute given the candidate list sizes and embedding dimensionality used in our experiments.
|
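
As an illustration, assuming the final-layer decoder states and an attention mask are retained from generation, emb amounts to a masked mean-pool followed by L2 normalization (the names here are illustrative):

```python
import torch

def emb(decoder_states: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # decoder_states: (num_candidates, seq_len, d); mask: (num_candidates, seq_len).
    pooled = (decoder_states * mask.unsqueeze(-1)).sum(dim=1) / mask.sum(dim=1, keepdim=True)
    return pooled / pooled.norm(dim=-1, keepdim=True)  # unit norm after pooling
```
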
| 130 |
+
|
| 131 |
+
# 3.2 Multi-fidelity BayesOpt
|
| 132 |
+
|
| 133 |
+
We also propose an extension to BayesOpt+GP for the setting where observations are available from a different but related proxy score function $s'$. We refer to this as BayesOpt+GP+P. $s'$ is assumed to have non-trivial covariance with the scoring model $s$ and to be cheaper to evaluate. This is known as multi-fidelity BayesOpt in the literature, but while the multi-fidelity settings of Kandasamy et al. (2016) and Wu et al. (2020) use acquisition functions that may choose to evaluate lower-fidelity scores, we study a simpler setting: $\beta$ observations of $s'$ are obtained at the start, where $\beta > \alpha$, and only $s$ may be evaluated during the BayesOpt loop. In the multi-fidelity setting, observations are made on $\langle y_i, s_i \rangle$, a combination of a data point and scoring function, instead of the data point alone.
|
| 134 |
+
|
| 135 |
+
Our kernel for BayesOpt+GP+P is the product of the RBF kernel from Section 3.1 and a kernel over scoring functions:
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
\mathcal{K}_{\mathrm{mult}}\left(\langle y_{i}, s_{k}\rangle, \langle y_{j}, s_{l}\rangle\right) = \mathcal{K}_{\mathrm{MT}}(y_{i}, y_{j})\,\mathcal{K}_{\mathrm{score}}(s_{k}, s_{l}). \tag{6}
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
$\mathcal{K}_{\mathrm{mult}}$ is a valid kernel because a product of two kernels defined on different spaces is also a kernel (Rasmussen and Williams, 2005). With $\mathcal{K}_{\mathrm{mult}}$, the covariance between two observations depends on both the difference between scoring functions and the distance between data points. This way, an observation influences the posterior for all other data points at all choices of scoring function, as long as the scoring functions are correlated. This formulation enables the use of any number of scoring functions, but in this work, we consider at most two: the main scorer $s$ and a proxy scorer $s'$.
|
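
A sketch of the product kernel, assuming unit-norm embeddings and a precomputed 2x2 `K_score` matrix (index 0 for the main scorer $s$, 1 for the proxy $s'$; names are illustrative):

```python
import numpy as np

def k_mult(emb_i, emb_j, fid_i, fid_j, K_score, w=1.0):
    # Equation (6): text kernel times score-function kernel.
    k_text = np.exp(-np.sum((emb_i - emb_j) ** 2) / (2 * w**2))
    return k_text * K_score[fid_i, fid_j]
```
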
| 144 |
+
|
| 145 |
+
We set $\mathcal{K}_{\mathrm{score}}(s_k, s_l)$ to be the empirical covariance between $s_k$ and $s_l$ measured over a validation set, where all scores are normalized per-instance so that in each instance, the scores of all candidates for a particular scorer have mean 0 and variance 1. Then, for each scoring function, we concatenate all candidate scores across instances and compare the resulting lists to obtain the covariance. Covariance is a valid kernel because the covariance calculation can be expressed as a dot product, and dot products are valid kernels.
|
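
A sketch of this estimation step, where `scores_s[i]` and `scores_proxy[i]` hold the candidate scores of validation instance `i` for the two scorers (illustrative names):

```python
import numpy as np

def estimate_k_score(scores_s, scores_proxy):
    # Per-instance normalization, then concatenation across instances.
    norm_cat = lambda per_inst: np.concatenate(
        [(v - v.mean()) / (v.std() + 1e-9) for v in per_inst])
    return np.cov(np.stack([norm_cat(scores_s), norm_cat(scores_proxy)]))  # 2x2 K_score
```
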
| 146 |
+
|
| 147 |
+
Proxy scores are incorporated into the posterior estimation given by Equations 2 and 3 by redefining $a$ to be a tuple of (data point, scoring function) and $\mathbf{a}$ to be a vector of such tuples. The kernel $\mathcal{K}$ is set to $\mathcal{K}_{\mathrm{mult}}$, which takes as input two tuples of data point and scoring function. The full BayesOpt+GP+P algorithm is in Algorithm 1.
|
| 148 |
+
|
| 149 |
+
# 3.3 Proxy scores
|
| 150 |
+
|
| 151 |
+
We train smaller scoring models to have high covariance with $s$ for use in BayesOpt+GP+P. In this work, our scoring functions are based on the Comet referenceless quality estimation architecture (Rei et al., 2020), also known as CometKiwi. These models encode the source and hypothesis jointly with a bidirectional transformer. Activations from all transformer layers are pooled to form a fixed-size representation, which is passed to a feed-forward regression head. The vast majority of computation in these models is spent in the encoder. Thus, faster Comet models can be obtained by reducing the size of the encoder.
|
| 152 |
+
|
| 153 |
+
We train Comet models using two differently sized pretrained multilingual encoder models in two ways: (1) training on the same training set as CometKiwi and (2) distillation. Among distillation methods, we try in preliminary experiments (a) training on the same training set as CometKiwi with ground truth scores replaced with CometKiwi scores and (b) training on a synthetic dataset comprising LM samples along with their associated CometKiwi scores. The latter achieves higher correlation with CometKiwi on sampled candidates, which is to be expected since the training distribution is more suitable for the reranking use case. We therefore use this latter distillation method for all subsequent experiments. A similar procedure has been described in Rei et al. (2022a).
|
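
A hypothetical sketch of assembling such a distillation set with the open-source comet package; the checkpoint name and the `sampled_pairs` variable are assumptions for illustration, not the paper's exact pipeline:

```python
from comet import download_model, load_from_checkpoint

kiwi = load_from_checkpoint(download_model("Unbabel/wmt22-cometkiwi-da"))
data = [{"src": src, "mt": hyp} for src, hyp in sampled_pairs]   # LM samples per source
teacher = kiwi.predict(data, batch_size=64, gpus=1).scores       # CometKiwi labels
distill_set = [{**d, "score": s} for d, s in zip(data, teacher)]
```
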
| 154 |
+
|
| 155 |
+
# 3.4 Candidate list generation
|
| 156 |
+
|
| 157 |
+
In preliminary experiments, we consider generating the candidate list using beam search with 128 outputs versus sampling 200 candidates using $\epsilon$-sampling (Hewitt et al., 2022) with $\epsilon = 0.02$, a setting which effectively balances quality and diversity for MBR (Freitag et al., 2023a). Under beam search, the candidates exhibit high lexical overlap, and while the mean score of candidates is higher, the average maximum score is lower. The effectiveness of truncated sampling over beam search in larger conditional language models has also been observed by Fernandes et al. (2022).
|
| 158 |
+
|
| 159 |
+
Furthermore, beam search suffers from out-of-memory errors on long translations, whereas with sampling, we simply reduce the batch size when out of memory. While it is possible to implement beam search in a batched manner, such an implementation does not exist in any popular conditional language model library, to the best of our knowledge.
|
| 160 |
+
|
| 161 |
+
For these reasons, we generate 200 candidates per instance with $\epsilon$ -sampling, $\epsilon = 0.02$ in all experiments. The sampled candidate list is then deduplicated, resulting in $\sim 178$ candidates on average per instance.
|
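
For reference, epsilon-sampling of this kind is exposed in the Hugging Face transformers library via the `epsilon_cutoff` generation argument; a sketch with the public NLLB checkpoint (the target-language token and batch handling here are illustrative, not the paper's exact setup):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

name = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(name, src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained(name)

inputs = tokenizer("An example source sentence.", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,
    epsilon_cutoff=0.02,        # epsilon-sampling (Hewitt et al., 2022)
    num_return_sequences=200,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("deu_Latn"),
)
candidates = list(set(tokenizer.batch_decode(outputs, skip_special_tokens=True)))  # dedup
```
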
| 162 |
+
|
| 163 |
+
# 4 Experiments
|
| 164 |
+
|
| 165 |
+
We now discuss the details and findings of our Bayesian optimization experiments, followed by analysis of our trained proxy scoring models, concluding with runtime measurements. All runtime values are measured on an A100-SXM4-40GB GPU. For exact values for figures in this section, see Table 3 in the Appendix. Appendix A contains extensive statistical significance tests.
|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
Figure 2: Left: Performance of reranking methods measured as the average CometKiwi score of the selected candidate. Beam search with beam size 5 achieves a score of 0.754 and is too low to be pictured here. Right: percentage of instances where the selected candidate had the highest score. The x-axis is the scoring budget. Legends show the normalized area under the curve of the CometKiwi score of each method in brackets.
|
| 169 |
+
|
| 170 |
+

|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
For BayesOpt experiments, we grid search for the optimal value of the RBF bandwidth parameter $w$ on the entire validation set, setting the scoring budget $n = 100$ and batch size $k = 1$. While it is possible to optimize it for every unique combination of language pair, $n$, $k$, proxy scoring function, and $\beta$, we find that the results are not statistically significantly different within a range of settings. For simplicity, and to demonstrate the robustness of our methods, we use the same $w$ for all experiments.
|
| 175 |
+
|
| 176 |
+
In all experiments, we use $\alpha = 10$ initial randomly scored candidates. We set $k = 1$ in Sections 4.2 and 4.3 to demonstrate the effectiveness of BayesOpt+GP under ideal conditions, but since $k$ can have a large impact on speed, we experiment with varying it in Section 4.5.
|
| 177 |
+
|
| 178 |
+
# 4.1 Models and datasets
|
| 179 |
+
|
| 180 |
+
For candidate generation, we use the 600M-parameter distilled NLLB model (NLLB Team et al., 2022) in all experiments. For the main scoring model, we use CometKiwi-22 (Rei et al., 2022b).
|
| 181 |
+
|
| 182 |
+
For proxy model training, we use data from the WMT Metrics Shared Task up to 2022 (Freitag et al., 2023b), which contains tuples of <source, hypothesis, human score>. The human scores were largely collected with the DA+SQM annotation protocol (Kocmi et al., 2022).
|
| 183 |
+
|
| 184 |
+
For BayesOpt experiments, we select the first 1000 and 500 source sentences per language pair from the WMT23 Metrics Shared Task dataset as the validation and test set, respectively, for 7 language pairs: English-Czech, English-German, English-Japanese, English-Chinese, and the reverse directions of the latter 3 pairs.
|
| 187 |
+
|
| 188 |
+
CometKiwi is based on the encoder of XLM-RoBERTa-large (Conneau et al., 2019) (2.2GB memory). For proxy scorers, we train smaller models based on XLM-RoBERTa-base (1.1GB) and Multilingual-MiniLM-L12-H384 (Wang et al., 2020) (469MB).
|
| 189 |
+
|
| 190 |
+
# 4.2 BayesOpt+GP
|
| 191 |
+
|
| 192 |
+
The goal of BayesOpt+GP is to improve reranking speed by evaluating only a subset of the available candidates. We evaluate this through quality-cost tradeoff curves, where quality is determined by the final selected candidate's CometKiwi score, and cost is determined by the number of calls to the scoring function. As another measure of approximation quality, we also show the percentage of instances in which the actual best scoring candidate is returned. We devise several baselines with which to compare BayesOpt+GP. Each is a strategy for selecting a subset of candidates to score, from which the best scoring candidate is returned. The baselines are:
|
| 193 |
+
|
| 194 |
+
- UniqRandom: Shuffle the candidate list before de-duplication, then de-duplicate while preserving the order of the first appearance of each candidate. Select the first $\min (n,|\mathcal{C}|)$ candidates in the resulting list.
|
| 195 |
+
- Logprob{Avg, Sum}: Sort $\mathcal{C}$ in order of negative sequence log probability (either average or sum), and then select the first $\min(n, |\mathcal{C}|)$ .
|
| 196 |
+
- HillClimbing: Let $y^{+}$ be the highest scoring observation point at any time step. Iteratively select $\arg \min_{y \in \bar{\mathcal{C}}_{\mathrm{obs}}} ||\mathrm{emb}(y) - \mathrm{emb}(y^{+})||$ as the next observation point until $\min (n,|\mathcal{C}|)$ candidates are scored.
|
| 197 |
+
|
| 198 |
+

|
| 199 |
+
Figure 3: Average CometKiwi score of the selected top candidate (y-axis) for BayesOpt+GP+P with Distilled-S (left) and Distilled-M (right) compared to the ProxyFirst baseline. This figure disregards the additional compute costs for these proxy metrics in order to show the marginal score increase from proxy observations.
|
| 200 |
+
|
| 201 |
+

|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
Figure 4: Average CometKiwi score of the selected candidate (y-axis) for BayesOpt+GP+P with different choices of proxy score.
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
UniqRandom simulates the effect of iteratively sampling candidates until $n$ unique candidates are obtained. Logprob{Avg, Sum} are included to verify whether more advanced methods indeed outperform simple subset selection using statistics obtained for free. HillClimbing is a heuristic iterative selection strategy which, like BayesOpt, is black-box and derivative-free (Conn et al., 2009).
|
| 209 |
+
|
| 210 |
+
In Figure 2, BayesOpt+GP outperforms all baselines, and HillClimbing is the best among the baselines, with LogprobAvg following behind. LogprobSum severely underperforms UniqRandom in score, confirming findings on the inadequacy of very high probability translations (Eikema and Aziz, 2020). Informally speaking, UniqRandom is a simple "exploration" strategy that ignores existing observations, while HillClimbing is a simple "exploitation" strategy, only searching over the neighbors nearest the best observation while ignoring the full search space. These results confirm that balancing these respective desiderata helps to find the optimal candidate more efficiently.
|
| 213 |
+
|
| 214 |
+
# 4.3 BayesOpt+GP+P
|
| 215 |
+
|
| 216 |
+
# 4.3.1 Proxy score evaluation
|
| 217 |
+
|
| 218 |
+
We first evaluate the trained proxy scorers independently of their use in BayesOpt according to (1) actual runtime, (2) correlation with human ratings in the WMT23 dataset, (3) correlation with CometKiwi on source-hypothesis pairs in WMT23, and (4) correlation with CometKiwi on the synthetic candidates for each instance, averaged over instances. For correlations we use Kendall's $\tau_{c}$, which is commonly used in MT metric evaluation (Freitag et al., 2023b).
|
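
Kendall's $\tau_c$ is available in scipy; for example, the per-instance candidate correlation in measure (4) could be computed as follows, with `proxy_scores` and `kiwi_scores` standing in for one instance's candidate scores:

```python
from scipy.stats import kendalltau

tau, _ = kendalltau(proxy_scores, kiwi_scores, variant="c")  # Kendall's tau-c
```
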
| 219 |
+
|
| 220 |
+
Table 1 shows the results for the proxy models. The model size corresponds closely to inference time. As desired, training proxies using distillation results in much higher correlation with CometKiwi, although they lose some correlation with human judgments. In subsequent experiments, we consider Distilled-$\{\mathbf{S}, \mathbf{M}\}$ only. While LogprobAvg has comparatively much lower correlation, we nevertheless consider it as a proxy score since it is obtained for free during candidate generation.
|
| 221 |
+
|
| 222 |
+
<table><tr><td>Model</td><td>Time</td><td>Human Test</td><td>CometKiwi Test</td><td>Cands.</td></tr><tr><td>CometKiwi</td><td>51.38s</td><td>0.245</td><td>1.000</td><td>1.000</td></tr><tr><td>LogprobsAvg</td><td>0.00s</td><td>-</td><td>-</td><td>0.191</td></tr><tr><td>LogprobsSum</td><td>0.00s</td><td>-</td><td>-</td><td>-0.090</td></tr><tr><td>Authentic-S</td><td>7.13s</td><td>0.193</td><td>0.314</td><td>0.350</td></tr><tr><td>Authentic-M</td><td>18.71s</td><td>0.199</td><td>0.320</td><td>0.448</td></tr><tr><td>Distilled-S</td><td>7.13s</td><td>0.169</td><td>0.488</td><td>0.620</td></tr><tr><td>Distilled-M</td><td>18.71s</td><td>0.188</td><td>0.572</td><td>0.680</td></tr></table>
|
| 223 |
+
|
| 224 |
+
Table 1: Benchmarking proxy models (Section 3.3) on speed and correlation with human judgments/CometKiwi using the WMT23 dataset. Speed is measured by runtime per 10000 samples using the maximum batch size. Correlation is measured with Kendall's $\tau_{c}$ against human judgments and CometKiwi scores. CometKiwi correlation is taken over the provided targets in WMT23 (Test) and a synthetic dataset comprised of 200 samples per source sentence, deduped (Cands). Logprobs{Avg,Sum} are not evaluated on WMT23 targets because those targets are generated by other MT systems.
|
| 225 |
+
|
| 226 |
+
# 4.3.2 Reranking results
|
| 227 |
+
|
| 228 |
+
When $s'$ is sufficiently fast and correlated with $s$, it can further improve the quality-cost tradeoff of BayesOpt+GP. Recall that BayesOpt+GP+P initializes with $\beta$ evaluations of $s'$. Figure 4 shows the quality-cost curve when all proxy scores are known, i.e., $\beta = 200$. The relative performance when including proxy scores corresponds to their correlation with CometKiwi as shown in Table 1; Distilled-M outperforms Distilled-S, and both outperform LogprobAvg. This demonstrates the importance of ensuring high correlation in the proxy score. The addition of LogprobAvg to BayesOpt+GP has little effect, showing that poorly correlated proxies are too noisy to help and may even hinder performance. Beyond $n = 70$, all methods achieve close to the maximum attainable score.
|
| 229 |
+
|
| 230 |
+
We also examine the effect of initializing with a fraction of proxy observations rather than all of them. For some choice of $\beta$, an appropriate baseline is to rank the top-$n$ candidates among the $\beta$ observed proxy scores. We call this ProxyFirst. The results when using Distilled-M and Distilled-S as proxies are shown in Figure 3. In both cases, the difference between BayesOpt+GP+P and ProxyFirst is smaller when $\beta = 200$ than when $\beta = 50$, and this gap is smaller for Distilled-M. This is to be expected because, as the covariance of $s$ and $s'$ increases, using ProxyFirst with $\beta = 200$ approaches standard full-list reranking. The marginal benefit of BayesOpt+GP+P is more clear when $\beta = 50$, where proxy scores help to find promising candidates earlier.
|
| 233 |
+
|
| 234 |
+
Overall, proxy observations can indeed improve quality for a particular $n$. However, for sufficiently large $n$, BayesOpt+GP converges, so proxy observations are unnecessary. Proxy evaluations add to the runtime cost, which we discuss in Section 4.4. Therefore, while we show that the multi-fidelity kernel is capable of leveraging proxy scores to improve search, in practice, the overall computational budget should be considered along with the quality and cost of the proxy scoring function to ensure that using the method is worthwhile.
|
| 235 |
+
|
| 236 |
+
# 4.4 Runtime
|
| 237 |
+
|
| 238 |
+
Our reranking algorithm significantly reduces actual runtime compared to scoring all candidates for a source sentence. We profile the full pipeline, from generating candidates to making a final selection, in three settings: (1) BayesOpt+GP with $n = 90$; (2) multi-fidelity BayesOpt+GP with 50 Distilled-S scores and $n = 70$; and (3) the baseline of evaluating CometKiwi on all candidates. $n$ and $\beta$ are selected to balance the final scores of the first two settings (0.8213 and 0.8211, respectively, as shown in Table 3).
|
| 239 |
+
|
| 240 |
+
For the runtime calculations, we select 50 source sentences from each language pair and generate 200 candidates for each. For the baseline, we compute scores for all candidates with a batch size of 200. For BayesOpt+GP methods, we profile the additional steps required: computing the kernel, computing the posteriors at each step, and evaluating proxy scores. BayesOpt+GP(+P) uses batch size $k = 10$, which does not affect scores compared to using $k = 1$ (see Section 4.5). Memory bandwidth can be a major overhead in large neural networks, making it inefficient to run small batches. Since BayesOpt+GP obtains $k$ candidates per step, in order to use large batches, we process candidates for multiple instances in parallel.
|
| 241 |
+
|
| 242 |
+
Results are shown in Table 2. In all cases, candidate generation and CometKiwi calculations dominate the overall runtime. The extra cost from BayesOpt-related computations is compensated by the savings from reducing CometKiwi evaluations, despite similarity matrix computation being $\mathcal{O}(|\mathcal{C}|^2)$ and matrix inversion for the posterior calculation at each iteration being $\mathcal{O}(|\mathcal{C}|^3)$.
|
| 243 |
+
|
| 244 |
+
BayesOpt+GP+P with Distilled-S reduces the runtime by further reducing the number of CometKiwi calculations to 70, with the cost of loading and running the Distilled-S proxy metric introducing minimal overhead.
|
| 245 |
+
|
| 246 |
+
<table><tr><td>Operation</td><td>AllComet</td><td>BayesOpt +GP n = 90</td><td>BayesOpt +GP+P n = 70,β = 50</td></tr><tr><td>Candidates</td><td>701.38</td><td>701.38</td><td>701.38</td></tr><tr><td>Similarities</td><td>-</td><td>1.24</td><td>1.24</td></tr><tr><td>BayesOpt+GP</td><td>-</td><td>1.92</td><td>2.25</td></tr><tr><td>Comet Loading</td><td>8.43</td><td>8.43</td><td>11.27</td></tr><tr><td>Distilled-S</td><td>-</td><td>-</td><td>11.11</td></tr><tr><td>CometKiwi</td><td>274.87</td><td>188.39</td><td>146.33</td></tr><tr><td>Total</td><td>984.68</td><td>901.36</td><td>873.58</td></tr></table>
|
| 247 |
+
|
| 248 |
+
Table 2: Runtimes for the full reranking baseline (AllComet), BayesOpt+GP, and BayesOpt+GP+P with Distilled-S as proxy score at settings where CometKiwi scores are roughly equal. Time given in seconds per 350 instances.
|
| 249 |
+
|
| 250 |
+
# 4.5 Batch size $k$ in BayesOpt+GP
|
| 251 |
+
|
| 252 |
+
We examine the effect of batch size $k$ in BayesOpt+GP for $k = 1,2,5,10$. Figure 5 shows that, as expected, larger $k$ diminishes performance, although the differences nearly vanish at $n > 70$.
|
| 253 |
+
|
| 254 |
+
$k$ impacts how often the BayesOpt loop is run and thus has a large effect on speed. Fortunately, we observe that for sufficiently large $n$, $k$ can be increased without sacrificing quality.
|
| 255 |
+
|
| 256 |
+

|
| 257 |
+
Figure 5: Difference between BayesOpt+GP with batch size of 1 (top line in red in Figure 2) and BayesOpt+GP with higher batch sizes. Negative values mean that a higher batch size performed worse than BayesOpt+GP with batch size of 1.
|
| 258 |
+
|
| 259 |
+
# 5 Conclusion
|
| 260 |
+
|
| 261 |
+
In this work, we formalize MT reranking as a Bayesian optimization problem, leveraging the basic observation that similar translations are more likely to have similar quality scores. We also extend the framework to accept observations from proxy scoring functions, which is applicable when the target score is very costly: large QE models, MBR, or human evaluation. In realistic experiments, we show that our methods improve reranking efficiency over strong baselines. We also propose several design choices that make the methods useful in practice: a GP kernel that requires minimal overhead, and effective proxy model training via distillation.
|
| 264 |
+
|
| 265 |
+
We consider our work a first step in applying BayesOpt to MT reranking. Future directions include integrating BayesOpt with candidate generation, alternative acquisition functions, and further exploration of GP kernels for MT.
|
| 266 |
+
|
| 267 |
+
# 6 Limitations
|
| 268 |
+
|
| 269 |
+
The optimization problem considered in this work is to maximize score from a scoring model. We show that BayesOpt is an effective optimizer, but we do not explore to what extent the optimization problem is flawed due to flaws in the scoring model. We refer to Kocmi et al. (2024b) to understand what magnitude of score difference between systems is significant. However, the existence of "metric overfitting" when directly optimizing an evaluation metric is debated and may affect the interpretation of score differences (Fernandes et al., 2022; Wang et al., 2024).
|
| 270 |
+
|
| 271 |
+
BayesOpt+GP requires matrix inversion, an $\mathcal{O}(|\mathcal{C}|^3)$ operation that is performed once per iteration. While it is inexpensive for the $|\mathcal{C}|$ we consider, this limits the number of observations that can be used for posterior computation without resorting to approximations (Noack et al., 2023).
|
| 272 |
+
|
| 273 |
+
As an iterative algorithm, BayesOpt can score no more than $k$ candidates in a batch for a single instance. Small batch sizes introduce a significant bottleneck for large neural networks, so in order to maintain large batch sizes, we propose processing multiple instances in parallel. However, this requires additional engineering.
|
| 274 |
+
|
| 275 |
+
# Acknowledgements
|
| 276 |
+
|
| 277 |
+
Julius Cheng is supported by a scholarship from Huawei. Part of this work received support from the European Union's Horizon research and innovation programme under grant agreement No 101135798, project Meetween (My Personal AI Mediator for Virtual MEETtings BetWEEN People). We thank the organizers of MT Marathon 2024, where the authors met and this work was conceived. We also thank Beni Egressy for useful discussions and Will Tebbutt for lending expertise on GPs.
|
| 280 |
+
|
| 281 |
+
# References
|
| 282 |
+
|
| 283 |
+
Daniel Beck and Trevor Cohn. 2017. Learning kernels over strings using Gaussian processes. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 2: Short Papers), 67-73. Asian Federation of Natural Language Processing.
|
| 284 |
+
Daniel Beck, Trevor Cohn, and Lucia Specia. 2014. Joint emotion analysis via multi-task Gaussian processes. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), 1798-1803. Association for Computational Linguistics.
|
| 285 |
+
Daniel Beck, Kashif Shah, Trevor Cohn, and Lucia Specia. 2013. SHEF-Lite: When less is more for translation quality estimation. In Proceedings of the Eighth Workshop on Statistical Machine Translation, 337-342. Association for Computational Linguistics.
|
| 286 |
+
James Bergstra, Rémi Bardenet, Yoshua Bengio, and Balázs Kégl. 2011. Algorithms for hyper-parameter optimization. In Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc.
|
| 287 |
+
Sumanta Bhattacharyya, Amirmohammad Rooshenas, Subhajit Naskar, Simeng Sun, Mohit Iyyer, and Andrew McCallum. 2021. Energy-based reranking: Improving neural machine translation using energy-based models. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), 4528-4537. Association for Computational Linguistics.
|
| 288 |
+
Eugene Charniak and Mark Johnson. 2005. Coarse-to-fine n-best parsing and MaxEnt discriminative reranking. In Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL '05), 173-180. Association for Computational Linguistics.
|
| 289 |
+
Lingjiao Chen, Matei Zaharia, and James Zou. 2023. FrugalGPT: How to use large language models while reducing cost and improving performance.
|
| 290 |
+
Julius Cheng and Andreas Vlachos. 2023. Faster minimum Bayes risk decoding with confidence-based pruning. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 12473-12480. Association for Computational Linguistics.
|
| 291 |
+
|
| 292 |
+
Michael Collins and Terry Koo. 2005. Discriminative reranking for natural language parsing. Computational Linguistics, 31(1):25-70.
|
| 293 |
+
Andrew R. Conn, Katya Scheinberg, and Luis N. Vicente. 2009. Introduction to Derivative-Free Optimization. Society for Industrial and Applied Mathematics.
|
| 294 |
+
Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. CoRR, abs/1911.02116.
|
| 295 |
+
Hiroyuki Deguchi, Yusuke Sakai, Hidetaka Kamigaito, Taro Watanabe, Hideki Tanaka, and Masao Utiyama. 2024. Centroid-based efficient minimum Bayes risk decoding. In Findings of the Association for Computational Linguistics ACL 2024, 11009-11018, Bangkok, Thailand and virtual meeting. Association for Computational Linguistics.
|
| 296 |
+
Bryan Eikema and Wilker Aziz. 2020. Is MAP decoding all you need? the inadequacy of the mode in neural machine translation. In Proceedings of the 28th International Conference on Computational Linguistics, 4506-4520. International Committee on Computational Linguistics.
|
| 297 |
+
Bryan Eikema and Wilker Aziz. 2022. Sampling-based approximations to minimum Bayes risk decoding for neural machine translation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, 10978-10993. Association for Computational Linguistics.
|
| 298 |
+
Patrick Fernandes, Antonio Farinhas, Ricardo Rei, José G. C. de Souza, Perez Ogayo, Graham Neubig, and Andre Martins. 2022. Quality-aware decoding for neural machine translation. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 1396-1412. Association for Computational Linguistics.
|
| 299 |
+
Mara Finkelstein, Subhajit Naskar, Mehdi Mirzazadeh, Apurva Shah, and Markus Freitag. 2024. MBR and QE finetuning: Training-time distillation of the best and most expensive decoding methods.
|
| 300 |
+
Markus Freitag, Behrooz Ghorbani, and Patrick Fernandes. 2023a. Epsilon sampling rocks: Investigating sampling strategies for minimum Bayes risk decoding for machine translation. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, 9198–9209. Association for Computational Linguistics.
|
| 301 |
+
Markus Freitag, David Grangier, Qijun Tan, and Bowen Liang. 2022. High quality rather than high model probability: Minimum Bayes risk decoding with neural metrics. Transactions of the Association for Computational Linguistics, 10:811-825.
|
| 302 |
+
|
| 303 |
+
Markus Freitag, Nitika Mathur, Chi-kiu Lo, Eleftherios Avramidis, Ricardo Rei, Brian Thompson, Tom Kocmi, Frederic Blain, Daniel Deutsch, Craig Stewart, Chrysoula Zerva, Sheila Castilho, Alon Lavie, and George Foster. 2023b. Results of WMT23 metrics shared task: Metrics might be guilty but references are not innocent. In Proceedings of the Eighth Conference on Machine Translation, 578-628. Association for Computational Linguistics.
|
| 304 |
+
Philipp Hennig and Christian J. Schuler. 2011. Entropy search for information-efficient global optimization. ArXiv, abs/1112.1217.
|
| 305 |
+
John Hewitt, Christopher Manning, and Percy Liang. 2022. Truncation sampling as language model desmoothing. In *Findings of the Association for Computational Linguistics: EMNLP* 2022, 3414-3427. Association for Computational Linguistics.
|
| 306 |
+
Donald R. Jones. 2001. A taxonomy of global optimization methods based on response surfaces. Journal of Global Optimization, 21:345-383.
|
| 307 |
+
Juraj Juraska, Mara Finkelstein, Daniel Deutsch, Aditya Siddhant, Mehdi Mirzazadeh, and Markus Freitag. 2023. MetricX-23: The Google submission to the WMT 2023 metrics shared task. In Proceedings of the Eighth Conference on Machine Translation, 756–767. Association for Computational Linguistics.
|
| 308 |
+
Kirthevasan Kandasamy, Gautam Dasarathy, Junier B Oliva, Jeff Schneider, and Barnabas Poczos. 2016. Gaussian process bandit optimisation with multifidelity evaluations. In Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc.
|
| 309 |
+
Tom Kocmi, Eleftherios Avramidis, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Markus Freitag, Thamme Gowda, Roman Grundkiewicz, Barry Haddow, Marzena Karpinska, Philipp Koehn, Benjamin Marie, Christof Monz, Kenton Murray, Masaaki Nagata, Martin Popel, Maja Popovic, Mariya Shmatova, Steinthor Steingrímsson, and Vilém Zouhar. 2024a. Findings of the WMT24 general machine translation shared task: The LLM era is here but MT is not solved yet. In Proceedings of the Ninth Conference on Machine Translation, 1-46, Miami, Florida, USA. Association for Computational Linguistics.
|
| 310 |
+
Tom Kocmi, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Thamme Gowda, Yvette Graham, Roman Grundkiewicz, Barry Haddow, Rebecca Knowles, Philipp Koehn, Christof Monz, Makoto Morishita, Masaaki Nagata, Toshiaki Nakazawa, Michal Novak, Martin Popel, and Maja Popovic. 2022. Findings of the 2022 conference on machine translation (WMT22). In Proceedings of the Seventh Conference on Machine Translation (WMT), 1-45. Association for Computational Linguistics.
|
| 311 |
+
|
| 312 |
+
Tom Kocmi, Vilém Zouhar, Christian Federmann, and Matt Post. 2024b. Navigating the metrics maze: Reconciling score magnitudes and accuracies. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 1999-2014, Bangkok, Thailand. Association for Computational Linguistics.
|
| 313 |
+
Andreas Krause, Ajit Singh, and Carlos Guestrin. 2008. Near-optimal sensor placements in gaussian processes: Theory, efficient algorithms and empirical studies. J. Mach. Learn. Res., 9:235-284.
|
| 314 |
+
Ann Lee, Michael Auli, and Marc'Aurelio Ranzato. 2021. Discriminative reranking for neural machine translation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume I: Long Papers), 7250-7264. Association for Computational Linguistics.
|
| 315 |
+
Jonas Mockus. 1974. On bayesian methods for seeking the extremum. In Proceedings of the IFIP Technical Conference, 400-404.
|
| 316 |
+
Marcus M. Noack, Harinarayan Krishnan, Mark D. Risser, and Kristofer G. Reyes. 2023. Exact gaussian processes for massive datasets via non-stationary sparsity-discovering kernels. Scientific Reports, 13(1).
|
| 317 |
+
Slav Petrov. 2011. Coarse-to-Fine Natural Language Processing (Theory and Applications of Natural Language Processing). Springer Publishing Company, Incorporated.
|
| 318 |
+
C.E. Rasmussen and C.K.I. Williams. 2005. Gaussian Processes for Machine Learning. Adaptive Computation and Machine Learning series. MIT Press.
|
| 319 |
+
Mathieu Ravaut, Shafiq Joty, and Nancy Chen. 2022. SummaReranker: A multi-task mixture-of-experts re-ranking framework for abstractive summarization. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 4504-4524. Association for Computational Linguistics.
|
| 320 |
+
Ricardo Rei, Ana C Farinha, José G.C. de Souza, Pedro G. Ramos, André F.T. Martins, Luisa Coheur, and Alon Lavie. 2022a. Searching for COMETINHO: The little metric that could. In Proceedings of the 23rd Annual Conference of the European Association for Machine Translation, 61-70. European Association for Machine Translation.
|
| 321 |
+
Ricardo Rei, Jose Pombal, Nuno M. Guerreiro, João Alves, Pedro Henrique Martins, Patrick Fernandes, Helena Wu, Tania Vaz, Duarte Alves, Amin Farajian, Sweta Agrawal, Antonio Farinhas, José G. C. De Souza, and André Martins. 2024. Tower v2: Unbabel-IST 2024 submission for the general MT shared task. In Proceedings of the Ninth Conference on Machine Translation, 185–204, Miami, Florida, USA. Association for Computational Linguistics.
|
| 322 |
+
|
| 323 |
+
Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. COMET: A neural framework for MT evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2685-2702. Association for Computational Linguistics.
|
| 324 |
+
Ricardo Rei, Marcos Treviso, Nuno M. Guerreiro, Chrysoula Zerva, Ana C Farinha, Christine Maroti, José G. C. de Souza, Taisiya Glushkova, Duarte Alves, Luisa Coheur, Alon Lavie, and André F. T. Martins. 2022b. CometKiwi: IST-unbabel 2022 submission for the quality estimation shared task. In Proceedings of the Seventh Conference on Machine Translation (WMT), 634-645. Association for Computational Linguistics.
|
| 325 |
+
Thibault Sellam, Dipanjan Das, and Ankur Parikh. 2020. BLEURT: Learning robust metrics for text generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, 7881-7892. Association for Computational Linguistics.
|
| 326 |
+
Bobak Shahriari, Kevin Swersky, Ziyun Wang, Ryan P. Adams, and Nando de Freitas. 2016. Taking the human out of the loop: A review of bayesian optimization. Proceedings of the IEEE, 104:148-175.
|
| 327 |
+
Prasann Singhal, Jiacheng Xu, Xi Ye, and Greg Durrett. 2023. EEL: Efficiently encoding lattices for reranking. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 9299-9316. Association for Computational Linguistics.
|
| 328 |
+
Niranjan Srinivas, Andreas Krause, Sham M. Kakade, and Matthias W. Seeger. 2009. Information-theoretic regret bounds for gaussian process optimization in the bandit setting. IEEE Transactions on Information Theory, 58:3250-3265.
|
| 329 |
+
NLLB Team, Marta R. Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. 2022. No language left behind: Scaling human-centered machine translation.
|
| 330 |
+
Firas Trabelsi, David Vilar, Mara Finkelstein, and Markus Freitag. 2024. Efficient minimum bayes risk decoding using low-rank matrix completion algorithms.
|
| 331 |
+
Jannis Vamvas and Rico Sennrich. 2024. Linear-time minimum Bayes risk decoding with reference aggregation. In Proceedings of the 62nd Annual Meeting
|
| 332 |
+
|
| 333 |
+
of the Association for Computational Linguistics (Volume 2: Short Papers), 790-801, Bangkok, Thailand. Association for Computational Linguistics.
|
| 334 |
+
Giorgos Vernikos and Andrei Popescu-Belis. 2024. Don't rank, combine! combining machine translation hypotheses using quality estimation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 12087-12105, Bangkok, Thailand. Association for Computational Linguistics.
|
| 335 |
+
Jun Wang, Eleftheria Briakou, Hamid Dadkhahi, Rishabh Agarwal, Colin Cherry, and Trevor Cohn. 2024. Don't throw away data: Better sequence knowledge distillation.
|
| 336 |
+
Wenhui Wang, Furu Wei, Li Dong, Hangbo Bao, Nan Yang, and Ming Zhou. 2020. MiniLM: Deep self-attention distillation for task-agnostic compression of pre-trained transformers.
|
| 337 |
+
Jian Wu, Saul Toscano-Palmerin, Peter I. Frazier, and Andrew Gordon Wilson. 2020. Practical multifidelity bayesian optimization for hyperparameter tuning. In Proceedings of The 35th Uncertainty in Artificial Intelligence Conference, volume 115 of Proceedings of Machine Learning Research, 788-798. PMLR.
|
| 338 |
+
Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2016. Google's neural machine translation system: Bridging the gap between human and machine translation.
|
| 339 |
+
Kyra Yee, Yann Dauphin, and Michael Auli. 2019. Simple and effective noisy channel modeling for neural machine translation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), 5696-5701. Association for Computational Linguistics.
|
| 340 |
+
Chrysoula Zerva, Frédéric Blain, Ricardo Rei, Piyawat Lertvittayakumjorn, José G. C. de Souza, Steffen Eger, Diptesh Kanojia, Duarte Alves, Constantin Orasan, Marina Fomicheva, André F. T. Martins, and Lucia Specia. 2022. Findings of the WMT 2022 shared task on quality estimation. In Proceedings of the Seventh Conference on Machine Translation (WMT), 69-99. Association for Computational Linguistics.
<table><tr><td rowspan="2">Method</td><td rowspan="2">Figure</td><td colspan="10">CometKiwi runs</td></tr><tr><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td></tr><tr><td>UniqRandom</td><td>2</td><td>0.7917</td><td>0.8022</td><td>0.8074</td><td>0.8104</td><td>0.8124</td><td>0.8140</td><td>0.8149</td><td>0.8160</td><td>0.8168</td><td>0.8175</td></tr><tr><td>LogprobAvg</td><td>2</td><td>0.7956</td><td>0.8055</td><td>0.8101</td><td>0.8129</td><td>0.8149</td><td>0.8162</td><td>0.8171</td><td>0.8181</td><td>0.8187</td><td>0.8193</td></tr><tr><td>LogprobSum</td><td>2</td><td>0.7519</td><td>0.7723</td><td>0.7834</td><td>0.7913</td><td>0.7974</td><td>0.8019</td><td>0.8051</td><td>0.8081</td><td>0.8109</td><td>0.8125</td></tr><tr><td>HillClimbing</td><td>2</td><td>0.7917</td><td>0.8080</td><td>0.8124</td><td>0.8148</td><td>0.8165</td><td>0.8176</td><td>0.8184</td><td>0.8191</td><td>0.8196</td><td>0.8200</td></tr><tr><td>ProxyFirst 200 Distilled-S</td><td>3</td><td>0.8081</td><td>0.8141</td><td>0.8167</td><td>0.8181</td><td>0.8190</td><td>0.8197</td><td>0.8202</td><td>0.8206</td><td>0.8208</td><td>0.8210</td></tr><tr><td>ProxyFirst 200 Distilled-M</td><td>3</td><td>0.8119</td><td>0.8165</td><td>0.8184</td><td>0.8194</td><td>0.8201</td><td>0.8206</td><td>0.8209</td><td>0.8211</td><td>0.8212</td><td>0.8213</td></tr><tr><td>ProxyFirst 50 Distilled-S</td><td>3</td><td>0.8054</td><td>0.8100</td><td>0.8114</td><td>0.8121</td><td>0.8124</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>ProxyFirst 50 Distilled-M</td><td>3</td><td>0.8073</td><td>0.8107</td><td>0.8119</td><td>0.8122</td><td>0.8124</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>BayesOpt+GP</td><td>2,4,3</td><td>0.7917</td><td>0.8121</td><td>0.8167</td><td>0.8190</td><td>0.8201</td><td>0.8206</td><td>0.8210</td><td>0.8212</td><td>0.8213</td><td>0.8214</td></tr><tr><td>BayesOpt+GP+P with LogprobAvg</td><td>4</td><td>0.7956</td><td>0.8123</td><td>0.8166</td><td>0.8187</td><td>0.8198</td><td>0.8205</td><td>0.8208</td><td>0.8210</td><td>0.8213</td><td>0.8214</td></tr><tr><td>BayesOpt+GP+P with 200 Distilled-S</td><td>4,3</td><td>0.8081</td><td>0.8165</td><td>0.8190</td><td>0.8200</td><td>0.8207</td><td>0.8210</td><td>0.8212</td><td>0.8213</td><td>0.8214</td><td>0.8215</td></tr><tr><td>BayesOpt+GP+P with 200 Distilled-M</td><td>4,3</td><td>0.8119</td><td>0.8182</td><td>0.8199</td><td>0.8205</td><td>0.8209</td><td>0.8211</td><td>0.8213</td><td>0.8214</td><td>0.8215</td><td>0.8215</td></tr><tr><td>BayesOpt+GP+P with 50 Distilled-S</td><td>4,3</td><td>0.8054</td><td>0.8153</td><td>0.8184</td><td>0.8196</td><td>0.8204</td><td>0.8208</td><td>0.8210</td><td>0.8213</td><td>0.8214</td><td>0.8214</td></tr><tr><td>BayesOpt+GP+P with 50 Distilled-M</td><td>4,3</td><td>0.8073</td><td>0.8164</td><td>0.8187</td><td>0.8200</td><td>0.8207</td><td>0.8209</td><td>0.8211</td><td>0.8213</td><td>0.8214</td><td>0.8215</td></tr></table>
<table><tr><td rowspan="2">Method</td><td rowspan="2">Figure</td><td colspan="10">CometKiwi runs</td></tr><tr><td>110</td><td>120</td><td>130</td><td>140</td><td>150</td><td>160</td><td>170</td><td>180</td><td>190</td><td>200</td></tr><tr><td>UniqRandom</td><td>2</td><td>0.8182</td><td>0.8188</td><td>0.8192</td><td>0.8197</td><td>0.8200</td><td>0.8205</td><td>0.8208</td><td>0.8211</td><td>0.8214</td><td>0.8216</td></tr><tr><td>LogprobAvg</td><td>2</td><td>0.8199</td><td>0.8203</td><td>0.8205</td><td>0.8209</td><td>0.8211</td><td>0.8212</td><td>0.8213</td><td>0.8214</td><td>0.8216</td><td>0.8216</td></tr><tr><td>LogprobSum</td><td>2</td><td>0.8139</td><td>0.8156</td><td>0.8170</td><td>0.8180</td><td>0.8188</td><td>0.8196</td><td>0.8204</td><td>0.8209</td><td>0.8212</td><td>0.8216</td></tr><tr><td>HillClimbing</td><td>2</td><td>0.8203</td><td>0.8206</td><td>0.8208</td><td>0.8209</td><td>0.8211</td><td>0.8213</td><td>0.8214</td><td>0.8215</td><td>0.8216</td><td>0.8216</td></tr><tr><td>BayesOpt+GP</td><td>2,4,3</td><td>0.8215</td><td>0.8215</td><td>0.8215</td><td>0.8216</td><td>0.8216</td><td>0.8216</td><td>0.8216</td><td>0.8216</td><td>0.8216</td><td>0.8216</td></tr><tr><td>BayesOpt+GP+P with LogprobAvg</td><td>4</td><td>0.8214</td><td>0.8215</td><td>0.8215</td><td>0.8216</td><td>0.8216</td><td>0.8216</td><td>0.8216</td><td>0.8216</td><td>0.8216</td><td>0.8216</td></tr></table>
Table 3: Exact values (selected candidate score) for Figures 2 to 4.
# A Statistical Significance
We measure statistical significance between two methods based on the final candidate CometKiwi scores at budgets 30, 60, and 90, as well as across the budget range from 10 to 190, in Table 4. To determine whether one method is better than another, we use a one-sided paired Student's t-test with a p-value threshold of 0.01, run across the individual samples.
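As a concrete illustration, such a test can be run with SciPy as in the sketch below; the score arrays are synthetic placeholders, not the paper's data.

```python
# Minimal sketch of the significance test described above, assuming
# per-sample final CometKiwi scores for two methods at a fixed budget.
# The data below is synthetic and purely illustrative.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
scores_a = 0.82 + 0.01 * rng.standard_normal(500)               # hypothetical method A
scores_b = scores_a - 0.002 + 0.005 * rng.standard_normal(500)  # hypothetical method B

# One-sided paired t-test: is method A significantly better than method B?
result = stats.ttest_rel(scores_a, scores_b, alternative="greater")
print(f"t = {result.statistic:.3f}, p = {result.pvalue:.4g}")
print("A better than B at the 0.01 level:", result.pvalue < 0.01)
```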
<table><tr><td>Budget 30</td><td>UniqRandom</td><td>LogprobAvg</td><td>LogprobSum</td><td>HillClimbing</td><td>BayesOpt+GP</td><td>Budget 60</td><td>UniqRandom</td><td>LogprobAvg</td><td>LogprobSum</td><td>HillClimbing</td><td>BayesOpt+GP</td></tr><tr><td>LogprobAvg</td><td>←</td><td>←</td><td>↑</td><td>↑</td><td></td><td>LogprobAvg</td><td>←</td><td>←</td><td>↑</td><td>↑</td><td></td></tr><tr><td>LogprobSum</td><td>↑</td><td>↑</td><td></td><td>↑</td><td>↑</td><td>LogprobSum</td><td>↑</td><td>↑</td><td></td><td>↑</td><td>↑</td></tr><tr><td>HillClimbing</td><td>←</td><td>←</td><td>←</td><td></td><td>↑</td><td>HillClimbing</td><td>←</td><td>←</td><td>←</td><td></td><td>↑</td></tr><tr><td>ProxyFirst 200 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>ProxyFirst 200 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>↑</td><td></td></tr><tr><td>ProxyFirst 200 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td>←</td><td>ProxyFirst 200 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>ProxyFirst 50 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>↑</td><td>↑</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>ProxyFirst 50 Distilled-M</td><td>←</td><td>←</td><td>←</td><td></td><td>↑</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BayesOpt+GP</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>BayesOpt+GP</td><td>←</td><td>←</td><td>←</td><td></td><td></td></tr><tr><td>BayesOpt+GP+P with LogprobAvg</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>BayesOpt+GP+P with LogprobAvg</td><td>←</td><td>←</td><td>←</td><td></td><td></td></tr><tr><td>BayesOpt+GP+P with 200 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td>←</td><td>BayesOpt+GP+P with 200 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP+P with 200 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td>←</td><td>BayesOpt+GP+P with 200 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP+P with 50 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td>←</td><td>BayesOpt+GP+P with 50 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP+P with 50 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td>←</td><td>BayesOpt+GP+P with 50 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr></table>
<table><tr><td>Budget 90</td><td>UniqRandom</td><td>LogprobAvg</td><td>LogprobSum</td><td>HillClimbing</td><td>BayesOpt+GP</td><td>Across budgets 10 to 190</td><td>UniqRandom</td><td>LogprobAvg</td><td>LogprobSum</td><td>HillClimbing</td><td>BayesOpt+GP</td></tr><tr><td>LogprobAvg</td><td>←</td><td>←</td><td>↑</td><td>↑</td><td></td><td>LogprobAvg</td><td>←</td><td>←</td><td>↑</td><td>↑</td><td></td></tr><tr><td>LogprobSum</td><td>↑</td><td>↑</td><td>↑</td><td>↑</td><td></td><td>LogprobSum</td><td>↑</td><td>↑</td><td>↑</td><td>↑</td><td></td></tr><tr><td>HillClimbing</td><td>←</td><td>←</td><td>←</td><td>↑</td><td></td><td>HillClimbing</td><td>←</td><td>←</td><td>←</td><td>↑</td><td></td></tr><tr><td>ProxyFirst 200 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td>↑</td><td>ProxyFirst 200 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>ProxyFirst 200 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>ProxyFirst 200 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>BayesOpt+GP</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP+P with LogprobAvg</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>BayesOpt+GP+P with LogprobAvg</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP+P with 200 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>BayesOpt+GP+P with 200 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP+P with 200 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td>←</td><td>BayesOpt+GP+P with 200 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP+P with 50 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>BayesOpt+GP+P with 50 Distilled-S</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr><tr><td>BayesOpt+GP+P with 50 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td><td>BayesOpt+GP+P with 50 Distilled-M</td><td>←</td><td>←</td><td>←</td><td>←</td><td></td></tr></table>
Table 4: Statistical significance comparison between the proposed methods across various CometKiwi call budgets. Within a cell, $\uparrow$ means that the column method (in the header) is statistically significantly better than the row method, and $\leftarrow$ means the opposite. If a cell is empty, neither method is significantly better than the other. For example, in the Budget 30 (top left) table, the $\leftarrow$ in the third row and first column means that HillClimbing is significantly better than UniqRandom with a budget of 30.
abayesianoptimizationapproachtomachinetranslationreranking/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb46971632944abf8c8f704bd19ec1c426f5166c58cc2e46843ea6d911201716
size 711131
abayesianoptimizationapproachtomachinetranslationreranking/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54ea3762649a62cb49e1757e875468dba691b7497326a6c08950cfdea9b85b23
size 501217
accessabenchmarkforabstractcausaleventdiscoveryandreasoning/dba615fd-e933-40db-b521-4c7b6a606e65_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1bed62e282f7f73a4290f7efeaceeebb0b181a99edd9039317cc20ea6c1f397e
size 148958
accessabenchmarkforabstractcausaleventdiscoveryandreasoning/dba615fd-e933-40db-b521-4c7b6a606e65_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8fb33713fce386e10e2cae54a565a7c88674e2c8f1e75cbccc7c7e8d1a2a0f67
size 185112
accessabenchmarkforabstractcausaleventdiscoveryandreasoning/dba615fd-e933-40db-b521-4c7b6a606e65_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:10835e9d153ee3d49e33d3b6dba01c61e08cfd27c979c2988bb7ff7ac722c31c
size 537632
accessabenchmarkforabstractcausaleventdiscoveryandreasoning/full.md
ADDED
@@ -0,0 +1,556 @@
# ACCESS: A Benchmark for Abstract Causal Event Discovery and Reasoning

Vy Vo, Lizhen Qu, Tao Feng, Yuncheng Hua, Xiaoxi Kang, Songhai Fan, Tim Dwyer, Lay-Ki Soon, Gholamreza Haffari
Monash University, Australia
{firstname_lastname}@monash.edu

# Abstract
Identifying cause-and-effect relationships is critical to understanding real-world dynamics and ultimately causal reasoning. Existing methods for identifying event causality in NLP, including those based on Large Language Models (LLMs), exhibit difficulties in out-of-distribution settings due to the limited scale and heavy reliance on lexical cues within available benchmarks. Modern benchmarks, inspired by probabilistic causal inference, have attempted to construct causal graphs of events as a robust representation of causal knowledge, where CRAB (Romanou et al., 2023) is one recent benchmark along this line. In this paper, we introduce ACCESS, a benchmark designed for discovery and reasoning over abstract causal events. Unlike existing resources, ACCESS focuses on causality of everyday life events at the abstraction level. We propose a pipeline for identifying abstractions for event generalizations from GLUCOSE (Mostafazadeh et al., 2020), a large-scale dataset of implicit commonsense causal knowledge, from which we subsequently extract 1,494 causal pairs. Our experiments highlight the ongoing challenges of using statistical methods and/or LLMs for automatic abstraction identification and causal discovery in NLP. Nonetheless, we demonstrate that the abstract causal knowledge provided in ACCESS can be leveraged for enhancing QA reasoning performance in LLMs.
# 1 Introduction
Commonsense causal reasoning plays a vital role in developing a mental model of reality, where the ability to discover, explain and predict causal relations between events or forces in the environment is fundamental to human planning and control (Johnson-Laird and Khemlani, 2017; Griffiths, 2017). Cognitive science studies further suggest that event causality is critical to human understanding of narratives (Van den Broek et al., 1996; Fletcher and Bloom, 1988; Tillman et al., 2020; Sun et al., 2023), and story events with more causal relations tend to be better memorized than those with fewer relations (Graesser et al., 2003). Humans are able to construct a causal mental model of events after reading a set of stories (Zwaan et al., 1995). For example, in Figure 1, a reader would easily identify a causal relation between $e_1$: "A person needs money." and $e_2$: "A person gets a job." by abstracting away concrete details, such as mentions of particular entities, grouping linguistic variations of the same meanings, and observing that $e_1$ almost always leads to $e_2$ in multiple stories, without the explicit presence of lexical cues (e.g. because) in text. Thus, this paper focuses on investigating to what extent LLMs can identify causal relations without relying on linguistic cues and perform causal reasoning over commonsense knowledge at the abstraction level.
Prior works on causal relation extraction heavily rely on linguistic cues, e.g. because of, by, due to, to discern causal relations between event mentions and cause/effect text spans within a text (Wolff and Song, 2003; Mirza and Tonelli, 2014). In contrast, statistical causal discovery methods for event causality do not require linguistic cues but exploit statistical information of symbolic representations of events (Pearl and Mackenzie, 2018). As a result, those approaches are able to find causal relations even when they are not explicitly mentioned anywhere in texts. There has accordingly been criticism regarding the susceptibility of causal relation extraction models to exploiting linguistic cues to attain high performance without engaging in actual causal reasoning (Yang et al., 2022; Li et al., 2022). Many causal relation extraction datasets, including TempEval-3 (Mirza et al., 2014), CATENA (Mirza and Tonelli, 2016), Causal-TimeBank (Mirza and Tonelli, 2014), BECauSE (Dunietz et al., 2015, 2017) and the Event StoryLine Corpus (Caselli and Vossen, 2017), are not suitable for evaluating statistical causal discovery methods, because mentions of causal events that are semantically similar but expressed in different linguistic forms are not mapped into the same symbols.

Abstract Event Extraction

Figure 1: Pipeline of abstract causal event discovery. An event is viewed from three hierarchical levels: mention (realization in a specific text corpus), generalization (conceptualization of the event's components) and abstraction (group of causally consistent generalizations). Given a collection of event mentions, Phase 1 produces a collection of abstractions $A, B, C$ that are mapped back to the original corpus to construct a suitable representation in Phase 2, such as a co-occurrence matrix. Causal discovery algorithms can then be employed to detect causal relations within the data, which may consider the contexts.
Humans identify groupings of semantically similar event mentions via abstraction, which omits concrete spurious details. It has also been suggested that effective causal reasoning requires models to learn suitable abstract representations of the world (Girju, 2003). Abstraction of events can leverage causality theories (Pearl, 2009), which provide a theoretical underpinning for causal reasoning and formal analysis of causal relations. Given a set of random variables, theoretically grounded causal discovery algorithms (Vowels et al., 2022) use statistics collected from a dataset to construct causal graphs, which combine causal relations into logically coherent directed acyclic graphs (DAGs). In a causal graph, a node $v_{i}$ denotes a random variable, while an edge from $v_{i}$ to $v_{j}$ indicates $v_{i}$ is a direct cause of $v_{j}$. To the best of our knowledge, none of the existing datasets, such as COPA (Roemmele et al., 2011) and MAVEN-ERE (Wang et al., 2022), provide both abstraction of events grounded in a corpus and the corresponding causal graphs.

Existing knowledge graphs containing causal relations (Sap et al., 2019; Hwang et al., 2021; Hassanzadeh et al., 2022; Mbouadeu et al., 2023) cannot support evaluating both (1) event abstractions grounded in a corpus, and (2) construction of commonsense causal graphs from a collection of documents. The popular ATOMIC (Sap et al., 2019) in particular is harvested by crowd-sourcing, so that similar events in ATOMIC are not grouped together, and there is also no associated corpus containing all relevant event mentions. As a result, it is challenging to recover the grounded contexts and map events to random variables in order to apply statistical causal discovery. Two additional resources for constructing causal graphs from a collection of documents are CauseNet (Heindorf et al., 2020) and CRAB (Romanou et al., 2023). The causal relations in CauseNet are explicitly mentioned in texts, while causal semantics can exist beyond lexical mentions. CRAB generates causal graphs from events extracted from online news articles. Neither CauseNet nor CRAB provides abstraction of events or grouping of semantically equivalent events with linguistic variations. Therefore, the resulting graphs over fine-grained events can explode in size, posing severe computational difficulties. Another related resource is GLUCOSE (Mostafazadeh et al., 2020), which translates natural language expressions of event mentions and their relations into generic inferential rules dependent on story contexts. In those rules, entities are mapped to generalized concepts and the same keywords are used for the same relations, such as causes. Although GLUCOSE lacks grouping of similar abstract events and the interconnection of relations into a logically coherent causal graph, our paper extends this dataset to construct abstract knowledge graphs of causality.
Contribution. We introduce ACCESS, a benchmark for AbstraCt Causal Event DiScovery and ReaSoning. We propose to explore event causality at the abstraction level as a more efficient representation of knowledge. We introduce a reusable pipeline (see Figure 1) to curate causal relations from a large corpus of stories of daily life events. The resulting benchmark, ACCESS, is a graphical modelling of the causal relations of 725 event abstractions, forming a base of abstract commonsense knowledge of causation. Our benchmark also includes annotations to evaluate each step of the pipeline. Using ACCESS, our experiments shed light on the ongoing challenges within the field.

Firstly, the application of statistical structure learning algorithms for full graph discovery remains highly challenging. Secondly, solely relying on LLMs and automatic clustering proves insufficient for adequate event abstraction. Thirdly, LLMs still struggle with pairwise non-contextual causal discovery, indicating a gap in their possession of complete commonsense causal knowledge. Lastly, incorporating abstract commonsense knowledge through a causal graph enhances Question Answering (QA) reasoning performance in LLMs by up to $20\%$.
# 2 Causal Event Abstraction
We follow the definition of events provided in TimeML (Pustejovsky et al., 2003), the ECB+ Annotation Guidelines (Cybulska and Vossen, 2014) and the Event StoryLine Corpus (Caselli and Vossen, 2017). An event refers to any situation or state that happens or holds, which consists of four basic components: action/state, location, time and participant(s). We here consider location and time as optional; for instance, the sentence he goes to sleep is sufficient as an event. Each component of an event is associated with a concept in an ontology.$^{1}$ A realization of a concept in the event is an event mention. An event abstraction is a tuple (action (state)/concept, participant/concept, time/concept, location/concept) shared among all mentions of that event, where each component is either an entity or a concept at an appropriate abstraction level. An event abstraction is itself an event and can be identified by replacing every component in its representation by a more abstract concept in the ontology. For example, His girlfriend [person] works [action] for Starbucks [location] on the weekends [time].

From another point of view, an event abstraction is a generalization of a cluster of event mentions that describe the same event. Two event mentions are equivalent if they are associated with the same event abstraction. An event abstraction is causally consistent w.r.t. a set of event mentions if (1) none of its mention pairs at the semantic level contains a causal relationship, and (2) the semantics of all its mentions are either the cause or the effect of mentions in another event abstraction. Table 1 describes all the terms used in this paper and throughout the annotation process.
Definition of causation. Based on the counterfactual theory of causation (Lewis, 2013), an event $x$ is said to cause another event $y$, and event $y$ is said to be an effect of event $x$, if (1) event $y$ temporally follows event $x$ directly, i.e., there are no intermediate events or, if there is one, it must rarely occur, and (2) event $y$ would not commonly occur if event $x$ did not occur. It is worth noting that unlike such datasets as BECauSE (Dunietz et al., 2017) or CauseNet (Heindorf et al., 2020) that consider causality between concepts, here causality is defined on the event (sentence) level, which takes into account the interaction of multiple participants. In the statistical causality literature, there exist 3 causal structures of interest: confounder, collider and mediator. For random variables $X, Y, Z$,

- $Z$ is called a confounder if it causes both $X$ and $Y$, written as $X \gets Z \to Y$;
- $Z$ is a collider when $Z$ is a common child of $X$ and $Y$ but $X$ and $Y$ themselves are not related, written as $X \rightarrow Z \leftarrow Y$;
- $Z$ is a mediator if there is a chain in which $X$ causes $Z$ and $Z$ causes $Y$, written as $X \to Z \to Y$.
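These three structures imply different conditional independencies, which is what statistical discovery algorithms test for. The snippet below is an illustrative sketch (not from the ACCESS codebase) that encodes them as DAGs and queries d-separation with NetworkX; note that `nx.d_separated` was renamed `nx.is_d_separator` in NetworkX 3.3+.

```python
# Encode the three causal structures as DAGs and check the conditional
# (in)dependencies they imply via d-separation. Illustrative only.
import networkx as nx

structures = {
    "confounder": nx.DiGraph([("Z", "X"), ("Z", "Y")]),  # X <- Z -> Y
    "collider":   nx.DiGraph([("X", "Z"), ("Y", "Z")]),  # X -> Z <- Y
    "mediator":   nx.DiGraph([("X", "Z"), ("Z", "Y")]),  # X -> Z -> Y
}

for name, g in structures.items():
    marginal = nx.d_separated(g, {"X"}, {"Y"}, set())  # X independent of Y?
    given_z = nx.d_separated(g, {"X"}, {"Y"}, {"Z"})   # ... given Z?
    print(f"{name:10s}  X indep Y: {marginal}  X indep Y | Z: {given_z}")
```

Conditioning on a collider induces dependence between $X$ and $Y$, whereas conditioning on a confounder or mediator removes it; this asymmetry is what allows constraint-based algorithms such as PC (used in Section 3.2) to orient some edges.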
<table><tr><td>Terminology</td><td>Description</td></tr><tr><td>Event</td><td>Any situation, state or action that happens, occurs or holds. An event consists of four basic components: participant(s), action/state, location and time.</td></tr><tr><td>Event sentence</td><td>An English sentence describing an event in daily life. An event sentence must contain the participant(s) and action/state components while location and time are the optional components and should not influence the judgment of the meaning of the sentence.</td></tr><tr><td>Cluster</td><td>A group of sentences describing the same event.</td></tr><tr><td>Topic</td><td>An event that is unique to a particular cluster and sufficiently abstract to be described by all event sentences in that cluster.</td></tr><tr><td>Topic sentence</td><td>The English sentence describing the topic of a particular cluster.</td></tr><tr><td>Story</td><td>A description of a series of connected events.</td></tr></table>
Table 1: Terminology of the ACCESS benchmark.
Quality criteria. We present the overarching criteria that guide our data construction process. These criteria aim to ensure that the event abstractions, i.e., clusters of event mentions, in ACCESS achieve causal consistency:

1. Every cluster must be assigned only one event abstraction.
2. All event mentions in each cluster must describe the same event, and that event (abstraction) must be sufficiently abstract to cover all instances while being specific about the action taking place.
3. Every cluster must be in a cause-and-effect relation with at least one of the other clusters.
4. If there exists a causal relation between events at one level, the causal relation must hold at its higher levels of abstraction in the hierarchy. For example, a causal relation between events at the mention level must hold at the generalization and abstraction levels.
5. A cluster $A$ is said to cause another cluster $B$ if at least one event mention in cluster $A$ causes any other event mention in cluster $B$, according to the above cause-effect definition.
# 3 The ACCESS Benchmark
ACCESS provides a graphical modelling of the cause-and-effect relations among event abstractions: every node in the causal graph represents an event abstraction, and the causal relation between any two nodes is represented by an arrow going from the cause event abstraction to the effect event abstraction. There are 725 abstractions or clusters, each of which on average contains 7 instances, and which in total are associated with 9,513 stories in the GLUCOSE dataset. The graph also contains diverse causal structures for causal inference, including confounding, mediation and colliders (Pearl, 2009). See Table 2 for examples of pairs of causal abstract events and Table 3 for the descriptive statistics of ACCESS.

Figure 1 illustrates our proposed pipeline for performing abstract causal event discovery and reasoning. The ACCESS dataset is constructed in two phases: Phase (1) extracts event abstractions from a collection of event mentions by grouping mentions whose generalizations describe the same event, such that the resulting abstraction satisfies the above quality criteria. Phase (2) identifies the causal relations among these event abstractions. Both phases entail an alternation between using automatic algorithms for extracting candidate clusters/causal pairs and crowd-sourcing for refinement and quality control. We briefly describe each phase in the following sections. See Appendix B for more details on our crowd-sourcing pipeline and task descriptions.
# 3.1 Abstract Event Extraction
We now describe the process of curating these event mentions and extracting event abstractions. Our source of commonsense knowledge is GLUCOSE (Mostafazadeh et al., 2020), a large-scale dataset of over 670K stories with annotated causal relations. GLUCOSE also provides generalized inference rules mapped from specific statements, which correspond to our concept of event mentions. We make use of the generalized expressions for our abstraction procedure and focus only on dimensions 1 and 6 of causal explanations: the direct effect. For simplicity, we will from now on refer to these generalizations as events.
<table><tr><td colspan="2">Cause event</td><td colspan="2">Effect event</td></tr><tr><td>Abstraction</td><td>Generalizations</td><td>Abstraction</td><td>Generalizations</td></tr><tr><td rowspan="3">a person need money</td><td>a person need money</td><td rowspan="3">a person get a job</td><td>a person take up a job</td></tr><tr><td>a person need cash</td><td>a person get a good job</td></tr><tr><td>a person need to get money</td><td>a person get a job at a place</td></tr><tr><td rowspan="3">a person win</td><td>a person win the contest</td><td rowspan="3">a person celebrate</td><td>a person be celebrate an occasion</td></tr><tr><td>a person win something</td><td>a person have a celebration</td></tr><tr><td>a person end up winning</td><td>a person celebrate something</td></tr><tr><td rowspan="3">a person fall</td><td>a person fall down</td><td rowspan="3">a person feel pain</td><td>a person be in pain</td></tr><tr><td>a person fall to the floor</td><td>a person experience pain in a body part</td></tr><tr><td>a person fall on the ground</td><td>a person 's body be in pain</td></tr></table>
Table 2: Examples of event causality on the abstraction and generalization level.
<table><tr><td colspan="2">Story corpus</td></tr><tr><td>Stories</td><td>9,513</td></tr><tr><td>Events</td><td>4,708</td></tr><tr><td colspan="2">Causal graph</td></tr><tr><td>Nodes (clusters / abstractions)</td><td>725</td></tr><tr><td>Edges (causal pairs)</td><td>1,494</td></tr><tr><td>Expected degree per node</td><td>4</td></tr><tr><td>Confounders</td><td>149</td></tr><tr><td>Mediators</td><td>368</td></tr><tr><td>Colliders</td><td>3,956</td></tr></table>
Table 3: General descriptive statistics of ACCESS.
Automatic extraction. Two or more event mentions must describe the same event to be clustered together. To describe the same event means they must be semantically related or similar. We initially apply standard text preprocessing and subsequently implement correlation clustering (Bansal et al., 2004; Charikar et al., 2005) to automatically group events with shared semantics. We adopt an algorithm akin to the PIVOT algorithm (Fukunaga, 2019) that aims to maximize the semantic similarity of events in each cluster. We propose to measure semantic similarity by two metrics: cosine similarity and paraphrasing likelihood. The pairwise similarity of two expressions $x, y$ is given by

$$
\mathcal{S}_{xy} = 0.5 \times \left[ f_{\mathrm{cos}}(x, y) + f_{\mathrm{phr}}(x, y) \right], \tag{1}
$$

where $\mathcal{S}_{xy} \in [0,1]$, $f_{\mathrm{cos}}$ returns the cosine similarity of the contextual embeddings of expressions $x, y$, and $f_{\mathrm{phr}}$ returns the probability that events $x, y$ are paraphrases. If $x, y$ are causally related, based on the annotations in GLUCOSE, $\mathcal{S}_{xy} = 0$. The contextual embeddings are obtained from the pre-trained all-MiniLM-L6-v2 sentence Transformer (Reimers and Gurevych, 2020), while the paraphrasing likelihood is obtained from the pre-trained adversarial paraphrase detector by Nighojkar and Licato (2021).
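A minimal sketch of Eq. (1) is given below. The paraphrase-detector checkpoint id and its label convention are placeholders (only the embedding model is named above), so this illustrates the shape of the computation rather than the authors' exact setup.

```python
# Hedged sketch of the pairwise similarity in Eq. (1).
import torch
from sentence_transformers import SentenceTransformer, util
from transformers import AutoModelForSequenceClassification, AutoTokenizer

embedder = SentenceTransformer("all-MiniLM-L6-v2")
phr_name = "some-org/paraphrase-detector"  # hypothetical model id
phr_tok = AutoTokenizer.from_pretrained(phr_name)
phr_model = AutoModelForSequenceClassification.from_pretrained(phr_name)

def similarity(x: str, y: str, causally_related: bool = False) -> float:
    if causally_related:  # causal pairs are forced apart: S_xy = 0
        return 0.0
    emb = embedder.encode([x, y], convert_to_tensor=True)
    f_cos = util.cos_sim(emb[0], emb[1]).item()
    inputs = phr_tok(x, y, return_tensors="pt")
    with torch.no_grad():
        logits = phr_model(**inputs).logits
    f_phr = logits.softmax(-1)[0, 1].item()  # P(paraphrase), assuming label 1
    return 0.5 * (f_cos + f_phr)
```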
Appendix C presents details of our clustering algorithm, including an ablation study against other popular clustering algorithms on unsupervised and supervised metrics showing that our PIVOT-style algorithm is preferable. In summary, the algorithm begins with a randomly chosen cause-effect pair of events as pivots. For each of these nodes, it finds the neighbors with which the similarity score exceeds $70\%$. The process is repeated for the remaining events until all events are clustered. Events that do not belong to any cluster are temporarily discarded. To ensure causal consistency, we perform post-processing by splitting each cluster in such a way that (1) no events in the same cluster are causally related, and (2) there exists either no or only one causal relation between any two clusters. A minimal sketch of this procedure is given below.
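The sketch simplifies the pivot choice to a single random event (the paper uses a cause-effect pair of pivots) and omits the causal-consistency post-processing; `sim` is the pairwise similarity of Eq. (1).

```python
# Illustrative sketch of the PIVOT-style correlation clustering described
# above, not the authors' exact implementation.
import random

def pivot_cluster(events, sim, threshold=0.7, seed=0):
    rng = random.Random(seed)
    remaining = set(events)
    clusters = []
    while remaining:
        pivot = rng.choice(sorted(remaining))  # randomly chosen pivot event
        cluster = {pivot}
        # Greedily absorb every event sufficiently similar to the pivot.
        for e in sorted(remaining - {pivot}):
            if sim(pivot, e) >= threshold:
                cluster.add(e)
        remaining -= cluster
        clusters.append(cluster)
    # Singletons correspond to the temporarily discarded outlier events.
    kept = [c for c in clusters if len(c) > 1]
    outliers = [c for c in clusters if len(c) == 1]
    return kept, outliers
```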
Human annotation. We then employ 10 human annotators to assess the quality of the cluster assignment as well as to determine the abstract expression (or "topic" in layman's terms) for each cluster. This involves five key steps. First, the annotators are required to perform sub-clustering of the clusters formed in the previous step. To strictly guarantee that events grouped together share the same semantics and to maximize annotation consistency, we outline 11 scenarios where word uses convey differences in meaning. Next, for each newly formed sub-cluster, they are also asked to identify the "topic", which subsequently serves as an event abstraction. We then conduct three additional steps to resolve disagreements in annotation as well as to handle the outlier events that were temporarily removed after the automatic procedure. Appendix B.1 details this annotation process.
# 3.2 Causal Relations Discovery
This phase aims to identify the causal relations among the abstract events extracted from the previous phase, based on both non-contextual and contextual commonsense knowledge.
Automatic causal discovery. To identify candidate causal pairs of event abstractions, we use a combination of the existing annotated relations in GLUCOSE and statistical causal discovery methods. Regarding GLUCOSE, we determine the causal relation of two event abstractions (clusters) based on criterion #5 in the above list of quality criteria. Regarding statistical causal discovery, we construct a dataset where each observation is a document or story in the GLUCOSE corpus and each feature records the counts of occurrences (or mentions) of a cluster in a story. On this co-occurrence data matrix, we run the well-known PC algorithm<sup>3</sup> (Spirtes et al., 2000) to obtain more causal candidates, using G-squared and Chi-squared tests at a significance level of 0.01. Note that we intentionally avoid using NLP models for event causality identification to avoid potential biases from their training data.
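A hedged sketch of this step is shown below, assuming the co-occurrence matrix has been materialized as a NumPy array; it uses the open-source causal-learn package's PC implementation, which is one common choice rather than necessarily the authors' exact tooling.

```python
# Run PC on a stories-by-clusters co-occurrence matrix (placeholder data).
import numpy as np
from causallearn.search.ConstraintBased.PC import pc

rng = np.random.default_rng(0)
X = rng.integers(0, 2, size=(9513, 725))  # placeholder binary co-occurrences

# Chi-squared conditional independence test at significance level 0.01;
# causal-learn also provides "gsq" for the G-squared test.
cg = pc(X, alpha=0.01, indep_test="chisq")
edges = cg.G.get_graph_edges()  # candidate causal edges over clusters
print(f"{len(edges)} candidate edges")
```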
Human annotation. We proceed with human annotation on the union of the causal candidates from the above step. Three annotators participate in this task. They are asked to categorize each candidate causal pair $A$ and $B$ into three scenarios: $A$ causes $B$, $B$ causes $A$, or $A$ and $B$ have no relation. Initially, the workers are tasked with annotating the causal relations without considering contexts, that is, relying solely on their commonsense about the abstractions. Subsequently, we identify the causal pairs with no consensus among the three workers. We provide the story contexts in GLUCOSE associated with each of these pairs and ask them to reevaluate their annotations. Out of 2,862 candidate pairs detected from GLUCOSE, $39.6\%$ are annotated by humans as truly causal, while that number is $61.5\%$ for the PC candidates. The final relation of each pair is decided through majority voting. The inter-rater agreement score (Krippendorff's $\alpha$) is $77.2\%$. See Appendix B.2 for details.
# 4 Experiments
In this section, we conduct empirical analyses to demonstrate how the ACCESS benchmark is used for evaluating (1) the effectiveness of automatic event abstraction and causal discovery approaches, and (2) how a causal structure assists reasoning models on causal QA tasks. All experimental results are averaged over 5 random seeds. The code and data for reproducing our experiments are published at github.com/isVy08/ACCESS.
# 4.1 Abstract Event Identification
For abstract causal discovery and reasoning, a practical question is how one can identify abstract events from real-world corpora where the ground truth is unknown. Given the advances of LLMs, a promising approach is to use LLMs to generate abstractions. In this experiment, we explore two approaches to automatically extract event abstractions with GPT-4o-mini, using OpenAI's official API. We then use ACCESS as ground truth to evaluate the quality of abstraction.

Generate abstract events in a Single Step. We have GPT-4o-mini directly generate the generalized expressions. We extract 9,495 event mentions from GLUCOSE and ask the model to generate two generalized versions for every instance, corresponding to the levels of generalization (level 1) and abstraction (level 2) described in Figure 1. We then compare the generated abstractions with the ground-truth ones provided by GLUCOSE and ACCESS. The model achieves a BLEU score of 0.520. The prompt for this task can be found in Appendix E.
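Such a comparison can be scored, for instance, with sacrebleu along the following lines; the strings are illustrative, and sacrebleu reports BLEU on a 0-100 scale, so a rescaling is applied assuming the figure above is on a 0-1 scale.

```python
# Hedged sketch of the abstraction-quality evaluation via corpus BLEU.
import sacrebleu

# One generated abstraction per event mention, with the ground-truth
# generalization as the single reference stream (illustrative strings).
hyps = ["a person get a job", "a person feel an emotion"]
refs = [["a person get a job", "a person be scared"]]

score = sacrebleu.corpus_bleu(hyps, refs)
print(score.score / 100)  # rescale 0-100 to a 0-1 BLEU value
```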
Identify abstract events in Two Steps. In the second approach<sup>5</sup>, we obtain the generalizations produced by GPT-4o-mini in the above step, then run automatic clustering to find the abstractions, following the setup in Section 3.1. For all instances in every output cluster, we retrieve the ground-truth clusters given by ACCESS and take the majority one as the predicted assignment. We measure the level of agreement between the predicted and the true assignment, using the Rand index (Steinley, 2004) and mutual information (Vinh et al., 2009).
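Concretely, such agreement scores can be computed with scikit-learn; the snippet below uses the adjusted variants of the two measures, which may differ from the exact variants used in the paper, and the cluster ids are illustrative.

```python
# Sketch of the cluster-agreement evaluation over event mentions.
from sklearn.metrics import adjusted_mutual_info_score, adjusted_rand_score

y_true = [0, 0, 1, 1, 2, 2]  # illustrative ground-truth cluster ids
y_pred = [0, 0, 1, 2, 2, 2]  # illustrative predicted cluster ids

print("Rand index (adjusted):", adjusted_rand_score(y_true, y_pred))
print("Mutual information (adjusted):", adjusted_mutual_info_score(y_true, y_pred))
```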
In Appendix C, Table 8 provides detailed numerical results for various clustering algorithms in this experiment. In all cases, the agreement scores are well below 1.0 (perfect agreement). This indicates that vanilla automatic clustering is inadequate for identifying useful abstractions. While choosing a good clustering algorithm remains important, we find that the quality of the input generalizations plays a more critical role in the performance. When we conduct the same experiments on the ground-truth generalizations from GLUCOSE, all metrics improve significantly, by at least $28\%$.

We further observe that, on average with generalizations from GPT-4o-mini, an output cluster has more than $40\%$ of its instances belonging to a different cluster from the predicted one, and based on the ground truth, a cluster should be further divided into at least 2 sub-clusters to be considered correct. We find that the issue is mainly due to the fact that the model produces over-generalized expressions, causing the clustering algorithm to form bigger clusters. For example, the mentions Amanda feels excited and He is scared are both generalized to A person feel an emotion, while we consider be excited and be scared to refer to different states. Another example is the mention Tom works hard being one-step generalized to A person do something, which can be applied to almost any expression. This reveals the difficulty of controlling the granularity of abstractions using LLMs, which substantiates the necessity of benchmarks for event generalization and abstraction.
# 4.2 Pairwise Causal Discovery
We now describe how the data provided in ACCESS can be used for the causal discovery task. In the main text, we discuss the pairwise causal discovery task in LLMs. We examine how well LLMs can discern pairwise causal relations between two abstract events. Formally, given a pair of events $x$ and $y$, LLMs are asked to determine the relation between them by outputting one of the three possible relations: $x$ causes $y$, $y$ causes $x$, or no causal relation. In addition to the 1,494 causal relations in ACCESS, we also randomly generate 1,000 negative pairs to challenge the models. For our experiments, the LLMs used are GPT-4o-mini, Llama3.2-3B-Instruct, Llama3.1-8B-Instruct and Llama2-chat-7B$^6$. The output from these models is post-processed to extract the final relation. The prompts can be found in Table 9 of Appendix E.
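The exact prompts are given in Appendix E (Table 9); the sketch below only illustrates the shape of such a pairwise probe and its post-processing, with hypothetical wording.

```python
# Hedged sketch of the pairwise causal probe and its post-processing.
PROMPT = (
    "Given two events:\n(A) {x}\n(B) {y}\n"
    "Which statement is correct?\n"
    "1. A causes B\n2. B causes A\n3. No causal relation\n"
    "Answer with a single number."
)

def parse_relation(output: str) -> str:
    # Map free-form model output onto one of the three relations.
    head = (output.strip().split() or [""])[0]
    if head.startswith("1"):
        return "x causes y"
    if head.startswith("2"):
        return "y causes x"
    return "no causal relation"  # fallback when the answer cannot be parsed
```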
The results are presented in Table 4, using Precision, Recall, and F1 score as evaluation metrics. We also report the performance of random choice and a majority baseline, where the most frequent answer in the reference data is always selected. LLMs achieve fairly modest accuracy, with GPT-4o-mini performing best, followed by Llama3.1-8B. It is worth noting that the task is non-contextual, since the goal is to assess the models' capability for intuitive or commonsense causal reasoning. Such intuition in humans is typically shaped by our observations and experiences from everyday life, enabling us to quickly identify scenarios where causal relationships often hold. For example, we intuitively understand that speeding can frequently result in the person being fined by the police.

Appendix D later demonstrates how the ACCESS pipeline facilitates the application of statistical structure learning algorithms. These methods are currently shown to under-perform on our benchmark, suggesting that there remains a large gap between theoretically grounded causal discovery and event causality identification research in NLP.
<table><tr><td colspan="4">Causal Discovery on ACCESS</td></tr><tr><td></td><td>Precision ↑</td><td>Recall ↑</td><td>F1 ↑</td></tr><tr><td>GPT-4o-mini</td><td>0.705 ± .026</td><td>0.581 ± .028</td><td>0.559 ± .025</td></tr><tr><td>Llama3.2-3B</td><td>0.384 ± .015</td><td>0.364 ± .006</td><td>0.326 ± .007</td></tr><tr><td>Llama3.1-8B</td><td>0.437 ± .006</td><td>0.425 ± .006</td><td>0.413 ± .006</td></tr><tr><td>Llama2-7B</td><td>0.376 ± .006</td><td>0.359 ± .006</td><td>0.316 ± .007</td></tr><tr><td>Random</td><td>0.340 ± .008</td><td>0.330 ± .003</td><td>0.330 ± .008</td></tr><tr><td>Majority</td><td>0.114 ± .001</td><td>0.333 ± .001</td><td>0.170 ± .001</td></tr></table>
Table 4: Experiment results of causal discovery on the ACCESS dataset. Precision, Recall, and F1 are computed under the macro-average setting. Bold indicates the best performance. $\uparrow$ Higher is better.
# 4.3 Reasoning with Causal Graphs
We now study how the causal graphs in ACCESS can be used to assist models in QA reasoning tasks. In connection with Section 4.2, this can essentially be viewed as a contextual causal discovery task. We construct a causal QA dataset from GLUCOSE, which provides a set of stories with annotated causal relations between events at both the mention and generalization levels. For every story, we create two alternative multi-choice questions about the cause and effect. We extract the sentences in the story as candidate answers. The event that appears in the annotated causal pair is thus considered a correct answer. Furthermore, in some cases, there can exist multiple causes that co-occur and lead to an effect, and vice versa. To address this, we have a human expert review the data to identify additional correct causal events. The judgment of causality is based on the same criteria as the annotation process described in Appendix B.2. It is worth highlighting again that we focus on direct causal relations, meaning that we do not consider events that result from/in some intermediate causes/effects. The resulting dataset contains 480 questions, some of which have multiple correct answers.
QA Reasoning on GLUCOSE
<table><tr><td></td><td colspan="2">Specific QA</td><td colspan="2">Specific QA+</td><td colspan="2">Abstract QA+</td></tr><tr><td>zero-shot COT</td><td>Accuracy ↑</td><td>F1 ↑</td><td>Accuracy ↑</td><td>F1 ↑</td><td>Accuracy ↑</td><td>F1↑</td></tr><tr><td>GPT-4o-mini</td><td>0.790 ± .008</td><td>0.809 ± .006</td><td>0.719 ± .049</td><td>0.720 ± .019</td><td>0.561 ± .013</td><td>0.554 ± .013</td></tr><tr><td>GPT-4o-mini + CG</td><td>0.894 ± .067</td><td>0.887 ± .046</td><td>0.912 ± .012</td><td>0.887 ± .006</td><td>0.731 ± .047</td><td>0.686 ± .017</td></tr><tr><td>Llama3.2-3B</td><td>0.723 ± .006</td><td>0.501 ± .010</td><td>0.696 ± .022</td><td>0.487 ± .010</td><td>0.631 ± .006</td><td>0.524 ± .008</td></tr><tr><td>Llama3.2-3B + CG</td><td>0.803 ± .002</td><td>0.561 ± .014</td><td>0.754 ± .001</td><td>0.533 ± .002</td><td>0.725 ± .005</td><td>0.551 ± .004</td></tr><tr><td>Llama3.1-8B</td><td>0.883 ± .005</td><td>0.428 ± .003</td><td>0.833 ± .018</td><td>0.418 ± .010</td><td>0.794 ± .012</td><td>0.421 ± .004</td></tr><tr><td>Llama3.1-8B + CG</td><td>0.924 ± .008</td><td>0.556 ± .018</td><td>0.885 ± .002</td><td>0.521 ± .011</td><td>0.875 ± .001</td><td>0.524 ± .009</td></tr><tr><td>Llama2-7B</td><td>0.530 ± .013</td><td>0.509 ± .012</td><td>0.480 ± .003</td><td>0.553 ± .007</td><td>0.483 ± .015</td><td>0.537 ± .005</td></tr><tr><td>Llama2-7B + CG</td><td>0.681 ± .004</td><td>0.601 ± .009</td><td>0.635 ± .015</td><td>0.632 ± .010</td><td>0.692 ± .010</td><td>0.663 ± .009</td></tr></table>
Table 5: Experiment results of multi-choice causal reasoning on the GLUCOSE-QA dataset. QA+ indicates the setting where the stories are paraphrased. +CG refers to experiments where the causal information from ACCESS is added to the prompt.
Abstract QA+ on GLUCOSE
<table><tr><td>bi-level COT</td><td>Accuracy ↑</td><td>F1 ↑</td></tr><tr><td>Llama3.2-3B</td><td>0.722 ± .025</td><td>0.378 ± .004</td></tr><tr><td>Llama3.2-3B + CG</td><td>0.740 ± .025</td><td>0.410 ± .014</td></tr><tr><td>Llama3.1-8B</td><td>0.753 ± .011</td><td>0.430 ± .009</td></tr><tr><td>Llama3.1-8B + CG</td><td>0.813 ± .022</td><td>0.493 ± .018</td></tr><tr><td>Llama2-7B</td><td>0.503 ± .028</td><td>0.516 ± .007</td></tr><tr><td>Llama2-7B + CG</td><td>0.605 ± .018</td><td>0.623 ± .005</td></tr></table>
Table 6: Experiment results of multi-choice Abstract QA+ with bi-level COT prompting.
Since LLMs have a tendency to exploit textual cues, we additionally generate paraphrases for each story while retaining the event choices in their original version. We label this extra setting as QA+. We also experiment with two variants of question design. In the first one, the cause/effect event in question occurs at the original mention level, whereas in the second one, the cause/effect event is transformed into its generalized version. For example, the question "What could be the cause of the event 'Amy gets a job in a bank'?" is replaced with "The story describes an event where 'a person gets a job'. What could be the cause of the event?". We label the two variants as Specific QA and Abstract QA respectively. To perform the second task, the model should ideally be able to first perform abstract reasoning, that is, to map the generalized cause/effect event to its corresponding mention in the context, prior to retrieving the correct causal pair. Examples of this GLUCOSE-QA dataset are provided in Appendix E.
How ACCESS provides abstract causal information. To evaluate whether the abstract causal knowledge from ACCESS can help QA reasoning, we extend the above experiment by adding the causal relations between two relevant abstractions as additional context in the prompt. In our experiments, the corresponding abstraction of an event mention can be retrieved directly from ACCESS. However, for an arbitrary QA dataset, this should be done in two steps: (1) perform abstraction of the event described in the target cause/effect, and (2) map the output abstraction to at least one abstract event in ACCESS and retrieve the corresponding causal relations. Section 4.1 has described two possible approaches to step (1).
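As an illustration, if the ACCESS graph is loaded as a directed graph, the added context can be assembled roughly as follows; the edge shown and the helper below are hypothetical, not taken from the released data loader.

```python
# Hedged sketch of retrieving abstract causal context for the QA prompt.
import networkx as nx

graph = nx.DiGraph()
graph.add_edge("a person need money", "a person get a job")  # example relation

def causal_context(abstraction: str) -> str:
    # Collect direct causes and effects of the matched abstraction.
    causes = list(graph.predecessors(abstraction))
    effects = list(graph.successors(abstraction))
    lines = [f"'{c}' causes '{abstraction}'" for c in causes]
    lines += [f"'{abstraction}' causes '{e}'" for e in effects]
    return "Known causal relations: " + "; ".join(lines)

# The returned string is appended to the QA prompt as extra context.
print(causal_context("a person get a job"))
```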
Results. Given a story and a causal question, we prompt LLMs to generate the answer from a set of provided candidates. We first adopt zero-shot chain-of-thought (COT) (Kojima et al., 2022) with the basic "let's think step by step" prompting. Given that the Llama models are open-source, we also consider a bi-level COT dedicated to abstract QA tasks. In this approach, we provide a brief instruction on how to perform abstract causal reasoning, which entails two steps: the first is abstract reasoning, that is, to identify the mention corresponding to the generalized cause/effect in question; the second is causal reasoning, that is, to retrieve the corresponding effect/cause mentioned in the story context. See Appendix E for prompts and qualitative examples.
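The wording below is an illustrative reconstruction of such a bi-level instruction, not the authors' exact prompt (see Appendix E for those).

```python
# Hypothetical bi-level COT instruction prepended to each abstract QA prompt.
BI_LEVEL_COT = (
    "You will answer a causal question about a story in two steps.\n"
    "Step 1 (abstract reasoning): find the sentence in the story that "
    "matches the generalized event in the question.\n"
    "Step 2 (causal reasoning): among the candidate answers, pick the "
    "sentence(s) that directly cause or result from that event.\n"
)
```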
The evaluation metrics include Accuracy (which measures how often the model successfully retrieves at least one correct answer) and F1 score under the weighted-average setting (which considers the alignment of all predicted choices). Tables 5 and 6 summarize our experiment results. Each setting introduces an increased level of difficulty in abstract reasoning. In the first task, Specific QA, the models can draw the answers directly from the raw context. Meanwhile, Specific QA+ tasks obscure the linguistic cues, which the models are known to heavily exploit for prediction. Finally, Abstract QA+ is the most challenging, where the models are expected to concretize the abstract events before deriving the answers.
The findings reveal that the inclusion of causal graphs significantly enhances the performance of LLMs across all experimental settings. Except for Llama2, whose performance is consistently poor, the performance of all models degrades on Abstract QA+ tasks, which indicates their struggle with reasoning over abstract causality. However, even though we use ACCESS only to provide the LLMs with the causal relations between event abstractions, large improvements are observed. We therefore hypothesize that the models may possess a subtle capacity for abstract reasoning that needs to be properly activated. Compared to GPT-4o-mini, the Llama family is more prone to temporal and lexical biases, resulting in low F1 scores due to a higher number of negative selections. Concretely, the models select on average 1.9-2.9 more answers than the actual ones across the QA tasks. With the additional causal information, the ratios are reduced to 1.8-2.6. Bi-level COT unfortunately yields undesirable results, with a slight gain in accuracy for smaller models at the cost of reduced F1 scores due to increased over-prediction. This implies potential errors in some reasoning steps, but tracing and evaluating lines of reasoning in complex COT is an open challenge. Nevertheless, our experiments show that a simple zero-shot COT plus relevant abstract causal knowledge can greatly benefit the models. This presents a straightforward alternative strategy to enhance performance by leveraging external knowledge bases.
# 5 Conclusion
This paper introduces ACCESS, a benchmark for abstract causal event discovery and reasoning. We present a pipeline that combines automatic methods and human crowd-sourcing to extract 1,494 causal relations among 725 abstract events. We demonstrate that incorporating causal knowledge from our benchmark leads to improvements in QA reasoning tasks for LLMs. However, we also highlight challenges in automatic event abstraction identification and causal discovery; in the latter, popular statistical algorithms perform poorly in recovering even our sub-graphs of fewer than 50 nodes. Our empirical evidence also suggests that LLMs are not yet ready to perform causal inference effectively, due to the lack of effective acquisition of two critical sub-processes: abstract reasoning and causal discovery. This underscores the need for future research to equip the models with these essential skills for achieving true causal reasoning.
# Limitations
|
| 180 |
+
|
| 181 |
+
Our benchmark is built upon GLUCOSE (Mostafazadeh et al., 2020), whose scope is limited to everyday children's stories. Acknowledging this limitation, we propose a reproducible data construction pipeline applicable to curating diverse corpora of event causality. Since ACCESS primarily addresses commonsense knowledge about real-world events, it is susceptible to biases in the judgment of semantic similarity and of cause-and-effect relations between events. To mitigate this issue, our first effort is, at every phase, to employ automatic methods alongside human annotation, based on a set of objective definitions and criteria for events, abstractions, and event causality. In the event abstraction phase, we specifically provide the annotators with a list of common scenarios (though non-exhaustive) indicating when the semantics of two expressions are considered similar or different, to reduce potential biases. Regarding the subjectivity of human causal judgment, while we focus on non-contextual causal commonsense knowledge, we leverage contextual signals from the original corpus whenever necessary to objectively guide the annotators' decisions on the causal relations. Due to resource constraints, our causal graph is sparse and limited in size, yet it still presents a challenge for statistical structure learning as well as for LLMs on causal discovery tasks. One critical drawback in the experiment with statistical methods lies in the representation power of the co-occurrence matrix, which underscores the need for further research on representation learning of abstractions in the language domain. Future work could also explore other resources to enlarge our causal graph and expand the coverage of real-world data. Such a causal graph could further be leveraged for causal inference according to the engine described by Pearl (2009), which seeks to answer causal queries across the three rungs of the Ladder of Causation, i.e., associational (Rung 1), interventional (Rung 2), and counterfactual (Rung 3).
# Ethics Statement
To address potential misuse and uphold fairness and inclusivity, we discuss several ethical considerations for ACCESS. Firstly, it is crucial to clarify that the resources provided in this work are intended solely for research purposes. The narrative scenarios within ACCESS should not be utilized for insults, slander, or any other malicious purposes. Users are expected to adhere to the highest ethical standards, ensuring responsible and transparent use in line with ethical research practices. The creators of the dataset hold no responsibility for misuse or misinterpretation, and all necessary measures have been taken to respect privacy and ensure informed consent during the data collection process. Secondly, it is imperative to safeguard the mental well-being of annotators during the data annotation process. Prior to data collection, this study underwent a thorough review and approval process by an internal review board. We require each annotator to take a break every two hours or whenever they feel uncomfortable.
# Acknowledgment
This material is based on research sponsored by DARPA under agreement number HR001122C0029. The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes notwithstanding any copyright notation thereon. This work is also partially supported by the DARPA Assured Neuro Symbolic Learning and Reasoning (ANSR) program under award number FA8750-23-2-1016.
# References
Mihael Ankerst, Markus M Breunig, Hans-Peter Kriegel, and Jörg Sander. 1999. Optics: Ordering points to identify the clustering structure. ACM SIGMOD Record, 28(2):49-60.

Nikhil Bansal, Avrim Blum, and Shuchi Chawla. 2004. Correlation clustering. Machine Learning, 56:89-113.

Helen Beebee, Christopher Hitchcock, Peter Charles Menzies, and Peter Menzies. 2009. The Oxford handbook of causation. Oxford Handbooks.

Kevin Bello, Bryon Aragam, and Pradeep Ravikumar. 2022. Dagma: Learning dags via m-matrices and a log-determinant acyclicity characterization. Advances in Neural Information Processing Systems, 35:8226-8239.

Vincent D Blondel, Jean-Loup Guillaume, Renaud Lambiotte, and Etienne Lefebvre. 2008. Fast unfolding of communities in large networks. Journal of Statistical Mechanics: Theory and Experiment, 2008(10):P10008.

Paul Van den Broek, Elizabeth Pugzles Lorch, and Richard Thurlow. 1996. Children's and adults' memory for television stories: The role of causal factors, story-grammar categories, and hierarchical level. Child Development, 67(6):3010-3028.

Tommaso Caselli and Piek Vossen. 2017. The event storyline: A new benchmark for causal and temporal relation extraction. In Proceedings of the Events and Stories in the News Workshop, pages 77-86.

Moses Charikar, Venkatesan Guruswami, and Anthony Wirth. 2005. Clustering with qualitative information. Journal of Computer and System Sciences, 71(3):360-383.

Fei Cheng and Yusuke Miyao. 2017. Classifying temporal relations by bidirectional LSTM over dependency paths. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 1-6.

David Maxwell Chickering. 2002. Optimal structure identification with greedy search. Journal of Machine Learning Research, 3(Nov):507-554.

Agata Cybulska and Piek Vossen. 2014. Guidelines for ecb+ annotation of events and their coreference.

Dhairya Dalal, Paul Buitelaar, and Mihael Arcan. 2023. Calm-bench: A multi-task benchmark for evaluating causality-aware language models. In Findings of the Association for Computational Linguistics: EACL 2023, pages 296-311.

Jesse Dunietz, Lori Levin, and Jaime G Carbonell. 2015. Annotating causal language using corpus lexicography of constructions. In Proceedings of The 9th Linguistic Annotation Workshop, pages 188-196.

Jesse Dunietz, Lori Levin, and Jaime G Carbonell. 2017. The because corpus 2.0: Annotating causality and overlapping relations. In Proceedings of the 11th Linguistic Annotation Workshop, pages 95-104.

Charles R Fletcher and Charles P Bloom. 1988. Causal reasoning in the comprehension of simple narrative texts. Journal of Memory and Language, 27(3):235-244.

Takuro Fukunaga. 2019. Lp-based pivoting algorithm for higher-order correlation clustering. Journal of Combinatorial Optimization, 37:1312-1326.

Jinglong Gao, Xiao Ding, Bing Qin, and Ting Liu. 2023. Is chatgpt a good causal reasoner? a comprehensive evaluation. arXiv preprint arXiv:2305.07375.

Lei Gao, Prafulla Kumar Choubey, and Ruihong Huang. 2019. Modeling document-level causal structures for event causal relation identification. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers).

Roxana Girju. 2003. Automatic detection of causal relations for question answering. In Proceedings of the ACL 2003 Workshop on Multilingual Summarization and Question Answering, pages 76-83.

Arthur C Graesser, Brent Olde, and Bianca Klettke. 2003. How does the mind construct and represent stories? Narrative Impact: Social and Cognitive Foundations, page 121.

Thomas L Griffiths. 2017. Formalizing prior knowledge in causal induction. The Oxford Handbook of Causal Reasoning, pages 115-126.

Thomas R Gruber. 1993. A translation approach to portable ontology specifications. Knowledge Acquisition, 5(2):199-220.

Chikara Hashimoto, Kentaro Torisawa, Julien Kloetzer, Motoki Sano, István Varga, Jong-Hoon Oh, and Yutaka Kidawara. 2014. Toward future scenario generation: Extracting event causality exploiting semantic relation, context, and association features. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 987-997.

Oktie Hassanzadeh, Parul Awasthy, Ken Barker, Onkar Bhardwaj, Debarun Bhattacharjya, Mark Feblowitz, Lee Martie, Jian Ni, Kavitha Srinivas, and Lucy Yip. 2022. Knowledge-based news event analysis and forecasting toolkit. In IJCAI, pages 5904-5907.

Mutian He, Tianqing Fang, Weiqi Wang, and Yangqiu Song. 2022. Acquiring and modelling abstract commonsense knowledge via conceptualization. arXiv preprint arXiv:2206.01532.

Stefan Heindorf, Yan Scholten, Henning Wachsmuth, Axel-Cyrille Ngonga Ngomo, and Martin Potthast. 2020. Causenet: Towards a causality graph extracted from the web. In CIKM. ACM.

Jena D Hwang, Chandra Bhagavatula, Ronan Le Bras, Jeff Da, Keisuke Sakaguchi, Antoine Bosselut, and Yejin Choi. 2021. (comet-) atomic 2020: On symbolic and neural commonsense knowledge graphs. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 6384-6392.

Alon Jacovi, Avi Caciularu, Omer Goldman, and Yoav Goldberg. 2023. Stop uploading test data in plain text: Practical strategies for mitigating data contamination by evaluation benchmarks. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 5075-5084.

Zhijing Jin, Jiarui Liu, Zhiheng Lyu, Spencer Poff, Mrinmaya Sachan, Rada Mihalcea, Mona Diab, and Bernhard Scholkopf. 2023. Can large language models infer causation from correlation? arXiv preprint arXiv:2306.05836.

Philip Nicholas Johnson-Laird and Sangeet Khemlani. 2017. Mental models and causation. Oxford Handbook of Causal Reasoning, pages 1-42.

Emre Kiciman, Robert Ness, Amit Sharma, and Chenhao Tan. 2023. Causal reasoning and large language models: Opening a new frontier for causality. arXiv preprint arXiv:2305.00050.

Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022. Large language models are zero-shot reasoners. Advances in Neural Information Processing Systems, 35:22199-22213.

David Lewis. 2013. Counterfactuals. John Wiley & Sons.

Jiaxuan Li, Lang Yu, and Allyson Ettinger. 2022. Counterfactual reasoning: Do language models need world knowledge for causal inference? In NeurIPS 2022 Workshop on Neuro Causal and Symbolic AI (nCSI).

Steve Fonin Mbouadeu, Martin Lorenzo, Ken Barker, and Oktie Hassanzadeh. 2023. An evaluation framework for mapping news headlines to event classes in a knowledge graph. In Proceedings of the 6th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text, pages 44-52, Varna, Bulgaria. INCOMA Ltd., Shoumen, Bulgaria.

Paramita Mirza, Rachele Sprugnoli, Sara Tonelli, and Manuela Speranza. 2014. Annotating causality in the tempeval-3 corpus. In Proceedings of the EACL 2014 Workshop on Computational Approaches to Causality in Language (CAtoCL), pages 10-19.

Paramita Mirza and Sara Tonelli. 2014. An analysis of causality between events and its relation to temporal information. In Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pages 2097-2106, Dublin, Ireland. Dublin City University and Association for Computational Linguistics.

Paramita Mirza and Sara Tonelli. 2016. Catena: Causal and temporal relation extraction from natural language texts. In The 26th International Conference on Computational Linguistics, pages 64-75. ACL.

Nasrin Mostafazadeh, Aditya Kalyanpur, Lori Moon, David Buchanan, Lauren Berkowitz, Or Biran, and Jennifer Chu-Carroll. 2020. GLUCOSE: GeneraLized and COntextualized story explanations. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4569-4586, Online. Association for Computational Linguistics.

Gregory Murphy. 2004. The big book of concepts. MIT Press.

Ignavier Ng, AmirEmad Ghassami, and Kun Zhang. 2020. On the role of sparsity and dag constraints for learning linear dags. Advances in Neural Information Processing Systems, 33:17943-17954.

Ignavier Ng, Shengyu Zhu, Zhitang Chen, and Zhuangyan Fang. 2019. A graph autoencoder approach to causal structure learning. arXiv preprint arXiv:1911.07420.

Animesh Nighojkar and John Licato. 2021. Improving paraphrase detection with the adversarial paraphrasing task. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 7106-7116, Online. Association for Computational Linguistics.

Jong-Hoon Oh, Kentaro Torisawa, Chikara Hashimoto, Motoki Sano, Stijn De Saeger, and Kiyonori Ohtake. 2013. Why-question answering using intra- and inter-sentential causal relations. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1733-1743.

Judea Pearl. 2009. Causality. Cambridge University Press.

Judea Pearl and Dana Mackenzie. 2018. The book of why: the new science of cause and effect. Basic Books.

James Pustejovsky, Patrick Hanks, Roser Sauri, Andrew See, Robert Gaizauskas, Andrea Setzer, Dragomir Radev, Beth Sundheim, David Day, Lisa Ferro, et al. 2003. The timebank corpus. In Corpus Linguistics, volume 2003, page 40. Lancaster, UK.

James Pustejovsky, Jessica Littman, Roser Saurí, and Marc Verhagen. 2006. Timebank 1.2 documentation. Event London, no. April, pages 6-11.

Nils Reimers and Iryna Gurevych. 2020. Making monolingual sentence embeddings multilingual using knowledge distillation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.

Mehwish Riaz and Roxana Girju. 2014. Recognizing causality in verb-noun pairs via noun and verb semantics. In Proceedings of the EACL 2014 Workshop on Computational Approaches to Causality in Language (CAtoCL), pages 48-57.

Melissa Roemmele, Cosmin Adrian Bejan, and Andrew S Gordon. 2011. Choice of plausible alternatives: An evaluation of commonsense causal reasoning. In 2011 AAAI Spring Symposium Series.

Angelika Romanou, Syrielle Montariol, Debjit Paul, Leo Laugier, Karl Aberer, and Antoine Bosselut. 2023. Crab: Assessing the strength of causal relationships between real-world events. arXiv preprint arXiv:2311.04284.

Peter J Rousseeuw. 1987. Silhouettes: a graphical aid to the interpretation and validation of cluster analysis. Journal of Computational and Applied Mathematics, 20:53-65.

Maarten Sap, Ronan Le Bras, Emily Allaway, Chandra Bhagavatula, Nicholas Lourie, Hannah Rashkin, Brendan Roof, Noah A Smith, and Yejin Choi. 2019. Atomic: An atlas of machine commonsense for if-then reasoning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 3027-3035.

Shirong Shen, Heng Zhou, Tongtong Wu, and Guilin Qi. 2022. Event causality identification via derivative prompt joint learning. In Proceedings of the 29th International Conference on Computational Linguistics, pages 2288-2299, Gyeongju, Republic of Korea. International Committee on Computational Linguistics.

Peter Spirtes and Clark Glymour. 1991. An algorithm for fast recovery of sparse causal graphs. Social Science Computer Review, 9(1):62-72.

Peter Spirtes, Clark N Glymour, and Richard Scheines. 2000. Causation, prediction, and search. MIT Press.

Douglas Steinley. 2004. Properties of the Hubert-Arabie adjusted Rand index. Psychological Methods, 9(3):386.

Yidan Sun, Qin Chao, and Boyang Li. 2023. Event causality is key to computational story understanding. arXiv preprint arXiv:2311.09648.

Katharine Tillman, Nestor Tulagan, Jessica Sullivan, S Denison, M Mack, Y Xu, and BC Armstrong. 2020. Children's spontaneous inferences about time and causality in narrative. In CogSci.

Vincent A Traag, Ludo Waltman, and Nees Jan Van Eck. 2019. From louvain to leiden: guaranteeing well-connected communities. Scientific Reports, 9(1):5233.

Nguyen Xuan Vinh, Julien Epps, and James Bailey. 2009. Information theoretic measures for clusterings comparison: is a correction for chance necessary? In Proceedings of the 26th Annual International Conference on Machine Learning, pages 1073-1080.

Matthew J Vowels, Necati Cihan Camgoz, and Richard Bowden. 2022. D'ya like dags? a survey on structure learning and causal discovery. ACM Computing Surveys, 55(4):1-36.

Michael Waldmann. 2017. The Oxford handbook of causal reasoning. Oxford University Press.

Xiaoqiang Wang, Yali Du, Shengyu Zhu, Liangjun Ke, Zhitang Chen, Jianye Hao, and Jun Wang. 2021. Ordering-based causal discovery with reinforcement learning. arXiv preprint arXiv:2105.06631.

Xiaozhi Wang, Yulin Chen, Ning Ding, Hao Peng, Zimu Wang, Yankai Lin, Xu Han, Lei Hou, Juanzi Li, Zhiyuan Liu, Peng Li, and Jie Zhou. 2022. MAVEN-ERE: A unified large-scale dataset for event coreference, temporal, causal, and subevent relation extraction. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 926-941, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.

Phillip Wolff and Grace Song. 2003. Models of causation and the semantics of causal verbs. Cognitive Psychology, 47(3):276-332.

Jie Yang, Soyeon Caren Han, and Josiah Poon. 2022. A survey on extraction of causal relations from natural language text. Knowledge and Information Systems, 64(5):1161-1186.

Yue Yu, Jie Chen, Tian Gao, and Mo Yu. 2019. Dag-gnn: Dag structure learning with graph neural networks. In International Conference on Machine Learning, pages 7154-7163. PMLR.

Matej Zečević, Moritz Willig, Devendra Singh Dhami, and Kristian Kersting. 2023. Causal parrots: Large language models may talk causality but are not causal. arXiv preprint arXiv:2308.13067.

Keli Zhang, Shengyu Zhu, Marcus Kalander, Ignavier Ng, Junjian Ye, Zhitang Chen, and Lujia Pan. 2021. gcastle: A python toolbox for causal discovery.

Xun Zheng, Bryon Aragam, Pradeep K Ravikumar, and Eric P Xing. 2018. Dags with no tears: Continuous optimization for structure learning. Advances in Neural Information Processing Systems, 31.

Xun Zheng, Chen Dan, Bryon Aragam, Pradeep Ravikumar, and Eric Xing. 2020. Learning sparse nonparametric dags. In International Conference on Artificial Intelligence and Statistics, pages 3414-3425. PMLR.

Xinyu Zuo, Pengfei Cao, Yubo Chen, Kang Liu, Jun Zhao, Weihua Peng, and Yuguang Chen. 2021. Improving event causality identification via self-supervised representation learning on external causal statement. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 2162-2172, Online. Association for Computational Linguistics.

Rolf A Zwaan, Mark C Langston, and Arthur C Graesser. 1995. The construction of situation models in narrative comprehension: An event-indexing model. Psychological Science, 6(5):292-297.
# A Related Work
Theory of causation. Extensive research into theories of causation spans various disciplines (Dalal et al., 2023) such as philosophy (Beebee et al., 2009), cognitive science (Waldmann, 2017), and probability and statistics (Pearl, 2009). In this paper, we follow the counterfactual theory of causation (Lewis, 2013), which entails three aspects: a relational aspect (involving a cause and an effect component), a temporal aspect (the cause precedes the effect), and a counterfactual aspect (if the causing event had not taken place, the effect would not have occurred).
Causal discovery in Statistics. The task of causal discovery or structure learning is to recover the causal DAG from available observational or experimental data. It remains a challenging problem in statistics, since the search space is super-exponential in the number of variables and the true DAG is not always identifiable. Causal discovery methods primarily fall into two categories: constraint-based and score-based approaches. Constraint-based methods such as PC (Spirtes and Glymour, 1991) and FCI (Spirtes et al., 2000) extract conditional independencies from the data distribution to detect edge existence and direction. Meanwhile, score-based methods search for model parameters in the DAG space by optimizing a scoring function (Chickering, 2002; Zheng et al., 2018; Yu et al., 2019; Bello et al., 2022).
Causal discovery in NLP. Ample work in NLP focuses on event causality identification (ECI), which identifies cause/effect spans from textual descriptions. ECI is commonly treated as a classification task that relies heavily on annotated data for supervised learning (Oh et al., 2013; Hashimoto et al., 2014; Riaz and Girju, 2014; Cheng and Miyao, 2017; Gao et al., 2019), or at least partially for semi-supervised training (Zuo et al., 2021; Shen et al., 2022). Machine learning models trained on mention-level annotations exploit event temporal links (Pustejovsky et al., 2003, 2006) and/or lexical cues or semantics that signal causal information, including, but not limited to, prepositions (e.g., because of, by, due to), conjunctions/conjunctive adverbs (e.g., because, since, therefore, as a result), or verb semantics (Wolff and Song, 2003; Mirza and Tonelli, 2014) such as causation verbs (e.g., cause, force). However, as these annotated benchmarks are relatively limited in scale, ECI models are prone to overfitting and tend to mishandle new and unseen cases (Zuo et al., 2021; Sun et al., 2023).
Causal discovery with LLMs. Despite impressive language skills and breakthroughs in AI capabilities, large language models (LLMs) are reported to exhibit the same difficulty: they fail to perform causal inference in out-of-distribution settings where the variable names and textual expressions used in the queries differ from those in the training set (Jin et al., 2023; Zečević et al., 2023). On the other hand, whether LLMs can perform causal discovery is a controversial topic. Kiciman et al. (2023) show that in the medical and climate domains, LLMs can achieve competitive performance in determining pairwise causal relationships, with accuracies up to 97%, yet they rely heavily on prompt engineering and exhibit occasional inconsistencies. Meanwhile, full graph discovery remains excessively challenging for LLMs, though proper prompting could yield some potential. However, when evaluated on datasets of real-world events, GPT-3 and GPT-4 are consistently surpassed by fine-tuned small pre-trained language models on ECI tasks (Gao et al., 2019) while underperforming greatly on binary pairwise causality inference (Romanou et al., 2023). Since LLMs are trained on massive volumes of natural language text, they excel at identifying causal event pairs but not non-causal ones, raising concerns about memorization of event knowledge rather than generalization (Jacovi et al., 2023; Gao et al., 2023; Romanou et al., 2023).
Abstract reasoning. The human brain is equipped with a remarkable capability for abstract reasoning: thinking in concepts and generalizations over concrete entities that exist in infinite variety. Conceptualization glues separate pieces of experience into a mental world that forms commonsense knowledge and allows us to function in a complex reality (Murphy, 2004). By the same logic, events sharing a physical mechanism should exhibit similar causal dynamics. For example, broken window and shattered glass both refer to an effect resulting from a hard physical object hitting a glassy surface. GLUCOSE (Mostafazadeh et al., 2020) is a large-scale annotated corpus that explicitly captures causal commonsense knowledge. The dataset covers 10 dimensions of causal explanations for story events across a wide range of entities and contexts. GLUCOSE provides rich translations of specific expressions into generic inferential rules dependent on story contexts, and is thus a rich resource for exploiting abstract causal knowledge, which remains a promising yet under-explored avenue. In the common pursuit of abstract knowledge, He et al. (2022) build an event conceptualization pipeline on top of ATOMIC (Sap et al., 2019), a large-scale annotated commonsense knowledge graph, wherein the mentioned textual entities are replaced with their corresponding abstract concepts.
# B Data Annotation Pipeline
We recruit a total of 13 university students in Malaysia aged 20-30. Annotation took 329.7 hours in total, at an hourly rate of RM20 (Malaysian ringgit), which is higher than the minimum wage of RM7.1.
As for the annotation guidelines, we translate the technical terminology of Section 2 into lay language comprehensible to the human annotators.
# B.1 Abstract Event Extraction
There are five steps in this annotation phase. Steps 1 and 2 are key to extracting abstract events, whereas Steps 3-5 serve as post-processing to strengthen consistency among human annotators.
Step 1: Sub-clustering. Each annotator is presented with a set of clusters generated by an automatic clustering algorithm. Each cluster contains multiple English sentences that describe events in daily life. Every word in each sentence is lemmatized to its base form so that the tense of the sentence does not influence the judgment of meaning. For every cluster, annotators are required to sub-group event sentences that are semantically similar or related. There can exist clusters in which all sentences are related to one another; in this case no sub-clustering is needed. There can also be outlier events, i.e., sentences that do not belong to any sub-cluster. For a sub-cluster to exist, it must contain at least two events. If an event cannot be sub-clustered, the annotator is to classify it as an outlier. If a sentence contains lexical or grammatical errors that make it impossible to judge, the annotator is also asked to highlight and correct it whenever appropriate before clustering.
Two event sentences are considered semantically related or similar if they describe the same event, and the decision must not be affected by information about location and time. We note that there is a difference between a state/action actually taking place and the prospect of the state/action taking place. In particular, we outline 11 scenarios where word use conveys differences in meaning.
1. single participant vs. group of participants e.g., a person be playing in the park $\neq$ a person and another person be playing in the park.
2. affirmation vs. negation e.g., a person be asleep $\neq$ a person do not sleep.
3. present vs. future tense e.g., a person go to sleep $\neq$ a person will go to sleep.
4. ability e.g., a person do not eat $\neq$ a person cannot eat.
5. intention or desire e.g., a person do not eat $\neq$ a person do not want to eat.
6. deduction or possibility e.g., it rain $\neq$ it may rain.
7. obligation, advice or prohibition e.g., a person do not eat $\neq$ a person should not eat.
8. offers, effort or decision e.g., a person help another person $\neq$ a person offer to assist another person; a person go to the gym $\neq$ a person decide to go to the gym.
9. location as object. In some cases, the object receiving an action from the verb refers to a place or location, e.g., a person clean a place. Here the place is considered a (spatial) item being acted upon, similar to any other item such as a cup or a table $\rightarrow$ a person clean a place $=$ a person clean something.
10. multiple actions. Some sentences describe two actions happening at the same time, e.g., a person take something and leave. In order to evaluate its meaning, one must select one of them as the key action. The key action is the action that is described by most of the other events in the same cluster. This means that if most of the other events are about someone leaving somewhere, the leave action should be the focus instead of the take action.
11. continuous vs. simple tense. Some sentences describe actions in the continuous state, e.g., a person be go home. We ignore the continuous state of the action and consider it equivalent to the action described in the simple tense $\rightarrow$ a person be go home $=$ a person go home.
Step 2: Topic identification. In this step, the annotator is asked to identify the topic for every cluster or sub-cluster formed. The topic must first be an event; therefore it must contain at least two components: participant(s) and action. The topic must be specific about the state or action that takes place. At the same time, the topic must be written in a way that makes it general or abstract enough to cover all event sentences. Whenever possible, it is acceptable to use the most representative event sentence in a cluster as the topic.
Intermediate processing. In Steps 1 and 2, we divide the collection of clusters into 7 batches. Each batch contains 60 clusters, and two workers are asked to annotate the same batch of clusters. This results in each cluster having two annotation results. Subsequently, an algorithm is run to automatically unify the results from the two annotators, as sketched below. For every cluster in the original data, the algorithm starts by randomly selecting an event as a centroid. It then forms a sub-cluster around the centroid that contains all other events considered by both annotators to be semantically related to the centroid. The topic assigned to that sub-cluster is presented in the format TOPIC : [Text 1] / [Text 2], where [Text 1] is the topic assigned to events in this sub-cluster by the first annotator and [Text 2] is the topic assigned by the second annotator. The process repeats with the remaining events until all instances are processed. Thereafter, any event that is not assigned to any cluster exists as a stand-alone instance and is temporarily considered an outlier.
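A minimal sketch of this unification step, with hypothetical input structures: `related_a`/`related_b` hold the unordered event pairs each annotator marked as related, and `topic_a`/`topic_b` map an event to the topic its annotator assigned.

```python
import random

def unify(events, related_a, related_b, topic_a, topic_b):
    """Sketch of the unification algorithm described above (names are ours)."""
    remaining = set(events)
    sub_clusters, outliers = [], []
    while remaining:
        # Randomly select a centroid among the unprocessed events.
        centroid = random.choice(sorted(remaining))
        remaining.discard(centroid)
        # Keep only events that BOTH annotators relate to the centroid.
        members = {e for e in remaining
                   if frozenset((centroid, e)) in related_a
                   and frozenset((centroid, e)) in related_b}
        remaining -= members
        cluster = {centroid, *members}
        if len(cluster) >= 2:
            label = f"TOPIC : {topic_a[centroid]} / {topic_b[centroid]}"
            sub_clusters.append((label, sorted(cluster)))
        else:
            outliers.append(centroid)  # temporarily treated as an outlier
    return sub_clusters, outliers
```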
The next steps focus on resolving the disagreements between the two annotation results, which includes topic alignment and outliers processing. We assume that a sub-cluster is properly annotated if it (1) contains at least two instances and (2) is considered an outlier by neither annotator.
Step 3: Topic alignment. Every cluster is now annotated with two topics. If both topics describe the same event, the annotator is asked to choose either one, preferring the more representative. Otherwise, they choose the one that fits most of the sentences in the cluster. If the chosen topic is already assigned to some previous cluster, the current cluster is merged into that cluster. If at least one of the topics is Outliers (i.e., at least one annotator considers the sub-cluster to be outliers), the cluster is temporarily viewed as Outliers.
Step 4: Outliers processing. The annotator moves on to process the outliers. For any event that is assigned by only one of the previous annotators to be an outlier while the other associated it with some existing sub-cluster, the annotator is asked to merge it into the assigned sub-cluster if the event can be represented by the topic of that sub-cluster; otherwise, it is kept as an outlier. For any event that both annotators agree is an outlier, the current annotator is asked to re-examine it for possible assignment to an existing sub-cluster. The merging decision must again be based on the conditions described in Step 1. Any remaining stand-alone instances are discarded.
Step 5: Topic matching. This step aims to correct potential mis-clustering from the automatic procedure. We take the outlier events and attempt to re-categorize them into the post-annotated clustering results from all the above steps. For each outlier, we present the annotators with a set of candidate clusters to which adding the outlier would not violate causal consistency. We ask them to select the one cluster whose topic is most semantically similar to the outlier. The rules for determining the semantic similarity of a sentence pair follow from Step 1. It is possible that no topic matches the outlier. If any topic is a word-by-word exact match, that topic must be selected. We also add another rule that requires the annotators to select the topic with the same level of abstraction (generality) or concreteness (specificity) as the outlier event, since some topics are abstract or concrete versions of other topics. More specifically, if the outlier is concrete but the concrete topic is not presented for selection, the abstract topic is selected. If the outlier is abstract but the abstract topic is not presented for selection, the concrete topic must not be selected.
# B.2 Causal Relations Discovery
The annotator is tasked with evaluating candidate pairs of clusters to determine whether a cause-and-effect relationship exists between them, based on their respective topics. Since each cluster's topic represents an event abstraction, and in essence, an event itself, the decision on causal relation hinges on whether the two topics describe causally linked events. Based on the cause-effect definition in Section 2, we provide them with the following criteria to guide their decision about whether an event $A$ causes another event $B$:
1. a causal relation must be temporal, but a temporal relation is not always causal;
2. the action/state of $A$ directly leads to the action/state of $B$, i.e., there must be no intermediate events, or if there is one, it should be extremely rare in real-world scenarios;
3. the event $B$ would not occur if $A$ did not occur.
Initially, the workers provide non-contextual annotations based solely on their commonsense understanding of the abstractions. A relation is deemed valid if the annotator can envision a plausible scenario in daily life where the situation occurs frequently, commonly, and with high likelihood. If no such scenario comes to mind, the clusters are considered unrelated. In the subsequent step, we identify the highly disagreed pairs, where the three annotators each make distinct decisions regarding causality, i.e., $A$ causes $B$, $B$ causes $A$, or $A$ and $B$ are unrelated. For these pairs, workers are presented with contextual information from stories in GLUCOSE and asked to reconsider their decisions. The final determination of the relationship is made through majority voting.
# C Clustering Algorithm
Our clustering algorithm, named PIVOT, is inspired by the pivoting algorithm proposed in Fukunaga (2019). The PIVOT algorithm first randomly selects a pair of cause-effect events and then, for each of them, gathers its most similar neighbors subject to a similarity threshold of 70%. We repeat the process for the remaining event mentions, excluding the previously assigned events. The initial results are passed to a post-processing step that removes self-loops and bi-directional links. We remove clusters that have fewer than 10 samples and whose maximum pairwise similarity is below 50%. Each cluster can now be considered a node in a graph, and we use GLUCOSE to recover the causal relations among them to construct a temporary causal graph. A minimal sketch of the procedure is given below.
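The following sketch is not the exact implementation; the input format, helper names, and our reading of the filtering rule are assumptions for illustration.

```python
import random
import numpy as np

def pivot_clustering(pairs, sim, sim_thresh=0.70, min_size=10, min_max_sim=0.50):
    """Sketch of PIVOT: grow clusters around randomly chosen cause-effect pivots.

    pairs -- list of (cause_id, effect_id) event-mention pairs (assumed input)
    sim   -- (n x n) event similarity matrix, e.g. from Eq. (1)
    """
    n = sim.shape[0]
    unassigned = set(range(n))
    clusters = []
    random.shuffle(pairs)
    for cause, effect in pairs:
        for pivot in (cause, effect):
            if pivot not in unassigned:
                continue  # already absorbed into an earlier cluster
            unassigned.discard(pivot)
            # Most similar free neighbors of the pivot, against the 70% threshold.
            members = [pivot] + [j for j in list(unassigned)
                                 if sim[pivot, j] >= sim_thresh]
            unassigned -= set(members)
            clusters.append(members)
    # Post-filter: drop clusters that are both small and weakly similar
    # (our reading of the filtering rule above).
    kept = []
    for c in clusters:
        pairwise = [sim[i, j] for i in c for j in c if i < j]
        max_sim = max(pairwise) if pairwise else 0.0
        if not (len(c) < min_size and max_sim < min_max_sim):
            kept.append(c)
    return kept
```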
Ablation study. The main motivation behind the PIVOT algorithm is to ensure the initial graph is mostly acyclic while avoiding any sub-optimality introduced by post-processing. To validate whether PIVOT is most effective in ensuring causal consistency, we conduct an ablation study against popular clustering algorithms, including the OPTICS (Ankerst et al., 1999), LOUVAIN (Blondel et al., 2008), and LEIDEN (Traag et al., 2019) algorithms, where LOUVAIN and LEIDEN were proposed for community detection problems. The criteria for selecting these clustering algorithms are: (1) scalability to medium-to-large-sized data, (2) ability to accommodate a custom affinity matrix, and (3) high cluster homogeneity score. Table 7 reports the quality of the algorithms under analysis and shows that our PIVOT algorithm yields the most desirable performance.
Notations. We use lower-case letters (e.g., $v$) to denote a single event, capital letters (e.g., $V$) a cluster of events, and blackboard bold letters (e.g., $\mathbb{V}$) a set of clusters. We let $\mathcal{D}$ denote the dataset of causal event mentions; $x \to y$ indicates event $x$ is a cause of event $y$; $x \gets y$ indicates event $x$ is an effect of event $y$; $x \leftrightarrow y$ indicates $x$ and $y$ are causally related (either cause or effect). We also define the similarity between an event $y$ and a cluster $V$ as the average similarity between $y$ and every event $x$ in $V$:

$$
S_{yV} = \frac{1}{|V|} \sum_{x \in V} S_{xy},
$$
where $S_{xy}$ is the similarity score between two events according to Eq. (1).
Performance metrics. In the following, we describe the unsupervised performance metrics used to assess the clustering algorithms in Table 7. Given a set of clusters $\mathbb{C}$, let $\boldsymbol{A}$ be the matrix where $\boldsymbol{A}_{ij}$ is the number of events in cluster $C_i \in \mathbb{C}$ that cause any event in cluster $C_j \in \mathbb{C}$. Recall that in this stage the causal relations between events are extracted from the GLUCOSE dataset. A cluster $A$ is said to cause another cluster $B$ if at least one event mention in cluster $A$ causes any event mention in cluster $B$, according to the cause-effect definition in Section 2.
1. Self-loop ratio: Proportion of clusters in which the events are either cause or effect of each other.
$$
\frac{1}{|\mathbb{C}|} \sum_{i=1}^{|\mathbb{C}|} \frac{\boldsymbol{A}_{ii}}{2|C_i|}.
$$
2. Bi-directional ratio: Proportion of cluster pairs that are both cause and effect of one another.
$$
\frac{2}{|\mathbb{C}|^2 - |\mathbb{C}|} \sum_{i=1}^{|\mathbb{C}|-1} \sum_{j=i+1}^{|\mathbb{C}|} \frac{\min(\boldsymbol{A}_{ij}, \boldsymbol{A}_{ji})}{\max(\boldsymbol{A}_{ij}, \boldsymbol{A}_{ji})}.
$$
3. Silhouette coefficient (Rousseeuw, 1987): Measure of how similar an instance is to its own cluster (cohesion) compared to other clusters (separation). A high value indicates that the object is well matched to its own cluster and poorly matched to neighboring clusters.
$$
\frac{1}{|\mathcal{D}|} \sum_{x \in \mathcal{D}} \frac{a_x - b_x}{1 - \min(a_x, b_x)},
$$
where $a_{x}$ is the mean similarity between event $x$ and all other events in the same cluster; $b_{x}$ is the mean similarity between event $x$ and all other events in the next nearest cluster.
4. Homogeneity score: Average pairwise similarity of events in a cluster.
$$
\frac{1}{|\mathbb{C}|} \sum_{i=1}^{|\mathbb{C}|} \frac{2}{|C_i|^2 - |C_i|} \sum_{x, y \in C_i,\, x \neq y} S_{xy},
$$
where $S_{xy}$ is the similarity score between two events according to Eq. (1).
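For concreteness, here is a minimal sketch of the two similarity-based quantities above, given a dense similarity matrix `S` (from Eq. (1)) and clusters as lists of event indices; all helper names are ours.

```python
import numpy as np

def event_cluster_similarity(S: np.ndarray, y: int, cluster: list[int]) -> float:
    # S_{yV}: average similarity between event y and every event x in V.
    return float(np.mean([S[x, y] for x in cluster]))

def homogeneity_score(S: np.ndarray, clusters: list[list[int]]) -> float:
    # Metric 4: mean pairwise similarity within each cluster (over unordered
    # pairs), then averaged over all clusters.
    per_cluster = []
    for c in clusters:
        pairs = [S[x, y] for i, x in enumerate(c) for y in c[i + 1:]]
        per_cluster.append(np.mean(pairs))
    return float(np.mean(per_cluster))
```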
Table 8 reports the numerical results for the experiment on Abstract Event Identification in Section 4.1. For the supervised metrics, we refer readers to scikit-learn's documentation for the precise formulations and implementations of Adjusted Rand Index (Steinley, 2004) and Normalized Mutual Information (Vinh et al., 2009).
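As a pointer, those supervised scores can be computed with the standard scikit-learn implementations; the label arrays below are toy stand-ins for the gold abstraction ids and predicted cluster ids of each event mention.

```python
from sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score

labels_true = [0, 0, 1, 1, 2, 2]  # gold abstraction per event (hypothetical)
labels_pred = [0, 0, 1, 2, 2, 2]  # predicted cluster per event (hypothetical)

print("ARI:", adjusted_rand_score(labels_true, labels_pred))
print("NMI:", normalized_mutual_info_score(labels_true, labels_pred))
```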
# D Statistical Causal Discovery
Background. The causal relations among $n$ variables $X = [X_i]_{i=1}^{n}$ are characterized via a structural causal model (SCM) (Pearl, 2009) over the tuple $\langle U, X, f \rangle$ that, in its general form, consists of a set of assignments
$$
X_i := f_i\big(\mathrm{PA}_{X_i}, U_i\big), \quad i = 1, \dots, n,
$$
where $U_i$ is an exogenous variable assumed to be mutually independent of the variables $\{U_1, \dots, U_n\} \setminus \{U_i\}$. The functions $f = [f_1, \dots, f_n]$ define a joint distribution $P(X)$ over the endogenous variables $X$, given a joint distribution over the exogenous variables $P(U_1, \dots, U_n)$. Each SCM induces a causal graph $\mathbf{G}$, which is often assumed to be a DAG. A directed graph $\mathbf{G} = (\mathbf{V}, \mathbf{E})$ consists of a set of nodes $\mathbf{V}$ and an edge set $\mathbf{E} \subseteq \mathbf{V}^2$ of ordered pairs of nodes with $(v, v) \notin \mathbf{E}$ for any $v \in \mathbf{V}$ (i.e., one without self-loops).
For a pair of nodes $i,j$ with $(i,j)\in \mathbf{E}$ , there is an arrow pointing from $i$ to $j$ and we write $i\rightarrow j$ . Two nodes $i$ and $j$ are adjacent if either $(i,j)\in \mathbf{E}$ or $(j,i)\in \mathbf{E}$ . If there is an arrow from $i$ to $j$ then $i$ is a parent of $j$ and $j$ is a child of $i$ . Let $\mathrm{PA}_{X_i}$ denote the set of variables associated with parents of node $i$ in $\mathbf{G}$ . The graph $\mathbf{G}$ of an SCM is obtained by creating one vertex for each $X_{i}$ and drawing directed edges from each parent $X_{j}\in \mathrm{PA}_{X_{i}}$ to $X_{i}$ . We sometimes call the elements of $\mathrm{PA}_{X_i}$ the direct causes of $X_{i}$ , and we call $X_{i}$ a direct effect of each of its direct causes. Importantly, these functions are to be read as assignments rather than as mathematical equations, and they should be viewed as modelling physical mechanisms inducing or generating every $X_{i}$ from variables $\mathrm{PA}_{X_i}$ .
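As a toy illustration of reading these assignments generatively, consider a three-node chain $X_1 \to X_2 \to X_3$; the functional forms below are invented purely for the example.

```python
import numpy as np

rng = np.random.default_rng(0)
n = 1000
U1, U2, U3 = rng.normal(size=(3, n))    # mutually independent exogenous noise

X1 = U1                                  # X1 := f1(U1)
X2 = np.tanh(2.0 * X1) + 0.5 * U2        # X2 := f2(X1, U2)
X3 = (X2 > 0).astype(float) + 0.5 * U3   # X3 := f3(X2, U3)
# Jointly, (X1, X2, X3) follow the distribution P(X) induced by X1 -> X2 -> X3.
```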
Experiments. We here discuss how ACCESS is used to assess to what extent statistical structure learning methods are applicable to recovering causal relations among event abstractions. As illustrated in Figure 1, after extracting abstractions, one can build representations for abstract events in the original corpus and apply structure learning on top of such data for full graph discovery. A simple representation is a co-occurrence matrix of size (#stories $\times$ #abstractions), where each entry takes a binary value indicating whether an abstraction has any of its mentions appearing in a story. Each abstraction is thus treated as a Bernoulli random variable, and the task of causal discovery is to recover the underlying SCM, whose structural functions are commonly non-convex. A sketch of building this representation follows.
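A minimal sketch of constructing the binary story-by-abstraction matrix; the input format (`mentions` maps a story id to the set of abstraction ids mentioned in it) is an assumption for illustration.

```python
import numpy as np

def cooccurrence_matrix(mentions: dict[int, set[int]], n_abstractions: int) -> np.ndarray:
    """Binary (#stories x #abstractions) co-occurrence matrix."""
    stories = sorted(mentions)
    X = np.zeros((len(stories), n_abstractions), dtype=np.int8)
    for row, story in enumerate(stories):
        for abstraction in mentions[story]:
            X[row, abstraction] = 1  # abstraction has a mention in this story
    return X
```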
Due to the limited scalability of existing statistical algorithms, we resort to learning sub-graphs by setting thresholds to select nodes that appear frequently while ensuring that the true graph is acyclic. Specifically, our selected sub-graphs are composed of edges where both nodes are adjacent to at least one other node, and each node corresponds to an abstraction whose occurrences exceed a certain frequency threshold. In our experiment, we set thresholds for document frequency within $\{25, 30, 35, 40, 45\}$ , resulting in sub-graphs with 5, 7, 16, 19, 45 nodes. The experiments are run on 5 CPU cores.
We experiment with popular constraint-based and score-based algorithms. We select those that are scalable and capable of capturing non-linear causal relationships without relying on specific model forms such as additive noise. In this paper, we report the results for the following algorithms:
<table><tr><td>Metrics</td><td>LOUVAIN</td><td>LEIDEN</td><td>OPTICS</td><td>PIVOT</td></tr><tr><td>Bi-directional ratio ↓</td><td>0.179</td><td>0.162</td><td>0.011</td><td>0.004</td></tr><tr><td>Self-loop ratio ↓</td><td>0.252</td><td>0.361</td><td>0.007</td><td>0.001</td></tr><tr><td>Silhouette coefficient (Euclidean) ↑</td><td>-0.120</td><td>-0.137</td><td>-0.252</td><td>-0.015</td></tr><tr><td>Silhouette coefficient (Cosine) ↑</td><td>-0.234</td><td>-0.262</td><td>-0.392</td><td>-0.036</td></tr><tr><td>Homogeneity score ↑</td><td>0.506</td><td>0.577</td><td>0.810</td><td>0.907</td></tr></table>
Table 7: Evaluation of alternative clustering algorithms. Bold indicates best performance. $\uparrow$ Higher is better. $\downarrow$ Lower is better.
<table><tr><td>Metrics</td><td>LOUVAIN</td><td>LEIDEN</td><td>OPTICS</td><td>PIVOT(*)</td></tr><tr><td colspan="5">Generalizations from GPT-4o-mini</td></tr><tr><td>Adjusted rand index ↑</td><td>0.016</td><td>0.018</td><td>0.001</td><td>0.168</td></tr><tr><td>Normalized mutual information ↑</td><td>0.450</td><td>0.463</td><td>0.384</td><td>0.784</td></tr><tr><td colspan="5">Generalizations from GLUCOSE</td></tr><tr><td>Adjusted rand index ↑</td><td>0.042</td><td>0.045</td><td>0.011</td><td>0.347</td></tr><tr><td>Normalized mutual information ↑</td><td>0.635</td><td>0.639</td><td>0.699</td><td>0.869</td></tr></table>
Table 8: Experimental results of using automatic clustering for identifying abstractions using generalizations by ChatGPT and human-annotated generalizations from GLUCOSE. (*) In this experiment, we use the original implementation of the PIVOT algorithm in Fukunaga (2019). Bold indicates best performance.

Figure 2: SHD (left) and F1 score (right) of estimated DAGs from statistical structure learning methods. Lower SHD is better. Higher F1 is better.

- PC algorithm (Spirtes and Glymour, 1991): A classic approach based on conditional independence tests, for which we run two kinds of tests: Chi-squared and G-squared.
- DAG-GNN (Yu et al., 2019): DAG structure learning with graph neural networks.
- GAE (Ng et al., 2019): This method utilizes gradient descent and graph auto-encoders to model non-linear causal relationships.
- CORL (Wang et al., 2021): A reinforcement learning-based algorithm with flexible score functions and enhanced efficiency.
Besides the above methods, we have also tested NOTEARS (Zheng et al., 2020), a popular score-based algorithm, and its more efficient variant GOLEM (Ng et al., 2020). However, both unfortunately fail to recover any edges across all settings. To ensure consistency in implementation and evaluation, we utilize the standardized framework provided by gCastle (Zhang et al., 2021). As for evaluation metrics, we report the structural Hamming distance (SHD), which quantifies the smallest number of edge additions, deletions, and reversals required to transform the recovered DAG into the true one. Additionally, we assess classification accuracy using the F1 score. Ideally, we aim for a lower normalized Hamming distance and a higher F1 score. Figure 2 reports the SHD and F1 scores of the estimated DAGs from these methods.
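A minimal sketch of this evaluation loop, assuming gCastle's documented `PC` and `MetricsDAG` interfaces (exact argument names may differ across versions); the data and gold graph here are placeholders.

```python
import numpy as np
from castle.algorithms import PC
from castle.metrics import MetricsDAG

# Placeholder inputs: a binary co-occurrence matrix restricted to one
# sub-graph, and a gold adjacency matrix standing in for ACCESS's sub-graph.
X = np.random.randint(0, 2, size=(500, 16))
B_true = np.zeros((16, 16), dtype=int)
B_true[0, 1] = 1  # one illustrative gold edge

pc = PC()                 # constraint-based discovery via CI tests
pc.learn(X)               # estimate the causal structure from data
metrics = MetricsDAG(pc.causal_matrix, B_true).metrics
print(metrics.get('shd'), metrics.get('F1'))
```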
The methods achieve relatively low accuracy on our benchmark causal graphs, which are sparse. Since the SHD scores are much higher than the graph sizes, these models tend to predict many edges, most of which are incorrect, as reflected by the low F1 scores. Scalability remains a serious challenge for statistical structure learning: as the graph scales up to 45 nodes, performance deteriorates significantly further, and most methods even fail to recover any edges. It is noteworthy that the representation power of the input data also affects causal discovery performance. It is very likely that the co-occurrence matrix is not sufficiently expressive to capture the causal knowledge. This motivates a dedicated line of research into abstract causal representation learning.
# E GLUCOSE-QA Reasoning
We here provide the prompts for LLMs in Tables 9-12. Tables 13-17 present illustrative examples of the responses from LLMs across our QA tasks.
# PROMPT: Pairwise Causal Discovery

Given the two events:
event_a: <Input the first event>
event_b: <Input the second event>
Which cause-and-effect relationship is more likely between two events?

A. event_a causes event_b.
B. event_b causes event_a.
C. There are no cause-effect relation between two events.

Let's work this out in a step by step way to be sure that we have the right answer. Then provide one final answer within the tags <answer>A or B or C</answer>.

Table 9: Prompt for the pairwise causal discovery task.
# PROMPT: Multi-choice Answer Generation on Specific-QA (zero-shot COT)

Given the following story: <Input story context>. What could be the <cause/effect> of the event <Input target effect/cause event>? Choose one or more correct answers out of the following choices: <Input answer choices>. (*) This information can help answer the question: A possible <cause/effect> of the event <Input effect/cause event abstraction> is <Input cause/effect event abstraction>. Let's work this out in a step-by-step way to be sure that we have the right answer. Then provide your final answer beginning with 'The correct answer(s):' followed by a list of the indices of the correct answers.

Table 10: Prompt for the specific multi-choice answer generation on GLUCOSE. (*) This line is removed for the experiments that do not involve causal graphs.
# PROMPT: Multi-choice Answer Generation on Abstract-QA (zero-shot COT)

Given the following story: <Input story context>.

The story describes an event where <Input generalization of target effect/cause event>. What could be the <cause/effect> of the event?

Choose one or more correct answers out of the following choices: <Input answer choices>.

(*) This information can help answer the question: A possible <cause/effect> of the event <Input effect/cause event abstraction> is <Input cause/effect event abstraction>.

Let's work this out in a step-by-step way to be sure that we have the right answer. Then provide your final answer beginning with 'The correct answer(s):' followed by a list of the indices of the correct answers.

# PROMPT: Multi-choice Answer Generation on Abstract-QA (bi-level COT)

Given the following story: <Input story context>.

The story describes an event where <Input generalization of target effect/cause event>. What could be the <cause/effect> of the event?

Choose one or more correct answers out of the following choices: <Input answer choices>.

(*) This information can help answer the question: A possible <cause/effect> of the event <Input effect/cause event abstraction> is <Input cause/effect event abstraction>.

The event <Input generalization of target effect/cause event> is described by one of the sentences in the story context. First identify that part of the story. Then retrieve the event mentioned in the story that is a corresponding cause/effect.

Let's work this out in a step-by-step way to be sure that we have the right answer. Then provide your final answer beginning with 'The correct answer(s):' followed by a list of the indices of the correct answers.

Table 11: Prompts for the abstract multi-choice answer generation on GLUCOSE. (*) This line is removed for the experiments that do not involve causal graphs.
# PROMPT: Abstract Event Identification

We need to convert the input sentence into a more general expression. The conversion consists of three steps.

First, identifying: identify entities and verb words.

Second, conversion: convert the entities with more generic words and transform the verb words into the base form.

Third, further conversion: convert the sentence into a more general expression.

Note: The generic expressions used in the conversion are placeholders for the specific details in the original sentence.

The following is a conversion example.

Original Sentence: John went to buy a new collar for his dog.

1. Identifying:
- Person: John
- Action: went, buy
- Object: a new collar
- Possession: his dog

2. Conversion: a person go to buy another thing for something
3. Further Conversion: a person buy something to do something

The following is another example.

Original Sentence: John drives near the woman.

1. Identifying:
- Person: John
- Action: drives
- Object: the woman
- Preposition: near

2. Conversion: a person see another person
3. Further Conversion: a person see another person

Now we have a test instance. Please refer to the task instruction and the above examples to do the conversion.

The input sentence is: <Input event mention>.

Please convert the sentence into a more general expression following the above-mentioned three steps.

Table 12: Prompt for the abstract event identification task.
<table><tr><td>Story</td><td>In a store, two women were arguing, and Howard wanted to intervene. He attempted to get them to stop talking, but it didn't work. So, he stepped in between them, which caused them to cease their fighting.</td></tr><tr><td>Specific Question</td><td>What could be the cause of the event 'howard wants to help the women'?</td></tr><tr><td>Abstract Question</td><td>The question describes an event where 'a person hears something in a place'. What could be the effect of the event?</td></tr><tr><td>Choices</td><td>0: "Two women fights each other.", 1: "He went in between them.", 2: "Two women were fighting in a store.", 3: "They stopped.", 4: "Howard wanted to help." 5: "He tried telling them to stop but it did not work."</td></tr><tr><td>Causal Graph (CG)</td><td>a person have a fight with another person → a person want to stop another person</td></tr><tr><td>Correct Answers</td><td>0, 2</td></tr><tr><td>GPT-4o-mini Answers</td><td>2, 4</td></tr><tr><td>GPT-4o-mini Answers w/ CG</td><td>0, 2</td></tr><tr><td>Llama3.1-8B Answers</td><td>0, 1</td></tr><tr><td>Llama3.1-8B Answers w/ CG</td><td>0, 2, 4</td></tr></table>
Table 13: Examples of multi-choice Specific-QA reasoning in GPT-4o-mini and Llama3.1-8B.
<table><tr><td>Story</td><td>His cousins were scheduled to visit later that day, so his mom had him clean in the morning, shop for groceries in the afternoon, and get ready in the evening. Eventually, his cousins arrived at his house.</td></tr><tr><td>Abstract Question</td><td>The question describes an event where 'a person are coming to a place (that is another person house)'. What could be the effect of the event?</td></tr><tr><td>Choices</td><td>0: "His cousins were coming later too his house.", 1: "He get groceries in the afternoon.", 2: "His mom made him clean all morning.", 3: "His cousins came to his house.", 4: "He get ready in the evening."</td></tr><tr><td>Causal Graph (CG)</td><td>a person come to another person's place → a person clean something</td></tr><tr><td>Correct Answers</td><td>1, 2, 4</td></tr><tr><td>GPT-4o-mini Answers</td><td>0, 3</td></tr><tr><td>GPT-4o-mini Answers w/ CG</td><td>0, 2</td></tr><tr><td>Llama3.2-3B Answers</td><td>0, 3</td></tr><tr><td>Llama3.2-3B Answers w/ CG</td><td>1, 3</td></tr></table>
Table 14: Examples of multi-choice Abstract-QA reasoning in GPT-4o-mini and Llama3.2-3B.
<table><tr><td>Story</td><td>Felix wanted to visit Disney World. One day, he won two tickets and invited his friend Alissa. However, Alissa disliked Disney, so Felix ended up going by himself.</td></tr><tr><td>Abstract Question</td><td>The question describes an event where 'a person invited another person'. What could be the cause of the event?</td></tr><tr><td>Choices</td><td>0: "Alissa hated disney.",1: "Felix wanted to go to disney world.",2: "One day he won two tickets for entry.",3: "He invited his friend Alissa.",4: "He ended up going alone."</td></tr><tr><td>Causal Graph (CG)</td><td>a person want to go to a place → a person give another person an invitation to a place</td></tr><tr><td>Correct Answers</td><td>1, 2</td></tr><tr><td>GPT-4o-mini Answers</td><td>0, 1, 3</td></tr><tr><td>GPT-4o-mini Answers w/ CG</td><td>1, 2</td></tr><tr><td>Llama2-7B Answers</td><td>1, 2</td></tr><tr><td>Llama2-7B Answers w/ CG</td><td>1, 2</td></tr></table>
|
| 547 |
+
|
| 548 |
+
Table 15: Examples of multi-choice Abstract-QA reasoning in GPT-4o-mini and Llama2-7B.
|
| 549 |
+
|
| 550 |
+
<table><tr><td>Story</td><td>He wanted toast, so he got some bread and put it in the toaster. When it popped out and landed on the floor, he ate it anyway.</td></tr><tr><td>Abstract Question</td><td>The question describes an event where 'a person got another thing (that is an ingredient in another thing'). What could be the cause of the event?</td></tr><tr><td>Choices</td><td>0: "He ate it anyway.", 1: "He put it in the toaster.", 2: "He got some bread.", 3: "It shot out of the toaster and onto the floor.", 4: "He was making toast."</td></tr><tr><td>Correct Answers</td><td>4</td></tr><tr><td>Llama3.2-3B Answers (zero-shot)</td><td>1, 2</td></tr><tr><td>Llama3.2-3B Answers</td><td>1, 4</td></tr><tr><td>Llama3.2-3B Answers + CG</td><td>1, 4</td></tr><tr><td>Llama3.1-8B Answers(zero-shot)</td><td>1, 3</td></tr><tr><td>Llama3.1-8B Answers</td><td>2</td></tr><tr><td>Llama3.1-8B Answers + CG</td><td>4</td></tr><tr><td>Llama2-7B Answers (zero-shot)</td><td>1, 2</td></tr><tr><td>Llama2-7B Answers</td><td>4</td></tr><tr><td>Llama2-7B Answers + CG</td><td>1, 4</td></tr></table>
|
| 551 |
+
|
| 552 |
+
Table 16: Examples of multi-choice Abstract-QA reasoning with bi-level COT.
|
| 553 |
+
|
| 554 |
+
<table><tr><td>Story</td><td>He wanted toast, so he got some bread and put it in the toaster. When it popped out and landed on the floor, he ate it anyway.</td></tr><tr><td>Abstract Question</td><td>The question describes an event where 'a person got another thing (that is an ingredient in another thing'. What could be the cause of the event?</td></tr><tr><td>Choices</td><td>0: "He ate it anyway.",1: "He put it in the toaster.",2: "He got some bread.",3: "It shot out of the toaster and onto the floor.",4: "He was making toast."</td></tr><tr><td>Prompt</td><td>The event 'a person got another thing (that is an ingredient in another thing)' is described by one of the sentences in the story context. First identify that part of the story. Then retrieve the event mentioned in the story that is a corresponding cause/effect.</td></tr><tr><td>Correct Answers</td><td>4</td></tr><tr><td>Rationale</td><td>To solve this question, let's break down the steps:1. Identify the sentence describing 'another thing which is an ingredient in another thing': - The relevant sentence from the given text is not explicitly stated but rather implied through analysis. In this case, what seems like 'he made breakfast food'. In other words - Toaster = Breakfast Food; Bread + Toast = Ingredients for the meal at hand. Therefore the first item on our consideration has been identified as number four because if you are going into kitchen then most likely, your objective would be about cooking something.2. Now find any events occurring after items were used during said activity using index numbers provided against them starting their respective sequence no. This means looking back towards end, start at #5 because there will definitely exist only those indexed things found before each index respectively being associated w/ this particular action taking place afterward within same timeframe i.e., related actions happening after certain things are prepared here.Considering these points: We look forward now...4 provides our initial object 'the ingredients'. This makes us say - That must come after getting things ready prior than eating. Therefore we also know - getting stuff does indeed precede putting things inside the device whose result gets eaten later.So while option zero fits too since all meals need ending act (eating) coming second to doing preparatory tasks involving having desired objects ('bread')..But finally- lets verify... As shown below, the correct answer(s): 4, 1</td></tr></table>
|
| 555 |
+
|
| 556 |
+
Table 17: Example of multi-choice Abstract-QA reasoning with bi-level COT in Llama3.2-3B.
|
accessabenchmarkforabstractcausaleventdiscoveryandreasoning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f4d82392e222b2f98ae2abe00ebccd445f86bb51044545180409f9b48a758c3b
|
| 3 |
+
size 1261413
|
accessabenchmarkforabstractcausaleventdiscoveryandreasoning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fee0d318bcfbfb152a15d60cf7a2ef2543f1337c230548107878c24981578822
|
| 3 |
+
size 727016
|
accordclosingthecommonsensemeasurabilitygap/51b70a61-a300-4946-bd40-f5512a80d45c_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6f96fa854d621d33fe691d515e2bcd08e1e61209532484bf2f22e5c46396d459
|
| 3 |
+
size 242385
|
accordclosingthecommonsensemeasurabilitygap/51b70a61-a300-4946-bd40-f5512a80d45c_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:10b927360288bbcbae9ead546c152063e8f669d90218f3071adaf8b89aa4d761
|
| 3 |
+
size 308412
|
accordclosingthecommonsensemeasurabilitygap/51b70a61-a300-4946-bd40-f5512a80d45c_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0fb6305b1fd8ae9b6bd3f2b15551baa65d31b6b16b36c1be81045de1c4559fc8
|
| 3 |
+
size 925526
|
accordclosingthecommonsensemeasurabilitygap/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
accordclosingthecommonsensemeasurabilitygap/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:478311d89390a0044d7b70d8b0db99c4098d781541fab9ea1bcec160bcbd32ae
|
| 3 |
+
size 801583
|
accordclosingthecommonsensemeasurabilitygap/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3adfd07a30e40bebaddc300ec6f86d6a80af9f5f1b0ebadfad0ce03450c10973
|
| 3 |
+
size 1287454
|
acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/4d3753eb-da4e-4fdc-b38d-da331bf2b58e_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b854a140870b9f829fb963c0d2ef58b28cccc946426ea76bc1d6eeab5c3d066d
|
| 3 |
+
size 132758
|
acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/4d3753eb-da4e-4fdc-b38d-da331bf2b58e_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:85dca4c5126c9c5fecb07ad31c75727fa95ff670dffbeeac701703b6a1ade8ea
|
| 3 |
+
size 169697
|
acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/4d3753eb-da4e-4fdc-b38d-da331bf2b58e_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bb79319b9068c75bcac86bb28a347f849d595bc92081386c8b47d73c40d13404
|
| 3 |
+
size 1592206
|
acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/full.md
ADDED
|
@@ -0,0 +1,558 @@
|
| 1 |
+
# A Cognitive Evaluation Benchmark of Image Reasoning and Description for Large Vision-Language Models
|
| 2 |
+
|
| 3 |
+
Xiujie Song $^{1}$ , Mengyue Wu $^{1*}$ , Kenny Q. Zhu $^{2*}$ , Chunhao Zhang $^{1}$ , Yanyi Chen $^{3}$
|
| 4 |
+
|
| 5 |
+
$^{1}$X-LANCE Lab, Department of Computer Science and Engineering, MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, Shanghai, China; $^{2}$University of Texas at Arlington, Arlington, Texas, USA; $^{3}$University of Chicago, Chicago, Illinois, USA. $^{1}$\{xiujiesong, mengyuewu\}@sjtu.edu.cn, $^{2}$kenny.zhu@uta.edu
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Large Vision-Language Models (LVLMs), despite their recent success, have hardly been comprehensively tested for their cognitive abilities. Inspired by the prevalent use of the Cookie Theft task in human cognitive tests, we propose a novel evaluation benchmark to evaluate the high-level cognitive abilities of LVLMs using images with rich semantics. The benchmark consists of 251 images along with comprehensive annotations. It defines eight reasoning capabilities and comprises an image description task and a visual question answering task. Our evaluation of well-known LVLMs shows that there is still a significant gap in cognitive abilities between LVLMs and humans<sup>1</sup>.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Recently, with the emergence of Large Language Models (LLMs) such as GPT-4 (OpenAI, 2023), the cognitive abilities of language models have reached a new level (Zhuang et al., 2023). They demonstrate remarkable performance in many tasks (Bubeck et al., 2023). In Vision-Language (VL), several researchers (Zhu et al., 2023b; Liu et al., 2023b; Ye et al., 2023) endeavor to boost Vision-Language Pre-trained Models (VLPMs) by integrating powerful LLMs (Touvron et al., 2023; Chiang et al., 2023), referred to as Large Vision-Language Models (LVLMs) (Li et al., 2023c). With an LLM serving as the "brain", the cognitive abilities of LVLMs are enhanced, enabling them to tackle more challenging tasks (Yang et al., 2023). Some state-of-the-art LVLMs, such as GPT-4o (OpenAI, 2023), are progressing toward human-level cognitive abilities. Thus, there is growing interest in evaluating the cognitive abilities of LVLMs. Though some LVLM evaluation benchmarks, such as MME (Fu et al., 2023), MMBench (Liu et al., 2023c), and SEED-Bench (Li et al., 2023a),
|
| 14 |
+
|
| 15 |
+
evaluate cognitive reasoning ability as one aspect of their evaluation, they do not provide a comprehensive evaluation of higher-level reasoning abilities. Most of the images they use carry relatively little semantic content and thus require little reasoning to understand.
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Figure 1: Cookie Theft picture description task. The descriptions in the green frame and the orange frame were respectively produced by a healthy 75-year-old man and a 66-year-old woman with probable AD dementia $^2$ .
|
| 19 |
+
|
| 20 |
+
In our study, we draw inspiration from the Cookie Theft picture description task (Figure 1), a key component of the Boston Diagnostic Aphasia Examination (Goodglass et al., 2001), which
|
| 21 |
+
|
| 22 |
+
is widely used in clinical practice within speech-language pathology for language and cognitive function screening (Cummings, 2019; Mueller et al., 2018). Notably, despite being designed more than half a century ago, this picture remains prevalent in contemporary psychological discussions.
|
| 23 |
+
|
| 24 |
+
Is it possible to transfer the success of the Cookie Theft picture description in human cognitive tests to evaluating the cognitive abilities of LVLMs? Linguists and psychologists (Cummings, 2019) conducted an analysis to determine the factors contributing to the success of the Cookie Theft picture. The study reveals that the narrative includes information with varying levels of importance and encompasses a broad range of semantic categories. It is observed that during the description of the Cookie Theft picture, individuals with intact cognitive functions exhibit their cognitive prowess by logically deducing the events and their interconnections, the relationships between characters and their mental states, etc. In contrast, those with cognitive impairments tend to merely list the superficial aspects of the situation without deeper reasoning. For instance, in Figure 1, by comparing the descriptions produced by the healthy man and the woman with probable Alzheimer's disease (AD) dementia, we can identify the following differences:
|
| 25 |
+
|
| 26 |
+
- The description produced by the healthy man used “mother” instead of “lady”, indicating reasoning about character relationship.
|
| 27 |
+
- The healthy man used "stealing cookies" instead of "taking cookies", indicating his reasoning about this high-level event. The patient's description did not even mention this event.
|
| 28 |
+
- The healthy man used "the mother is preoccupied" and "happy" to describe people's mental state.
|
| 29 |
+
- The description reflected the causal relationships between events. "The kids are stealing cookies", because "the mother is preoccupied" and "not paying attention to her kids".
|
| 30 |
+
|
| 31 |
+
Through these reasoning processes, the difference in cognitive abilities between the two individuals is reflected in their descriptions. A picture that can evaluate cognitive functions needs to be carefully designed and crafted. Tasnim et al. (2022) introduced guidelines for drawing pictures similar to
|
| 32 |
+
|
| 33 |
+
Cookie Theft, which are consistent with the findings mentioned above. Generally speaking, compared to ordinary images, Cookie Theft-like images feature i) a prominent story theme, ii) richer content, and iii) more complex relationships among entities, and thus require stronger cognitive abilities to understand and describe.
|
| 34 |
+
|
| 35 |
+
With the above design principles, we propose to construct a Cognitive Evaluation Benchmark, referred to as CogBench, to evaluate cognitive abilities of LVLMs mainly from the reasoning perspective using high-quality Cookie Theft-like images. CogBench defines eight core cognitive reasoning capabilities, including reasoning about special time, location, character, character relationship, event, event relationship, next moment event and mental state. Both a generative Image Description task and a discriminative Visual Question Answering (VQA) task are designed. Our main contributions are as follows:
|
| 36 |
+
|
| 37 |
+
- To the best of our knowledge, this is the first-of-its-kind attempt to incorporate the concept of the well-known Cookie Theft picture description task, originally designed for human cognitive testing, into the cognitive evaluation of LVLMs.
|
| 38 |
+
- Taking insights from human cognition research, we are the first to define Cookie Theft-like images with eight reasoning dimensions and to create a dataset of semantically complex images aligned with the Cookie Theft picture. This approach allows for a comprehensive evaluation of the visual reasoning capabilities of LVLMs across these dimensions.
|
| 39 |
+
- Our evaluation of existing LVLMs shows that a significant gap exists between the cognitive abilities of LVLMs and human beings, indicating CogBench will be a valuable evaluation benchmark in the near future.
|
| 40 |
+
|
| 41 |
+
# 2 Dataset Construction
|
| 42 |
+
|
| 43 |
+
In this section, we will introduce the construction of CogBench, detailing its image collection, annotation process, tasks, and data statistics.
|
| 44 |
+
|
| 45 |
+
# 2.1 Image Collection
|
| 46 |
+
|
| 47 |
+
Building on previous studies (Cummings, 2019; Tasnim et al., 2022), we establish the following criteria for collecting the Cookie Theft-like images we propose:
|
| 48 |
+
|
| 49 |
+

|
| 50 |
+
(a)
|
| 51 |
+
|
| 52 |
+

|
| 53 |
+
Images for visual reasoning from previous datasets
|
| 54 |
+
(b)
|
| 55 |
+
|
| 56 |
+

|
| 57 |
+
(c)
|
| 58 |
+
|
| 59 |
+

|
| 60 |
+
(d)
|
| 61 |
+
|
| 62 |
+

|
| 63 |
+
Images in CogBench (Ours)
|
| 64 |
+
(e)
|
| 65 |
+
|
| 66 |
+

|
| 67 |
+
(f)
|
| 68 |
+
|
| 69 |
+

|
| 70 |
+
(g)
|
| 71 |
+
Figure 2: Comparison between our images and those from previous visual reasoning tasks. Our images contain rich entities and CoRs. Compared to our images, image (a) has fewer entities and CoRs, while image (b) and (c) have some entities but fewer CoRs.
|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
(h)
|
| 75 |
+
|
| 76 |
+
Rule 1: Storytelling The image depicts an interesting story. For instance, the Cookie Theft picture tells the story of a mother busy washing dishes while two kids take the opportunity to stand on a stool and sneakily steal cookies.
|
| 77 |
+
|
| 78 |
+
Rule 2: Rich Chain-of-Reasonings Images should display rich Chain-of-Reasonings (CoRs) in a scene. A CoR connects low-level observations in an image to produce a high-level reasoning conclusion or connects the cause and effect of events. For example, "The mother is busy washing dishes. + The boy is standing on the stool behind the mother. + The girl standing by the boy is shushing him. + The boy is fetching cookies from the jar in the cabinet. → The boy and girl are stealing cookies." is a CoR about the high-level event "stealing cookies". Note that a story is usually constructed through various CoRs.
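To make the CoR structure concrete, the following minimal sketch shows one way a CoR could be represented in code. The `CoR` class and its field names are our own illustration, not an official CogBench schema.

```python
from dataclasses import dataclass
from typing import List

@dataclass
class CoR:
    """A Chain-of-Reasoning: low-level observations supporting one
    high-level conclusion. Field names are illustrative only."""
    premises: List[str]  # low-level observations ("+"-joined above)
    conclusion: str      # high-level inference (right of the arrow)
    dimension: str       # one of the eight reasoning dimensions (Sec. 2.2)

# The "stealing cookies" CoR from Rule 2, encoded in this form:
cookie_theft_cor = CoR(
    premises=[
        "The mother is busy washing dishes.",
        "The boy is standing on the stool behind the mother.",
        "The girl standing by the boy is shushing him.",
        "The boy is fetching cookies from the jar in the cabinet.",
    ],
    conclusion="The boy and girl are stealing cookies.",
    dimension="Event Reasoning",
)
```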
|
| 79 |
+
|
| 80 |
+
Rule 3: Restricted Content Complexity Images should contain rich content but not be overly complex. The number of entities should be sufficient to support a good story while being restricted to emphasize the key points effectively.
|
| 81 |
+
|
| 82 |
+
With the above criteria, we manually collect
|
| 83 |
+
|
| 84 |
+
pictures from Pinterest<sup>3</sup>, and the Cookie Theft picture is also included. Figure 2 illustrates the differences between our images and those from other datasets with examples. Note that most of the images in CogBench are in a painting style because they are abstracted from reality and tend to contain richer CoRs than real-world images, making them more effective for picture description tasks. Even so, painting-style images that meet our criteria are still rare, highlighting the value of our dataset.
|
| 85 |
+
|
| 86 |
+
# 2.2 Image Annotation
|
| 87 |
+
|
| 88 |
+
Human annotators, mostly undergraduate or graduate students aged 18-28, are hired to annotate the collected images. As shown in Figure 3, the annotation includes three parts: [Entities], [CoRs] and [Description]. By annotating [Entities] and [CoRs], we aim to evaluate the low-level recognition ability and high-level cognitive reasoning ability of models respectively based on their descriptions. [Description] is annotated as the reference description for the image. The three parts are annotated in that order.
|
| 89 |
+
|
| 90 |
+
[Entity Annotation] We ask annotators to list as many entities in the image as possible, omitting entities that are difficult to recognize.
|
| 91 |
+
|
| 92 |
+
[CoR Annotation] To evaluate model cognition in a fine-grained manner, we design eight reasoning dimensions based on studies in human cognition (Cummings, 2019; Byom and Mutlu, 2013; Addis et al., 2007) and Computer Vision (CV) (Fu et al., 2022; Park et al., 2020; Zellers et al., 2019; Shen et al., 2020). CoRs for these dimensions are annotated:
|
| 93 |
+
|
| 94 |
+
- Special Time Reasoning: reasoning about the special time of the story in the image, e.g., festivals.
|
| 95 |
+
- Location Reasoning: reasoning about the location of the story in the image, e.g., near a school.
|
| 96 |
+
- Character Reasoning: reasoning about the characters of subjects in the image, e.g., a doctor.
|
| 97 |
+
- Character Relationship Reasoning: reasoning about the relationships between characters in the image, e.g., "the woman is the mother of the kids."
|
| 98 |
+
|
| 99 |
+

|
| 100 |
+
[ENTITIES]: men, women, bus stop, sign, phone booth, newspaper, coat, buildings, snow, wind.
|
| 101 |
+
Figure 3: An example of the Description task from CogBench.
|
| 102 |
+
|
| 103 |
+
# [SPECIAL TIME REASONING]:
|
| 104 |
+
|
| 105 |
+
There is some snow on the ground. $\rightarrow$ It is winter.
|
| 106 |
+
|
| 107 |
+
# [LOCATION REASONING]:
|
| 108 |
+
|
| 109 |
+
There is a sign that says "NO STANDING BUS STOP". $\rightarrow$ It is near a bus stop.
|
| 110 |
+
|
| 111 |
+
# [EVENT REASONING]:
|
| 112 |
+
|
| 113 |
+
It is near a bus stop. + A man is standing on the road side, and peering out onto the road. + A man is standing in a phone booth. + There are also two women. → People are probably waiting for a bus here.
|
| 114 |
+
It is windy and cold. + A man is standing in a phone booth reading a newspaper. → The man is sheltering from the cold wind in the phone booth.
|
| 115 |
+
|
| 116 |
+
# [EVENT RELATIONSHIP REASONING]:
|
| 117 |
+
|
| 118 |
+
It is windy and cold. + A man is standing in a phone booth reading a newspaper. $\rightarrow$ Two women are looking at the man.
|
| 119 |
+
|
| 120 |
+
# [MENTAL STATE REASONING]:
|
| 121 |
+
|
| 122 |
+
Inside a phone booth, a man is smiling while looking at a newspaper. + It is windy and cold outside. $\rightarrow$ The man in the phone booth is happy.
|
| 123 |
+
|
| 124 |
+
# [DESCRIPTION]:
|
| 125 |
+
|
| 126 |
+
There is some snow on the ground and it is windy, indicating it is winter. There are two men and two women standing on the roadside. There is a sign that says "NO STANDING BUS STOP", indicating it is near a bus stop. A man is standing on the road side, wrapping his coat tightly around himself, and peering out onto the road. They are probably waiting for a bus here. We can see it is cold. Inside a phone booth, a man is smiling while looking at a newspaper. He is sheltering from the cold wind in the phone booth. He looks happy, because it is warm there. The two women are also wrapping their coats tightly and looking at the man in the phone booth. They are probably friends and standing together. They are unhappy with the man. There are some buildings by the road.
|
| 127 |
+
|
| 128 |
+
- Event Reasoning: reasoning about the high-level events in the current and previous moments in the image. The difference between high-level and low-level events lies in the amount of semantic information they contain. For example, "stealing cookies" is a higher-level event compared to "taking cookies" as it additionally conveys the semantic of "taking advantage without permission or knowledge."
|
| 129 |
+
- Event Relationship Reasoning: reasoning about the causal and temporal relationships between different events in the image. For instance, "the sink is overflowing because the mother left the tap on."
|
| 130 |
+
- Next Moment Event Reasoning: reasoning about the events that will happen in the next moment in the image. For example, "the police officer will reprimand the boy who violates the rules."
|
| 131 |
+
- Mental State Reasoning: reasoning about the mental states of subjects in the image, including their emotions, thoughts, and other psychological states. For example, "the girl appears to be happy."
|
| 132 |
+
|
| 133 |
+
[Description Summary] Annotators are finally asked to write a description that conveys the entire story in the image based on the annotated [Entities] and [CoRs].
|
| 134 |
+
|
| 135 |
+
The complete annotation instruction can be found in Appendix A. Considering that different people may understand some images differently, we ask three annotators to annotate each image. Then, we draw on the idea of majority voting to merge the three annotations into one.
|
| 136 |
+
|
| 137 |
+
For [Entities] and [CoRs], we first accept most of the entities and CoRs that are annotated by at least two annotators. Other entities and CoRs are also included if reasonable. The final [Description] is obtained by modifying the best annotated [Description] with the merged [Entities] and [CoRs]. We discard images where there is a significant difference in understanding among the three annotators.
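The at-least-two-annotators rule can be sketched as a simple vote count. The helper below is a minimal illustration only; the `merge_by_majority` name and the lower-casing normalization are our assumptions, and the manual review of singleton items is not shown.

```python
from collections import Counter
from typing import List

def merge_by_majority(annotations: List[List[str]], min_votes: int = 2) -> List[str]:
    """Keep items (entities or CoRs) proposed by at least `min_votes`
    of the annotators. Lower-casing stands in for whatever string
    normalization the real pipeline applies."""
    votes = Counter()
    for one_annotator in annotations:
        for item in {s.strip().lower() for s in one_annotator}:
            votes[item] += 1
    return sorted(item for item, n in votes.items() if n >= min_votes)

# Three annotators' entity lists for one image:
print(merge_by_majority([
    ["men", "women", "phone booth", "snow"],
    ["men", "women", "newspaper", "snow"],
    ["women", "phone booth", "snow", "sign"],
]))  # -> ['men', 'phone booth', 'snow', 'women']
```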
|
| 138 |
+
|
| 139 |
+
# 2.3 Tasks in CogBench
|
| 140 |
+
|
| 141 |
+
We design a generative Image Description task and a discriminative Multiple-Choice Question Answering task in CogBench.
|
| 142 |
+
|
| 143 |
+
# 2.3.1 Image Description Task
|
| 144 |
+
|
| 145 |
+
This is the primary task of the benchmark. The difference between our description task and existing image description tasks (Xie et al., 2022; Zhu et al., 2023a; Zhuge et al., 2023) is that we expect LVLMs to understand and describe the story in the image through high-level cognitive reasoning. For instance, in Figure 3, the description of the image should not only include what is in the picture but also focus on elucidating the story of "on a cold winter day, a man is reading a newspaper in a phone booth near the bus stop to escape the cold, while two passing women express their displeasure upon seeing that" through a series of reasoning processes.
|
| 146 |
+
|
| 147 |
+
# 2.3.2 Visual Question Answering Task
|
| 148 |
+
|
| 149 |
+
The VQA task features standard four-option multiple-choice questions, easing the evaluation process. Like the Description task, VQA questions involve different types of high-level cognitive reasoning, as illustrated by the question about event
|
| 150 |
+
|
| 151 |
+
# [EVENT REASONING]:
|
| 152 |
+
|
| 153 |
+
It is windy and cold. + A man is standing in a phone booth reading a newspaper. $\rightarrow$ The man is sheltering from the cold wind in the phone booth.
|
| 154 |
+
|
| 155 |
+
# [EVENT REASONING]:
|
| 156 |
+
|
| 157 |
+
What is the man doing in the phone booth?
|
| 158 |
+
A. Making a phone call.
|
| 159 |
+
B. Reading a book.
|
| 160 |
+
C. Avoiding someone he doesn't want to see.
|
| 161 |
+
D. Sheltering from the wind.
|
| 162 |
+
Correct Answer: [D]
|
| 163 |
+
|
| 164 |
+
Figure 4: Generating a multiple-choice question based on an [Event Reasoning] CoR annotation.
|
| 165 |
+
|
| 166 |
+
in Figure 4. We use GPT-4 to assist in generating questions based on the annotations from Section 2.2. With the annotated CoRs, both the conclusion (right side of $\rightarrow$ ) and the reasoning behind it (left side of $\rightarrow$ ) in each CoR can be used to generate questions and corresponding options, as depicted in Figure 4. These components in each CoR provide the correct options directly to the questions generated based on them. Specifically, this process is two-fold. 1) Automated Question Generation: We use GPT-4 to generate questions for CogBench images, tailoring prompts for each reasoning category to produce CoR-related questions. The key point is to prompt GPT-4 to generate higher-quality distractors. An example prompt for this CoR-based GPT-assisted question generation approach is provided in Appendix B. 2) Manual Refinement: Despite GPT-4's capabilities, some generated questions are not challenging enough. In this stage, we manually refine the questions, ensuring they do not overtly favor the correct answer and that distractors are closely related to the question and misleading. Additionally, ChatGPT aids in identifying and filtering out simple questions that can be answered without image input.
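As a rough illustration of step 1), the sketch below assembles a CoR-conditioned generation prompt. The wording is our paraphrase of the idea; the actual per-dimension prompts are given in Appendix B, and `build_question_prompt` is a hypothetical helper.

```python
def build_question_prompt(premises, conclusion, dimension):
    """Assemble a CoR-conditioned prompt for question generation.
    The phrasing here is illustrative, not the paper's exact prompt."""
    chain = " + ".join(premises)
    return (
        f"You are writing a four-option multiple-choice question testing "
        f"{dimension} about an image.\n"
        f"Reasoning chain: {chain} -> {conclusion}\n"
        "Write one question whose correct answer is the conclusion above, "
        "plus three distractors that are plausible in the scene but "
        "contradicted by the reasoning chain. Do not reveal the answer "
        "in the question."
    )

prompt = build_question_prompt(
    ["It is windy and cold.",
     "A man is standing in a phone booth reading a newspaper."],
    "The man is sheltering from the cold wind in the phone booth.",
    "Event Reasoning",
)
# `prompt` is then sent to GPT-4; the outputs are manually refined and
# filtered with ChatGPT as described above.
```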
|
| 167 |
+
|
| 168 |
+
# 2.4 CogBench Statistics
|
| 169 |
+
|
| 170 |
+
CogBench consists of 251 semantically rich images with a total of 2670 entities, 2243 CoRs, 251 descriptions and 2577 questions (on average, about 10.6 entities, 8.9 CoRs, and 10.3 questions per image), indicating that the content of each image is complex, as shown in Table 1. The large numbers of event-related and [Mental State Reasoning] CoRs reflect the rich, interesting stories in the images.
|
| 171 |
+
|
| 172 |
+
# 3 Experiments
|
| 173 |
+
|
| 174 |
+
In this section, we evaluate the selected LVLMs, detail our evaluation strategies, and discuss the results.
|
| 175 |
+
|
| 176 |
+
# 3.1 Large Vision-Language Models
|
| 177 |
+
|
| 178 |
+
We evaluate a selection of recent representative open-source and closed-source LVLMs, including
|
| 179 |
+
|
| 180 |
+
InstructBLIP-7B (Dai et al., 2023), Qwen-VL series models (Bai et al., 2023; Wang et al., 2024), mPLUG-Owl-2 (Ye et al., 2023), LLaVA series models (Liu et al., 2023a, 2024; Li et al., 2024), ShareGPT4V (Chen et al., 2023a), CogVLM series models (Wang et al., 2023; Hong et al., 2024), InternVL2-26B (Chen et al., 2023c), GPT-4V and GPT-4o (OpenAI, 2023). A brief introduction to these models is provided in Appendix C.
|
| 181 |
+
|
| 182 |
+
# 3.2 CogBench Evaluation Strategy
|
| 183 |
+
|
| 184 |
+
# 3.2.1 Evaluation of the Description Task
|
| 185 |
+
|
| 186 |
+
Evaluation Modes For the Description task, we set up two evaluation modes: Spontaneous Description and Directed Reasoning. In the Spontaneous Description mode, we prompt the LVLMs with the following instruction to obtain detailed image descriptions: "Describe this image in detail." This mode is more similar to the Cookie Theft picture description task, which aims to stimulate spontaneous descriptions (Matias-Guiu et al., 2022). It can help analyze the behavior of LVLMs when they describe images. For the Directed Reasoning mode, the corresponding prompt is: "Please provide a detailed description of the story depicted in the image, including high-level reasoning about the time and location, the roles and relationships of the characters, the events and their causal relationships, what might happen next, and the mental states of the characters." This mode simplifies the Description task compared to the Spontaneous Description mode and aims to evaluate whether models can reason correctly when they know the directions we expect.
|
| 187 |
+
|
| 188 |
+
Evaluation Metrics We consider model performance at two levels: low-level Recognition ability and high-level Cognition ability. Evaluation metrics for both levels are calculated based on recall scores, referred to as Recognition Score and Cognition Score, respectively.
|
| 189 |
+
|
| 190 |
+
The Recognition Score is calculated as the ratio of recognized [Entities] to annotated [Entities] across all images. First, we use $\mathsf{spaCy}^4$ to extract nouns from the model-generated description, and use sentence-transformers$^{5}$ to encode the annotated [Entities] and extracted nouns into embeddings.
|
| 191 |
+
|
| 192 |
+
<table><tr><td></td><td>Time</td><td>Location</td><td>Character</td><td>Character Relationship</td><td>Event</td><td>Event Relationship</td><td>Next Moment Event</td><td>Mental State</td></tr><tr><td>CoR</td><td>47</td><td>177</td><td>106</td><td>263</td><td>701</td><td>425</td><td>107</td><td>417</td></tr><tr><td>QA</td><td>86</td><td>220</td><td>162</td><td>317</td><td>658</td><td>402</td><td>135</td><td>597</td></tr></table>

Table 1: Distribution of CoRs and questions in CogBench.
|
| 193 |
+
|
| 194 |
+
Then, we calculate the cosine similarity between the embeddings of the [Entities] and the nouns. For each entity, if the cosine similarity score between the entity and any noun is greater than a threshold (0.6 in this paper), we consider the entity to be recognized by the model.
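A minimal sketch of this Recognition Score computation is shown below. The paper names only the libraries, so the `en_core_web_sm` spaCy pipeline and the `all-MiniLM-L6-v2` sentence-transformers checkpoint are our assumptions.

```python
import spacy
from sentence_transformers import SentenceTransformer, util

# Model choices are our assumptions; the paper only names the libraries.
nlp = spacy.load("en_core_web_sm")
encoder = SentenceTransformer("all-MiniLM-L6-v2")

def recognition_score(descriptions, entity_lists, threshold=0.6):
    """Recall of annotated [Entities] over all images: an entity counts
    as recognized if its embedding is within cosine similarity
    `threshold` of any noun extracted from the generated description."""
    recognized, total = 0, 0
    for desc, entities in zip(descriptions, entity_lists):
        total += len(entities)
        nouns = [tok.text for tok in nlp(desc) if tok.pos_ == "NOUN"]
        if not nouns or not entities:
            continue
        sims = util.cos_sim(encoder.encode(entities), encoder.encode(nouns))
        recognized += int((sims.max(dim=1).values > threshold).sum())
    return recognized / total
```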
|
| 195 |
+
|
| 196 |
+
For the Cognition Score, we calculate the scores for each of the eight cognitive reasoning dimensions, as well as an overall score using GPT-4. To enhance objectivity and granularity, GPT-4 is utilized for a binary classification task to assess if a generated description includes the semantics of each annotated CoR. For reasoning types other than [Event Relationship Reasoning], we task GPT-4 with determining whether the conclusion in each CoR is mentioned in the description. For [Event Relationship Reasoning], we task GPT-4 with determining whether each causal relationship between events (i.e., the entire CoR), as annotated, is present in the description. The CoR scores for each dimension are then used to compute a recall score for each respective type. The overall Cognition Score is the sum of all CoR scores divided by the total number of CoRs. The corresponding prompts are shown in Appendix D.
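The aggregation of these binary judgments into per-dimension and overall scores can be sketched as follows. The `judge` callable stands in for the GPT-4 classifier prompted as in Appendix D; the function name and record layout are ours.

```python
from collections import defaultdict

def cognition_scores(cor_records, judge):
    """Aggregate binary CoR judgments into per-dimension recall and an
    overall score. Each record is (description, dimension, cor_text),
    where cor_text is the CoR conclusion (or the entire CoR for
    [Event Relationship Reasoning]); `judge(description, cor_text)`
    returns 1 if the CoR's semantics appear in the description."""
    hits, totals = defaultdict(int), defaultdict(int)
    for description, dimension, cor_text in cor_records:
        totals[dimension] += 1
        hits[dimension] += int(judge(description, cor_text))
    per_dimension = {d: hits[d] / totals[d] for d in totals}
    overall = sum(hits.values()) / sum(totals.values())
    return per_dimension, overall
```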
|
| 197 |
+
|
| 198 |
+
# 3.2.2 Evaluation of the VQA Task
|
| 199 |
+
|
| 200 |
+
For multiple-choice questions in the VQA task, we use accuracy as the evaluation metric. As questions are generated based on CoRs, we can also calculate the accuracy for each reasoning capability as well as the overall cognitive capability.
|
| 201 |
+
|
| 202 |
+
# 3.3 Results of the Description Task
|
| 203 |
+
|
| 204 |
+
We evaluate the LVLMs on the Description task in terms of both recognition and cognition abilities. As a reference, we also calculate traditional image captioning evaluation metrics by comparing the model-generated description with the annotated reference [Description], and details are shown in Appendix E.
|
| 205 |
+
|
| 206 |
+
# 3.3.1 Recognition
|
| 207 |
+
|
| 208 |
+
Table 2 shows the Recognition Scores of models on the Description task. InternVL2-26B, LLaVA-OV-7B-Chat, GPT-4o and Qwen2-VL achieve
|
| 209 |
+
|
| 210 |
+
relatively better performance than the other models, which means they can recognize and describe more entities. GPT-4V, CogVLM and CogVLM2 also demonstrate competitive performance. It can be concluded that the recognition ability of open-source LVLMs is approaching that of GPT-4o, with some models even surpassing it. Nevertheless, the remaining open-source LVLMs still show a significant gap in recognition capability compared to GPT-4, indicating they have room for development before reaching its level. Note that some models perform worse in Mode 2 than in Mode 1, which could be because they focus more on high-level semantic reasoning and overlook the description of some low-level entities. Finally, although many LVLMs perform well, such as InternVL2-26B and GPT-4o, they still miss many entities, suggesting room for improvement in recognition capability.
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
<table><tr><td rowspan="2">Model</td><td colspan="2">Recognition Score</td></tr><tr><td>Mode 1</td><td>Mode 2</td></tr><tr><td>InstructBLIP-7B</td><td>40.0</td><td>36.4</td></tr><tr><td>Qwen-VL-Chat</td><td>43.3</td><td>45.8</td></tr><tr><td>LLaVA-v1.5-7B</td><td>39.8</td><td>41.2</td></tr><tr><td>LLaVA-v1.5-13B</td><td>41.0</td><td>39.3</td></tr><tr><td>mPLUG-Owl-2</td><td>37.4</td><td>37.8</td></tr><tr><td>ShareGPT4V-7B</td><td>46.9</td><td>47.3</td></tr><tr><td>ShareGPT4V-13B</td><td>48.7</td><td>47.4</td></tr><tr><td>LLaVA-v1.6-vicuna-7B</td><td>49.3</td><td>51.1</td></tr><tr><td>LLaVA-v1.6-vicuna-13B</td><td>53.3</td><td>53.9</td></tr><tr><td>LLaVA-v1.6-34B</td><td>52.2</td><td>52.0</td></tr><tr><td>CogVLM-Chat</td><td>61.6</td><td>56.5</td></tr><tr><td>CogVLM2-Llama3-Chat</td><td>62.3</td><td>58.9</td></tr><tr><td>InternVL2-26B</td><td>70.7</td><td>65.9</td></tr><tr><td>Qwen2-VL-7B</td><td>66.3</td><td>59.6</td></tr><tr><td>LLaVA-OV-7B-Chat</td><td>67.4</td><td>72.3</td></tr><tr><td>GPT-4V</td><td>62.9</td><td>56.5</td></tr><tr><td>GPT-4o</td><td>66.8</td><td>65.1</td></tr><tr><td>Oracle</td><td colspan="2">92.8</td></tr></table>
|
| 215 |
+
|
| 216 |
+
Table 2: Recognition Scores of LVLMs on the Description task. Mode 1 and Mode 2 refer to the Spontaneous Description mode and the Directed Reasoning mode respectively. For reference, the Recognition Score of Oracle is calculated based on the annotated [Description] in CogBench dataset as an estimated upper bound. Numbers are presented in % with a full score of $100\%$ .
|
| 217 |
+
|
| 218 |
+
<table><tr><td>Model</td><td>Time</td><td>Location</td><td>Character</td><td>Character Relationship</td><td>Event</td><td>Event Relationship</td><td>Next Moment Event</td><td>Mental State</td><td>Overall</td></tr><tr><td>InstructBLIP-7B</td><td>14.9 / 17.0</td><td>54.2 / 59.9</td><td>24.5 / 29.2</td><td>30.0 / 41.1</td><td>10.3 / 9.3</td><td>4.9 / 5.6</td><td>1.9 / 6.5</td><td>17.3 / 22.8</td><td>16.7 / 19.8</td></tr><tr><td>Qwen-VL-Chat</td><td>23.4 / 25.5</td><td>57.1 / 58.8</td><td>28.3 / 31.1</td><td>29.3 / 50.6</td><td>15.1 / 19.4</td><td>12.7 / 13.2</td><td>3.7 / 9.3</td><td>12.0 / 25.7</td><td>19.3 / 26.3</td></tr><tr><td>LLaVA-v1.5-7B</td><td>8.5 / 19.1</td><td>45.2 / 59.3</td><td>15.1 / 18.9</td><td>18.3 / 40.7</td><td>8.8 / 8.6</td><td>4.9 / 5.6</td><td>1.9 / 1.9</td><td>13.2 / 21.8</td><td>12.8 / 18.6</td></tr><tr><td>LLaVA-v1.5-13B</td><td>12.8 / 14.9</td><td>48.6 / 55.9</td><td>17.9 / 19.8</td><td>24.3 / 39.5</td><td>9.8 / 9.3</td><td>4.7 / 5.9</td><td>3.7 / 4.7</td><td>16.3 / 21.3</td><td>15.0 / 18.5</td></tr><tr><td>mPLUG-Owl-2</td><td>6.4 / 12.8</td><td>48.0 / 57.1</td><td>23.6 / 22.6</td><td>21.3 / 41.8</td><td>8.4 / 9.4</td><td>4.0 / 4.5</td><td>1.9 / 3.7</td><td>13.7 / 17.3</td><td>13.6 / 17.9</td></tr><tr><td>ShareGPT4V-7B</td><td>19.1 / 14.9</td><td>60.5 / 58.8</td><td>20.8 / 25.5</td><td>22.4 / 35.0</td><td>10.0 / 12.6</td><td>4.0 / 6.4</td><td>2.8 / 3.7</td><td>15.1 / 17.5</td><td>15.6 / 18.8</td></tr><tr><td>ShareGPT4V-13B</td><td>23.4 / 17.0</td><td>57.1 / 60.5</td><td>23.6 / 29.2</td><td>26.2 / 37.6</td><td>12.4 / 12.4</td><td>6.6 / 7.3</td><td>2.8 / 3.7</td><td>14.4 / 16.1</td><td>17.1 / 19.3</td></tr><tr><td>LLaVA-v1.6-vicuna-7B</td><td>17.0 / 25.5</td><td>61.0 / 62.1</td><td>23.6 / 29.2</td><td>25.1 / 47.9</td><td>12.1 / 14.1</td><td>7.5 / 8.5</td><td>2.8 / 8.4</td><td>17.0 / 23.0</td><td>17.7 / 23.1</td></tr><tr><td>LLaVA-v1.6-vicuna-13B</td><td>17.0 / 27.7</td><td>63.3 / 65.5</td><td>27.4 / 33.0</td><td>23.6 / 42.2</td><td>15.1 / 15.8</td><td>9.6 / 9.9</td><td>0.9 / 2.8</td><td>17.7 / 26.4</td><td>19.3 / 24.1</td></tr><tr><td>LLaVA-v1.6-34B</td><td>25.5 / 27.7</td><td>65.5 / 68.4</td><td>28.3 / 47.2</td><td>22.8 / 48.7</td><td>16.8 / 18.1</td><td>10.8 / 13.4</td><td>3.7 / 10.3</td><td>16.3 / 30.5</td><td>20.2 / 28.3</td></tr><tr><td>CogVLM-Chat</td><td>29.8 / 46.8</td><td>75.1 / 72.3</td><td>40.6 / 50.0</td><td>23.6 / 45.2</td><td>28.1 / 29.1</td><td>21.6 / 27.3</td><td>4.7 / 7.5</td><td>27.1 / 33.3</td><td>29.4 / 35.2</td></tr><tr><td>CogVLM2-Llama3-Chat</td><td>34.0 / 31.9</td><td>73.4 / 75.7</td><td>46.2 / 49.1</td><td>24.7 / 41.1</td><td>28.4 / 32.2</td><td>26.8 / 32.5</td><td>5.6 / 8.4</td><td>30.2 / 40.0</td><td>31.4 / 37.9</td></tr><tr><td>InternVL2-26B</td><td>40.4 / 44.7</td><td>77.4 / 79.7</td><td>48.1 / 54.7</td><td>27.8 / 54.0</td><td>27.4 / 31.5</td><td>24.5 / 27.8</td><td>3.7 / 15.0</td><td>36.7 / 44.1</td><td>32.7 / 40.2</td></tr><tr><td>Qwen2-VL-7B</td><td>38.3 / 34.0</td><td>72.3 / 75.1</td><td>48.1 / 48.1</td><td>19.4 / 53.2</td><td>25.1 / 23.8</td><td>20.0 / 20.2</td><td>4.7 / 9.3</td><td>29.5 / 33.3</td><td>28.4 / 33.1</td></tr><tr><td>LLaVA-OV-7B-Chat</td><td>42.6 / 46.8</td><td>75.1 / 76.3</td><td>50.9 / 48.1</td><td>18.3 / 46.4</td><td>25.5 / 30.4</td><td>25.6 / 26.4</td><td>9.3 / 17.8</td><td>33.8 / 42.4</td><td>30.9 / 37.9</td></tr><tr><td>GPT-4V</td><td>40.4 / 40.4</td><td>74.0 / 70.1</td><td>44.3 / 49.1</td><td>34.2 / 57.4</td><td>28.0 / 33.2</td><td>25.4 / 29.2</td><td>10.3 / 18.7</td><td>42.7 / 51.6</td><td>34.8 / 
41.8</td></tr><tr><td>GPT-4o</td><td>38.3 / 51.1</td><td>79.7 / 82.5</td><td>50.9 / 62.3</td><td>29.3 / 73.4</td><td>42.2 / 48.8</td><td>42.1 / 50.6</td><td>11.2 / 34.6</td><td>43.2 / 58.5</td><td>42.7 / 56.5</td></tr><tr><td>Oracle</td><td>91.5</td><td>97.8</td><td>94.3</td><td>81.4</td><td>98.1</td><td>92.2</td><td>89.7</td><td>92.3</td><td>93.2</td></tr></table>
|
| 219 |
+
|
| 220 |
+
Table 3: Cognition Scores of LVLMs on the Description task evaluated by GPT-4. The results of the evaluation under the Spontaneous Description mode and Directed Reasoning mode are presented before and after the “/” in each table cell. For reference, the Cognition Scores of Oracle are calculated based on the annotated [Description] in CogBench dataset as an estimated upper bound. Numbers are presented in % with a full score of $100\%$ .
|
| 221 |
+
|
| 222 |
+
# 3.3.2 Cognition
|
| 223 |
+
|
| 224 |
+
General analysis. Table 3 shows the Cognition Scores of LVLMs on the Description task. GPT-4o achieves the best performance, and there is a large performance gap between GPT-4o and the open-source models. Among open-source models, InternVL2-26B, LLaVA-OV-7B-Chat, the CogVLM models and Qwen2-VL achieve relatively better performance, with some approaching that of GPT-4V. In terms of different reasoning capabilities, all LVLMs perform better on [Location Reasoning] than on the others, probably because it is a relatively low-level kind of reasoning. In contrast, for [Event Reasoning], [Event Relationship Reasoning], and [Next Moment Event Reasoning], most open-source LVLMs show lower performance. The Cognition Scores of some open-source LVLMs are only around or even below $10\%$ across these dimensions, indicating they barely understand the stories in the images. GPT-4o, by comparison, shows significantly better performance on these three kinds of reasoning. Moreover, although GPT-4o achieves the best performance, there is still a large gap between its Cognition Scores and the Oracle scores. This indicates that LVLMs still have considerable room for development in terms of cognitive abilities.
|
| 225 |
+
|
| 226 |
+
Analysis based on different evaluation modes. When the Directed Reasoning mode is applied, significant performance improvements in the LVLMs can be observed compared to the Spontaneous Description mode. This suggests that current LVLMs,
|
| 227 |
+
|
| 228 |
+
while potentially grasping some high-level semantic information in images, cannot spontaneously generate comprehensive descriptions through reasoning from various perspectives without appropriate prompts. Another intriguing finding is that for some open-source LVLMs, shifting from the Spontaneous Description mode to the Directed Reasoning mode results in noticeable performance improvements across various dimensions, such as [Character Relationship Reasoning]; however, for [Event Reasoning] and [Event Relationship Reasoning], the enhancements are rather limited. This further highlights their struggle to fully comprehend events in the images, with event-based reasoning being a key area where open-source LVLMs differ significantly from GPT-4o.
|
| 229 |
+
|
| 230 |
+
Analysis of recognition and cognition abilities. The performance gap between GPT-4o and open-source LVLMs in cognition is clearly larger than in recognition. This suggests that cognitive reasoning is a more advanced ability than recognition, and that these LVLMs still have significant room for development.
|
| 231 |
+
|
| 232 |
+
# 3.3.3 Case Study
|
| 233 |
+
|
| 234 |
+
Figure 5 shows a failure case of GPT-4o on the Description task under the Directed Reasoning mode. In terms of recognition, GPT-4o shows a good performance by recognizing most annotated entities such as men, women, buildings, phone booth, newspaper, coat, snow, sign and only fails to recognize bus stop and wind. However, GPT-4o
|
| 235 |
+
|
| 236 |
+

|
| 237 |
+
Figure 5: Case study of the Description task. The description is generated by GPT-4o in the Directed Reasoning mode. Recognized entities are marked in blue, and CoRs are marked in green.
|
| 238 |
+
|
| 239 |
+

|
| 240 |
+
|
| 241 |
+
The image depicts a scene set in what appears to be a bustling urban area, possibly mid-20th century based on the fashion, architecture, and presence of a classic phone booth. The scene unfolds on a snowy sidewalk in front of what seems to be a row of apartment buildings. Characters and their roles: 1. Man in the phone booth:- This man, dressed in a beige trench coat and fedora hat, is engaged in a phone conversation. He holds a newspaper and appears to be happily engrossed in his conversation, possibly laughing or smiling. 2. Two women on the sidewalk: - These women, bundled up in winter clothing (one in a fur coat and the other in a red coat), appear to be startled or alarmed. Their body language indicates they are huddled together, possibly sharing a moment of surprise or gossip while glancing furtively at the man. 3. Man bending over in the background: - This individual, whose back is turned to both the phone booth and the women, is bent over presumably to pick something up. He is wearing a dark overcoat and hat, suggesting he is also trying to stay warm in the cold weather.
|
| 242 |
+
|
| 243 |
+
Setting and Time: - The setting is an urban street, likely in North America, given the architecture and signage. The presence of snow and winter coats suggests it is in the winter season, likely mid-20th century based on the style of clothing and the design of the phone booth.
|
| 244 |
+
|
| 245 |
+
Events and Causal Relationships: - The man in the phone booth is happily engaged in his conversation, seemingly unaware of his surroundings. - The women seem startled or shocked, perhaps at something they have overheard or something they have seen. Their gaze and body language suggest they are reacting to a shared experience, possibly connected to the man in the phone booth. - The man in the background is not directly interacting with others, suggesting his actions are unrelated, yet his presence adds an element of everyday normalcy to the scene.
|
| 246 |
+
|
| 247 |
+
Mental States of Characters: - The man in the phone booth appears to be enjoying his conversation, perhaps sharing a joke or good news. - The women exhibit signs of surprise or shock, holding onto each other for comfort or out of fear of what they have encountered. - The man in the background seems focused on his immediate task, oblivious to the tension between the women and the man in the phone booth.
|
| 248 |
+
|
| 249 |
+
Possible Next Events: - The women may discuss or confront the man in the phone booth if he is the source of their alarm. - The man in the phone booth might finish his call oblivious to the commotion he's caused. - The man in the background may finish picking up whatever he is bending over for and continue on his way.
|
| 250 |
+
|
| 251 |
+
Overall, the image captures a moment of everyday life with an intriguing twist, prompting questions about the relationship between the characters and the source of the women's alarm.
|
| 252 |
+
|
| 253 |
+
Recognition Score: 80.0%
|
| 254 |
+
|
| 255 |
+
Cognition Score: $30.0\%$
|
| 256 |
+
|
| 257 |
+
fails to understand the story in the image, attaining a Cognition Score of only $30.0\%$. One of the most important reasons is that it does not recognize that the man is in the phone booth to escape the cold rather than to make a phone call. This case demonstrates how CogBench reveals that current LVLMs fall short in cognition, with a gap remaining between their cognitive abilities and human levels.
|
| 258 |
+
|
| 259 |
+
# 3.3.4 Effectiveness of GPT-based Evaluation
|
| 260 |
+
|
| 261 |
+
To validate the GPT-based cognition evaluation method, we manually scored the CoRs of 20 images on a binary scale (0/1) and compared the accuracy of various evaluation methods on this subset. Table 4 reveals that GPT-4 offers the highest accuracy, demonstrating that GPT-based evaluation aligns well with human assessment and is therefore an effective way to assess LVLMs' performance on the Description task. Implementation details of evaluation methods beyond ChatGPT and GPT-4 (Lin, 2004; Zhang et al., 2020; Sellam et al., 2020; He et al., 2021; Yin et al., 2021) can be found in Appendix F.
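Given both label sets, the accuracy in Table 4 reduces to simple agreement counting, as in this sketch (the variable names are hypothetical):

```python
def judge_accuracy(auto_labels, human_labels):
    """Fraction of CoRs on the validation subset where an automatic
    evaluator's binary decision (1 = CoR semantics present in the
    description, 0 = absent) matches the human 0/1 score."""
    assert len(auto_labels) == len(human_labels)
    return sum(a == h for a, h in zip(auto_labels, human_labels)) / len(human_labels)

# e.g. judge_accuracy(gpt4_labels, human_labels) would reproduce the
# 0.833 reported for GPT-4 in Table 4 (both label lists hypothetical).
```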
|
| 262 |
+
|
| 263 |
+
# 3.4 Results of the VQA Task
|
| 264 |
+
|
| 265 |
+
Table 5 shows the performance of LVLMs on the VQA task. GPT-4o achieves the best performance. Among open-source LVLMs, LLaVA-OV-7B-Chat, InternVL2-26B, LLaVA-v1.6-34B, CogVLM2, and Qwen2-VL-7B demonstrate better results, approaching the level of GPT-4o.
|
| 266 |
+
|
| 267 |
+
<table><tr><td>Model</td><td>Accuracy</td></tr><tr><td>ROUGE</td><td>0.656</td></tr><tr><td>BERTScore</td><td>0.635</td></tr><tr><td>BLEURT</td><td>0.620</td></tr><tr><td>DeBERTa</td><td>0.693</td></tr><tr><td>DocNLI</td><td>0.714</td></tr><tr><td>GPT-3.5</td><td>0.807</td></tr><tr><td>GPT-4</td><td>0.833</td></tr></table>
|
| 268 |
+
|
| 269 |
+
Table 4: CoR accuracy of cognition evaluation methods for the Description task.
|
| 270 |
+
|
| 271 |
+
Consistent with previous findings, reasoning about location is also the easiest for LVLMs, while the event-related reasoning dimensions are more difficult. There is also a large gap between the performance of LVLMs and humans. Note that the Human accuracy in Table 5 is calculated from the responses of five healthy people, all of whom hold a bachelor's degree and are between 20 and 30 years old. Furthermore, some LVLMs perform differently on the two tasks, e.g., LLaVA-v1.6-34B, which highlights the necessity of including both tasks in the design of CogBench.
|
| 272 |
+
|
| 273 |
+
# 4 Related Work
|
| 274 |
+
|
| 275 |
+
Evaluation Benchmark for LVLMs. To better understand the emerging capabilities of LVLMs, various evaluation benchmarks have been proposed. LVLM-eHub (Xu et al., 2023) evaluates LVLMs' capabilities across six categories using various publicly available CV datasets. MME (Fu et al.,
|
| 276 |
+
|
| 277 |
+
<table><tr><td>Model</td><td>Time</td><td>Location</td><td>Character</td><td>Character Relationship</td><td>Event</td><td>Event Relationship</td><td>Next Moment Event</td><td>Mental State</td><td>Overall</td></tr><tr><td>InstructBLIP-7B</td><td>60.5</td><td>71.4</td><td>48.8</td><td>54.9</td><td>40.3</td><td>36.8</td><td>46.7</td><td>47.6</td><td>47.4</td></tr><tr><td>Qwen-VL-Chat</td><td>65.1</td><td>82.3</td><td>60.5</td><td>54.3</td><td>50.9</td><td>45.0</td><td>47.4</td><td>51.1</td><td>54.0</td></tr><tr><td>LLaVA-V1.5-7B</td><td>58.1</td><td>81.4</td><td>54.3</td><td>54.6</td><td>45.9</td><td>45.0</td><td>54.1</td><td>52.6</td><td>52.8</td></tr><tr><td>LLaVA-V1.5-13B</td><td>69.8</td><td>82.3</td><td>65.4</td><td>59.9</td><td>50.2</td><td>47.3</td><td>57.8</td><td>57.1</td><td>57.3</td></tr><tr><td>mPLUG-Owl-2</td><td>51.2</td><td>81.8</td><td>58.6</td><td>54.6</td><td>46.0</td><td>47.5</td><td>47.4</td><td>51.8</td><td>52.7</td></tr><tr><td>ShareGPT4V-7B</td><td>58.1</td><td>80.5</td><td>63.6</td><td>53.6</td><td>48.8</td><td>40.3</td><td>51.1</td><td>54.1</td><td>53.4</td></tr><tr><td>ShareGPT4V-13B</td><td>67.4</td><td>80.0</td><td>65.4</td><td>56.5</td><td>49.4</td><td>49.8</td><td>60.0</td><td>54.6</td><td>56.3</td></tr><tr><td>LLaVA-v1.6-vicuna-7B</td><td>60.5</td><td>80.9</td><td>57.4</td><td>55.8</td><td>51.4</td><td>43.8</td><td>55.6</td><td>58.6</td><td>55.8</td></tr><tr><td>LLaVA-v1.6-vicuna-13B</td><td>66.3</td><td>85.9</td><td>64.8</td><td>59.3</td><td>58.2</td><td>53.7</td><td>65.9</td><td>60.0</td><td>61.5</td></tr><tr><td>LLaVA-v1.6-34B</td><td>80.2</td><td>92.7</td><td>83.3</td><td>74.8</td><td>68.4</td><td>66.9</td><td>68.8</td><td>74.2</td><td>73.7</td></tr><tr><td>CogVLM-Chat</td><td>73.3</td><td>87.7</td><td>75.9</td><td>66.9</td><td>58.5</td><td>53.2</td><td>63.0</td><td>63.7</td><td>64.2</td></tr><tr><td>CogVLM2-Llama3-Chat</td><td>73.3</td><td>92.3</td><td>86.4</td><td>76.0</td><td>71.9</td><td>62.9</td><td>67.4</td><td>71.9</td><td>73.5</td></tr><tr><td>InternVL2-26B</td><td>81.4</td><td>91.8</td><td>82.7</td><td>78.2</td><td>71.9</td><td>67.2</td><td>66.7</td><td>72.0</td><td>74.4</td></tr><tr><td>Qwen2-VL-7B</td><td>80.2</td><td>90.0</td><td>82.1</td><td>75.1</td><td>66.4</td><td>63.2</td><td>72.6</td><td>69.8</td><td>71.6</td></tr><tr><td>LLaVA-OV-7B-Chat</td><td>81.4</td><td>93.2</td><td>85.2</td><td>75.1</td><td>71.6</td><td>69.4</td><td>71.9</td><td>74.5</td><td>75.4</td></tr><tr><td>GPT-4V</td><td>70.9</td><td>81.8</td><td>72.8</td><td>63.7</td><td>63.4</td><td>66.9</td><td>68.9</td><td>69.2</td><td>68.0</td></tr><tr><td>GPT-4o</td><td>81.4</td><td>90.9</td><td>81.5</td><td>68.5</td><td>75.5</td><td>74.1</td><td>83.7</td><td>77.1</td><td>77.1</td></tr><tr><td>Human</td><td>98.8</td><td>95.9</td><td>98.8</td><td>94.3</td><td>95.6</td><td>96.0</td><td>96.3</td><td>93.3</td><td>95.3</td></tr></table>
|
| 278 |
+
|
| 279 |
+
Table 5: Model performance on the VQA task. Each QA contains four options, with a chance rate of $25\%$ . Numbers are presented in $\%$ with a full score of $100\%$ .
|
| 280 |
+
|
| 281 |
+
2023), MMBench (Liu et al., 2023c) and SEED-Bench (Li et al., 2023a) use True/False questions or multiple-choice questions to evaluate different abilities. MM-VET (Yu et al., 2024) evaluates LVLMs in terms of their integrated VL capabilities with open-ended questions. In contrast, CogBench focuses on high-level cognitive evaluation. Though some of them also consider cognition as one of the evaluation dimensions, they do not provide a comprehensive evaluation, and most images they use evaluate only limited aspects of reasoning.
|
| 282 |
+
|
| 283 |
+
Image Captioning. Image Captioning is a classical VL task (Zhou et al., 2022). As model capabilities advance, researchers strive to enhance their ability to describe images in detail. Krause et al. (2017) propose Image Paragraph Captioning, tasking models with generating a descriptive paragraph for an image. Recently, some researchers (Xie et al., 2022; Zhu et al., 2023a; Zhuge et al., 2023; Chen et al., 2023a) have been leveraging the ability of LLMs to generate more detailed image descriptions. None of these tasks considers evaluating the high-level cognitive abilities of models through description. HL dataset (Cafagna et al., 2023) requires models to generate high-level captions, but it only considers three aspects (scene, action, rationale). The content of most images in existing datasets does not reach the level of a story. This reveals the need for higher-level datasets like CogBench.
|
| 284 |
+
|
| 285 |
+
Visual Reasoning. The Visual Reasoning task is closely related to the cognitive abilities of models.
|
| 286 |
+
|
| 287 |
+
Visual Commonsense Reasoning (VCR) (Zellers et al., 2019) tasks models with answering visual questions using commonsense reasoning and justifying their answers. VisualCOMET (Park et al., 2020) is a framework of visual commonsense reasoning tasks to predict past events, future events, and present intents. Hessel et al. (2022) utilize images from VCR and Visual Genome (Krishna et al., 2017) to evaluate the ability of models to perform abductive reasoning. Fu et al. (2022) propose a task to identify the time and location of a given image. CURE (Chen et al., 2023b) is proposed to measure both the zero-shot reasoning performance and consistency of VLMs. Similarly, these tasks consider fewer kinds of reasoning, and CogBench can be seen as the next step of these efforts.
|
| 288 |
+
|
| 289 |
+
# 5 Conclusion
|
| 290 |
+
|
| 291 |
+
In this paper, we incorporate the concept of the Cookie Theft picture description task into the evaluation of high-level cognitive abilities of LVLMs and design a novel evaluation benchmark called CogBench. The images in CogBench are of high quality and require more complex cognitive reasoning for interpretation, distinguishing it from existing image datasets. Experiments reveal a significant gap between the cognitive abilities of LVLMs and humans, highlighting CogBench as a challenging benchmark.
|
| 292 |
+
|
| 293 |
+
# Limitations
|
| 294 |
+
|
| 295 |
+
Given the scarcity of images that meet our data collection standards, the number of images in CogBench is relatively limited.
|
| 296 |
+
|
| 297 |
+
However, the number of images in CogBench is comparable to that of existing evaluation benchmarks, such as MM-VET, and is sufficient to serve as a reliable evaluation benchmark.
|
| 298 |
+
|
| 299 |
+
# Ethical Considerations
|
| 300 |
+
|
| 301 |
+
Most images in CogBench are manually collected from Pinterest in accordance with its terms of service. The images are used under fair use for research purposes only and we will share our dataset with researchers who adhere to the ethical considerations established in this study. During the annotation process, we ensure that our annotators receive a fair wage and promptly address any questions they have. They are free to take breaks or quit the annotation task at any time.
|
| 302 |
+
|
| 303 |
+
# Acknowledgments
|
| 304 |
+
|
| 305 |
+
This work has been supported by the China NSFC Project (No. U23B2018). Kenny Q. Zhu was partly supported by NSF Award No. 2349713.
|
| 306 |
+
|
| 307 |
+
# References
|
| 308 |
+
|
| 309 |
+
Donna Rose Addis, Alana T. Wong, and Daniel L. Schacter. 2007. Remembering the past and imagining the future: Common and distinct neural substrates during event construction and elaboration. Neuropsychologia, 45(7):1363-1377.
|
| 310 |
+
Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. 2023. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. Preprint, arXiv:2308.12966.
|
| 311 |
+
Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with improved correlation with human judgments. In Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization, pages 65-72, Ann Arbor, Michigan. Association for Computational Linguistics.
|
| 312 |
+
Sebastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, et al. 2023. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712.
|
| 313 |
+
Lindsey J. Byom and Bilge Mutlu. 2013. Theory of mind: Mechanisms, methods, and new directions. Frontiers in Human Neuroscience, 7.
|
| 314 |
+
|
| 315 |
+
Michele Cafagna, Kees van Deemter, and Albert Gatt. 2023. HL dataset: Visually-grounded description of scenes, actions and rationales. In Proceedings of the 16th International Natural Language Generation Conference, pages 293-312.
|
| 316 |
+
Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. 2023a. ShareGPT4V: Improving large multimodal models with better captions. arXiv preprint arXiv:2311.12793.
|
| 317 |
+
Yangyi Chen, Karan Sikka, Michael Cogswell, Heng Ji, and Ajay Divakaran. 2023b. Measuring and improving chain-of-thought reasoning in vision-language models. arXiv preprint arXiv:2309.04461.
|
| 318 |
+
Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. 2023c. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv preprint arXiv:2312.14238.
|
| 319 |
+
Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. 2023. Vicuna: An open-source chatbot impressing GPT-4 with $90\%$ ChatGPT quality. See https://vicuna.lmsys.org (accessed 14 April 2023).
|
| 320 |
+
Louise Cummings. 2019. Describing the cookie theft picture: Sources of breakdown in Alzheimer's dementia. Pragmatics and Society, 10:151-174.
|
| 321 |
+
Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. 2023. InstructBLIP: Towards general-purpose vision-language models with instruction tuning. Preprint, arXiv:2305.06500.
|
| 322 |
+
Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. 2023. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394.
|
| 323 |
+
Xingyu Fu, Ben Zhou, Ishaan Chandratreya, Carl Vondrick, and Dan Roth. 2022. There's a time and place for reasoning beyond the image. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1138-1149, Dublin, Ireland. Association for Computational Linguistics.
|
| 324 |
+
Harold Goodglass, Edith Kaplan, and Sandra Weintraub. 2001. BDAE: The Boston Diagnostic Aphasia Examination. Lippincott Williams & Wilkins, Philadelphia, PA.
|
| 325 |
+
Pengcheng He, Jianfeng Gao, and Weizhu Chen. 2023. DeBERTaV3: Improving DeBERTa using ELECTRA-style pre-training with gradient-disentangled embedding sharing. In The Eleventh International Conference on Learning Representations.
|
| 326 |
+
|
| 327 |
+
Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. 2021. Deberta: Decoding-enhanced bert with disentangled attention. In International Conference on Learning Representations.
|
| 328 |
+
Jack Hessel, Jena D Hwang, Jae Sung Park, Rowan Zellers, Chandra Bhagavatula, Anna Rohrbach, Kate Saenko, and Yejin Choi. 2022. The abduction of sherlock holmes: A dataset for visual abductive reasoning. In European Conference on Computer Vision, pages 558-575. Springer.
|
| 329 |
+
Wenyi Hong, Weihan Wang, Ming Ding, Wenmeng Yu, Qingsong Lv, Yan Wang, Yean Cheng, Shiyu Huang, Junhui Ji, Zhao Xue, et al. 2024. Cogvlm2: Visual language models for image and video understanding. arXiv preprint arXiv:2408.16500.
|
| 330 |
+
Jonathan Krause, Justin Johnson, Ranjay Krishna, and Li Fei-Fei. 2017. A hierarchical approach for generating descriptive image paragraphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 317-325.
|
| 331 |
+
Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. 2017. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73.
|
| 332 |
+
Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. 2024. LLaVA-OneVision: Easy visual task transfer. arXiv preprint arXiv:2408.03326.
|
| 333 |
+
Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. 2023a. Seed-bench: Benchmarking multimodal LLMs with generative comprehension. arXiv preprint arXiv:2307.16125.
|
| 334 |
+
Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. 2023b. Blip-2: Bootstrapping language-image pretraining with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597.
|
| 335 |
+
Yifan Li, Yifan Du, Kun Zhou, Jinping Wang, Xin Zhao, and Ji-Rong Wen. 2023c. Evaluating object hallucination in large vision-language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 292-305, Singapore. Association for Computational Linguistics.
|
| 336 |
+
Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81.
|
| 337 |
+
Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. 2023a. Improved baselines with visual instruction tuning. Preprint, arXiv:2310.03744.
|
| 338 |
+
Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. 2024. LLaVA-NeXT: Improved reasoning, OCR, and world knowledge.
|
| 339 |
+
|
| 340 |
+
Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. 2023b. Visual instruction tuning. In NeurIPS.
|
| 341 |
+
Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. 2023c. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281.
|
| 342 |
+
Jordi A. Matias-Guiu, Paz Suárez-Coalla, Miguel Yus, Vanesa Pytel, Laura Hernández-Lorenzo, Cristina Delgado-Alonso, Alfonso Delgado-Álvarez, Natividad Gómez-Ruiz, Carmen Polidura, María Nieves Cabrera-Martín, Jorge Matías-Guiu, and Fernando Cuetos. 2022. Identification of the main components of spontaneous speech in primary progressive aphasia and their neural underpinnings using multimodal mri and fdg-pet imaging. Cortex, 146:141-160.
|
| 343 |
+
Kimberly D Mueller, Bruce Hermann, Jonilda Mecollari, and Lyn S Turkstra. 2018. Connected speech and language in mild cognitive impairment and alzheimer's disease: A review of picture description tasks. Journal of clinical and experimental neuropsychology, 40(9):917-939.
|
| 344 |
+
OpenAI. 2023. GPT-4 technical report. Preprint, arXiv:2303.08774.
|
| 345 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pages 311-318.
|
| 346 |
+
Jae Sung Park, Chandra Bhagavatula, Roozbeh Mottaghi, Ali Farhadi, and Yejin Choi. 2020. Visualcomet: Reasoning about the dynamic context of a still image. In In Proceedings of the European Conference on Computer Vision (ECCV).
|
| 347 |
+
Thibault Sellam, Dipanjan Das, and Ankur P Parikh. 2020. Bleurt: Learning robust metrics for text generation. In Proceedings of ACL.
|
| 348 |
+
Guangyao Shen, Xin Wang, Xuguang Duan, Hongzhi Li, and Wenwu Zhu. 2020. Memor: A dataset for multimodal emotion reasoning in videos. In Proceedings of the 28th ACM International Conference on Multimedia, MM '20, page 493-502, New York, NY, USA. Association for Computing Machinery.
|
| 349 |
+
Mashrura Tasnim, Malikeh Ehghaghi, Brian Diep, and Jekaterina Novikova. 2022. DEPAC: a corpus for depression and anxiety detection from speech. In Proceedings of the Eighth Workshop on Computational Linguistics and Clinical Psychology, pages 1-16, Seattle, USA. Association for Computational Linguistics.
|
| 350 |
+
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.
|
| 351 |
+
|
| 352 |
+
Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. 2015. Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4566-4575.
|
| 353 |
+
Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. 2024. Qwen2vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191.
|
| 354 |
+
Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Xixuan Song, Jiazheng Xu, Bin Xu, Juanzi Li, Yuxiao Dong, Ming Ding, and Jie Tang. 2023. Cogvlm: Visual expert for pretrained language models. Preprint, arXiv:2311.03079.
|
| 355 |
+
Yujia Xie, Luowei Zhou, Xiyang Dai, Lu Yuan, Nguyen Bach, Ce Liu, and Michael Zeng. 2022. Visual clues: Bridging vision and language foundations for image paragraph captioning. Advances in Neural Information Processing Systems, 35:17287-17300.
|
| 356 |
+
Peng Xu, Wenqi Shao, Kaipeng Zhang, Peng Gao, Shuo Liu, Meng Lei, Fanqing Meng, Siyuan Huang, Yu Qiao, and Ping Luo. 2023. Lvlm-ehub: A comprehensive evaluation benchmark for large vision-language models. arXiv preprint arXiv:2306.09265.
|
| 357 |
+
Zhengyuan Yang, Linjie Li, Kevin Lin, Jianfeng Wang, Chung-Ching Lin, Zicheng Liu, and Lijuan Wang. 2023. The dawn of LMMs: Preliminary explorations with GPT-4V(ision). arXiv preprint arXiv:2309.17421, 9(1):1.
|
| 358 |
+
Qinghao Ye, Haiyang Xu, Jiabo Ye, Ming Yan, Haowei Liu, Qi Qian, Ji Zhang, Fei Huang, and Jingren Zhou. 2023. mPLUG-Owl2: Revolutionizing multi-modal large language model with modality collaboration. arXiv preprint arXiv:2311.04257.
|
| 359 |
+
Wenpeng Yin, Dragomir Radev, and Caiming Xiong. 2021. DocNLI: A large-scale dataset for document-level natural language inference. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 4913-4922, Online. Association for Computational Linguistics.
|
| 360 |
+
Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. 2024. Mm-vet: Evaluating large multimodal models for integrated capabilities. In International conference on machine learning. PMLR.
|
| 361 |
+
Rowan Zellers, Yonatan Bisk, Ali Farhadi, and Yejin Choi. 2019. From recognition to cognition: Visual commonsense reasoning. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR).
|
| 362 |
+
|
| 363 |
+
Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. Bertscore: Evaluating text generation with BERT. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.
|
| 364 |
+
Zelin Zhou, Zhiling Zhang, Xuanan Xu, Zeyu Xie, Mengyue Wu, and Kenny Q Zhu. 2022. Can audio captions be evaluated with image caption metrics? In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 981-985. IEEE.
|
| 365 |
+
Deyao Zhu, Jun Chen, Kilichbek Haydarov, Xiaoqian Shen, Wenxuan Zhang, and Mohamed Elhoseiny. 2023a. ChatGPT asks, BLIP-2 answers: Automatic questioning towards enriched visual descriptions. arXiv preprint arXiv:2303.06594.
|
| 366 |
+
Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. 2023b. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592.
|
| 367 |
+
Yan Zhuang, Qi Liu, Yuting Ning, Weizhe Huang, Rui Lv, Zhenya Huang, Guanhao Zhao, Zheng Zhang, Qingyang Mao, Shijin Wang, et al. 2023. Efficiently measuring the cognitive ability of LLMs: An adaptive testing perspective. arXiv preprint arXiv:2306.10512.
|
| 368 |
+
Mingchen Zhuge, Haozhe Liu, Francesco Faccio, Dylan R Ashley, Robert Csordas, Anand Gopalakrishnan, Abdullah Hamdi, Hasan Hammoud, Vincent Herrmann, Kazuki Irie, Louis Kirsch, Bing Li, Guohao Li, Shuming Liu, Jinjie Mai, Piotr Piekos, Aditya Ramesh, Imanol Schlag, Weimin Shi, Aleksandar Stanić, Wenyi Wang, Yuhui Wang, Mengmeng Xu, Deng-Ping Fan, Bernard Ghanem, and Jürgen Schmidhuber. 2023. Mindstorms in natural language-based societies of mind. arXiv preprint arXiv:2305.17066.
|
| 369 |
+
|
| 370 |
+
# A Image Annotation Instruction
|
| 371 |
+
|
| 372 |
+
Figure 6 shows the image annotation instruction for annotators of CogBench.
|
| 373 |
+
|
| 374 |
+
# B Prompt of CoR-based GPT-assisted Question Generation
|
| 375 |
+
|
| 376 |
+
Figure 7 shows an example prompt of the CoR-based GPT-assisted question generation method for GPT-4. This prompt is used to generate questions based on [Event Reasoning] CoRs. Prompts for other reasoning types are similar to this one.
|
| 377 |
+
|
| 378 |
+
# C Introduction to Selected LVLMs
|
| 379 |
+
|
| 380 |
+
<table><tr><td>Model</td><td>Visual Encoder</td><td>Language Model</td></tr><tr><td>InstructBLIP-7B</td><td>EVA-G</td><td>Vicuna-7B</td></tr><tr><td>Qwen-VL-Chat</td><td>ViT-G/16</td><td>Qwen-7B</td></tr><tr><td>LLaVA-v1.5-7B</td><td>CLIP ViT-L/14</td><td>Vicuna-v1.5-7B</td></tr><tr><td>LLaVA-v1.5-13B</td><td>CLIP ViT-L/14</td><td>Vicuna-v1.5-13B</td></tr><tr><td>mPLUG-Owl-2</td><td>CLIP ViT-L/14</td><td>LLaMA2-7B</td></tr><tr><td>ShareGPT4V-7B</td><td>CLIP ViT-L/14</td><td>Vicuna-v1.5-7B</td></tr><tr><td>ShareGPT4V-13B</td><td>CLIP ViT-L/14</td><td>Vicuna-v1.5-13B</td></tr><tr><td>LLaVA-v1.6-vicuna-7B</td><td>CLIP ViT-L/14</td><td>Vicuna-v1.5-7B</td></tr><tr><td>LLaVA-v1.6-vicuna-13B</td><td>CLIP ViT-L/14</td><td>Vicuna-v1.5-13B</td></tr><tr><td>LLaVA-v1.6-34B</td><td>CLIP ViT-L/14</td><td>Nous-Hermes-2-Yi-34B</td></tr><tr><td>CogVLM-Chat</td><td>EVA2-CLIP-E</td><td>Vicuna-v1.5-7B</td></tr><tr><td>CogVLM2-Llama3-Chat</td><td>EVA2-CLIP-E</td><td>LLaMA3-8B</td></tr><tr><td>InternVL2-26B</td><td>InternViT-6B</td><td>InternLM2-Chat-20B</td></tr><tr><td>Qwen2-VL-7B</td><td>QwenViT</td><td>Qwen2-7B</td></tr><tr><td>LLaVA-OV-7B-Chat</td><td>SigLIP-400M</td><td>Qwen2-7B</td></tr><tr><td>GPT-4V</td><td>-</td><td>-</td></tr><tr><td>GPT-4o</td><td>-</td><td>-</td></tr></table>
|
| 381 |
+
|
| 382 |
+
Table 6: LVLMs evaluated in this paper.
|
| 383 |
+
|
| 384 |
+
- InstructBLIP (Dai et al., 2023) is an extension of BLIP-2 (Li et al., 2023b), designed to tackle the challenges of vision-language instruction tuning. It consists of an image encoder, an LLM, and a Q-Former. We use "blip2_vicuna_instruct" + "vicuna7b" for testing.
|
| 385 |
+
- Qwen-VL-Chat (Bai et al., 2023) is the instruction-tuned VL chatbot based on Qwen-VL, which consists of a visual encoder, an LLM, and a position-aware vision-language adapter. Its training process consists of two pre-training stages followed by a final instruction fine-tuning stage. We test "Qwen-VL-Chat" in the paper.
|
| 386 |
+
- LLaVA v1.5 (Liu et al., 2023a) is an upgraded version of LLaVA (Liu et al., 2023b), which connects a vision encoder and an LLM for visual and language understanding. LLaVA is instruction-tuned on the language-image instruction-following data generated by
|
| 387 |
+
|
| 388 |
+
language-only GPT-4. By using CLIP-ViT-L-336px with an MLP projection and adding academic-task-oriented VQA data with simple response formatting prompts, LLaVA v1.5 achieves better performance. "llava-v1.5-7b" and "llava-v1.5-13b" are tested.
|
| 389 |
+
|
| 390 |
+
- mPLUG-Owl-2 (Ye et al., 2023) leverages modality collaboration to enhance performance across both text and multi-modal tasks. It adopts a modularized network design, with the language decoder serving as a universal interface to manage different modalities. We test "mplug-owl2-llama2-7b" in the paper.
|
| 391 |
+
- ShareGPT4V (Chen et al., 2023a) follows the design of LLaVA v1.5. It incorporates a large-scale resource featuring highly descriptive captions into both the pre-training and supervised fine-tuning phases. We test "ShareGPT4V-7B" and "ShareGPT4V-13B".
|
| 392 |
+
- LLaVA v1.6 (Liu et al., 2024) maintains the minimalist design and data efficiency of LLaVA-v1.5. It enhances LLaVA-v1.5 with dynamic high resolution, data mixture, and scaling of the LLM backbone. We test "llava-v1.6-vicuna-7b-hf", "llava-v1.6-vicuna-13b-hf" and "llava-v1.6-34b-hf" in the paper.
|
| 393 |
+
- CogVLM (Wang et al., 2023) comprises a ViT encoder, an MLP adapter, a pretrained LLM, and a visual expert module. Unlike the common shallow alignment method that maps image features to the input space of the language model, CogVLM uses a trainable visual expert module in the attention and FFN layers to bridge the gap between the frozen pretrained language model and image encoder. "cogvlm-chat-hf" is tested in the paper.
|
| 394 |
+
- CogVLM2 (Hong et al., 2024) inherits the visual expert architecture from CogVLM. Differently, CogVLM2 uses a $2 \times 2$ downsampling module to increase input resolution while maintaining efficiency, with LLaMA3-8B as its backbone. The pre-training and post-training data are also improved in terms of diversity and quality. "cogvlm2-llama3-chat-19B" is tested.
|
| 395 |
+
- The InternVL2 (Chen et al., 2023c) family adopts a progressive alignment training strategy, resulting in the first vision foundation model
|
| 396 |
+
|
| 397 |
+
You are going to see some pictures. Each picture tells a story and requires different kinds of reasoning to fully understand it. You will be first asked to identify the entities and reasoning processes in the picture. Then, you will need to describe the story of the picture based on your identified entities and reasoning processes.
|
| 398 |
+
|
| 399 |
+
First, you will be asked to identify the entities in the picture. The annotation format is [A, B, C], where A, B, C are entities.
|
| 400 |
+
|
| 401 |
+
[Entities]: Please list the entities appearing in the picture, including people, animals, objects etc. You are encouraged to list as many entities as possible. Note that these entities need to be in your picture description afterwards. For entities that are difficult to recognize, please do not list them here or describe them.
|
| 402 |
+
|
| 403 |
+
Then, you will be asked to identify different reasoning processes in the picture. The annotation format should follow the structure $\mathrm{[A1 + A2\rightarrow B]}$ , where A1 and A2 are premises and B is the conclusion. Note that if you write a conclusion, there must be at least one premise. Do not write a conclusion only, like [B]. Please write one conclusion at a time, and do not write a reasoning process like $\mathrm{[A1\rightarrow B\rightarrow C]}$ , which should be split into two. Each picture does not necessarily require all kinds of reasoning. Please write None, if a picture does not involve a specific kind of reasoning or it is not important in the picture.
|
| 404 |
+
|
| 405 |
+
[Special Time Reasoning]: Please write your reasoning processes about the special time of the story in the picture, e.g. festivals, seasons etc. The special time is usually relevant to the story of the picture. For instance, if it is daytime in a picture, it is easily recognized, requires no reasoning and there is nothing special, you can write None. However, if there is a lamp on or a clock indicating a specific time, you can write down your reasoning about the time.
|
| 406 |
+
|
| 407 |
+
[Location Reasoning]: Please write your reasoning processes about the location of the story in the picture, e.g. near the school.
|
| 408 |
+
|
| 409 |
+
[Character Reasoning]: Please write your reasoning processes about the characters of the subjects in the picture, e.g. a teacher, a doctor etc.
|
| 410 |
+
|
| 411 |
+
[Character Relationship Reasoning]: Please write your reasoning processes about the relationships between the characters in the picture, e.g. mother-son relationship.
|
| 412 |
+
|
| 413 |
+
[Event Reasoning]: Please write your reasoning processes about the events in the current and previous moments in the picture based on the clues provided. Note that you only need to annotate those high-level events and can ignore the low-level ones. For instance, "the woman is looking at the man" is a low-level event and you can ignore its reasoning process. Differently, the reasoning process [A mother is busy cooking. + A boy is fetching cookies behind the mom. + A girl is shushing the boy. -> The boy is stealing cookies.] is about a high-level event "stealing" and you should write it down.
|
| 414 |
+
|
| 415 |
+
[Event Relationship Reasoning]: Please write your reasoning processes about the relationships between different events in the picture. These events are usually linked through causal and temporal relations. Note that events in this part do not necessarily appear in the [Event Reasoning] part as some events here are low-level events.
|
| 416 |
+
|
| 417 |
+
[Next Moment Event Reasoning]: Please write your reasoning processes about the events that will happen in the next moment. Note that you only need to write down events that have a very high probability of happening, instead of guessing what might happen next.
|
| 418 |
+
|
| 419 |
+
[Mental State Reasoning]: Please write your reasoning processes about the mental states of the subjects in the picture, e.g. daydreaming, happy, etc. You need to reason as best you can about the mental states of all the subjects in the picture, unless they are not showing obvious emotions.
|
| 420 |
+
|
| 421 |
+
Finally, you will be asked to describe the picture in as much detail as you can.
|
| 422 |
+
|
| 423 |
+
[Description]: Please describe all you see in the picture in a paragraph based on the entities and reasoning processes you identified above, ensuring that all of them are included in your description. Each picture has a story behind it and you need to tell that story through your description.
|
| 424 |
+
|
| 425 |
+
Figure 6: Image annotation instruction for annotators.
|
| 426 |
+
|
| 427 |
+
natively aligned with LLMs and enabling efficient training of large models with limited resources. It supports multimodal input using a single parameter set and provides multitask output, including images, bounding boxes, and masks. By connecting the LVLM with multiple downstream task decoders, it can be generalized to many VL tasks. We test "InternVL2-26B" in this paper.
|
| 428 |
+
|
| 429 |
+
- Qwen2-VL (Wang et al., 2024) is built upon the Qwen-VL architecture. It introduces the Naive Dynamic Resolution mechanism, which allows the model to handle arbitrary image resolutions, dynamically adjusting the number of visual tokens. It also incorporates Multimodal Rotary Position Embedding (M-RoPE), enabling the effective fusion of positional information across text, images, and videos. "Qwen2-VL-7B-Instruct" is tested.
|
| 430 |
+
- LLaVA-OneVision-Chat (Li et al., 2024) is an enhanced version of LLaVA-OneVision, with improvements achieved through preference alignment for better visual-chat capabilities. LLaVA-OneVision is developed by integrating insights into data, models, and visual representations in the LLaVA v1.6 series. "llava-onevision-qwen2-7b-ov-chat" is tested and we use LLaVA-OV-Chat to refer to LLaVA-OneVision-Chat in this paper.
|
| 431 |
+
- GPT-4V (OpenAI, 2023) is a powerful LVLM developed by OpenAI. The version of "gpt-4-turbo" is tested.
|
| 432 |
+
- GPT-4o (OpenAI, 2023) is currently one of the most powerful multimodal models. It is a single model trained end-to-end across text, vision, and audio. The version of "gpt-4o" is tested.
|
| 433 |
+
|
| 434 |
+
Table 6 shows an overview of the designs of different LVLMs.
|
| 435 |
+
|
| 436 |
+
# D Prompts for GPT-based Cognition Evaluation of the Description Task
|
| 437 |
+
|
| 438 |
+
Figure 8 and Figure 9 show the prompts used for cognition evaluation of the Description task for GPT-4 (or ChatGPT).
|
| 439 |
+
|
| 440 |
+
# E Evaluation of LVLMs on the Description Task Using Traditional Image Captioning Metrics
|
| 441 |
+
|
| 442 |
+
Table 7 shows the model performance on traditional image captioning evaluation metrics. Following Krause et al. (2017), we use METEOR (Banerjee and Lavie, 2005), CIDEr (Vedantam et al., 2015), BLEU-1, BLEU-2, BLEU-3, and BLEU-4 (Papineni et al., 2002) to evaluate the model performance on the Description task. Similar to the findings of Zhu et al. (2023a), it can be observed that traditional image captioning evaluation metrics are not quite suitable for evaluating the Description task. There are two possible reasons. The first possible reason is that image descriptions are longer and more flexible than traditional image captions. The second possible reason is that our Description task requires evaluation metrics to place more emphasis on high-level semantics in the description.
|
| 443 |
+
|
| 444 |
+
# F Implementation of Non-GPT-Based Cognition Evaluation Methods for the Description Task
|
| 445 |
+
|
| 446 |
+
The cognition evaluation of the Description task is to determine whether each CoR is mentioned in the description. Apart from ChatGPT or GPT-4, some other evaluation methods are also implemented to perform this classification task, as shown in Table 4.
|
| 447 |
+
|
| 448 |
+
For methods based on ROUGE (Lin, 2004), BERTScore (Zhang et al., 2020), and BLEURT (Sellam et al., 2020), we first split the description into sentences and then use each CoR as the reference to calculate the (recall) score for each sentence compared to the CoR. Then, the highest score among all calculated scores is taken as the score of the CoR corresponding to the description. Finally, the score is converted into 0 or 1 using a threshold.
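As a rough sketch of this procedure (the sentence splitter, the injected recall metric, and the threshold value below are illustrative assumptions, not the paper's exact settings):

```python
import re

# Hypothetical helper: `recall_score(reference, candidate)` stands in for the
# recall variant of ROUGE, BERTScore, or BLEURT between a CoR and a sentence.
def cor_mentioned(description, cor, recall_score, threshold=0.5):
    # Naive sentence split; the paper does not specify its splitter.
    sentences = [s for s in re.split(r"(?<=[.!?])\s+", description) if s.strip()]
    # Score each sentence against the CoR and keep the best match.
    best = max((recall_score(cor, s) for s in sentences), default=0.0)
    # Convert the best score into a binary 0/1 decision via a threshold.
    return 1 if best >= threshold else 0
```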
|
| 449 |
+
|
| 450 |
+
We also tried Natural Language Inference (NLI) models to perform the task. First, we use DeBERTa (He et al., 2021, 2023) to perform a sentence-level NLI task similar to the method mentioned above. If there is at least one "Entailment" for all the sentences, the score of the CoR will be 1. The model we adopt is mDeBERTa-v3-base-xnli-multilingual-nli-2mil7. The second NLI model we tried is Doc-NLI (Yin et al., 2021), which can directly take the description and CoR as input and perform the classification task.
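A minimal sketch of the sentence-level NLI variant, assuming the Hugging Face checkpoint id below corresponds to the mDeBERTa model named above (the exact repository id and the label string are assumptions):

```python
from transformers import pipeline

# Assumed checkpoint for mDeBERTa-v3-base-xnli-multilingual-nli-2mil7.
nli = pipeline("text-classification",
               model="MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7")

def cor_entailed(sentences, cor):
    # The CoR scores 1 if at least one description sentence entails it.
    for sent in sentences:
        pred = nli({"text": sent, "text_pair": cor})[0]
        if pred["label"].lower() == "entailment":
            return 1
    return 0
```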
|
| 451 |
+
|
| 452 |
+
<table><tr><td>Model</td><td>METEOR</td><td>CIDEr</td><td>BLEU-1</td><td>BLEU-2</td><td>BLEU-3</td><td>BLEU-4</td></tr><tr><td>InstructBLIP-7B</td><td>0.130 / 0.183</td><td>0.043 / 0.003</td><td>0.255 / 0.218</td><td>0.127 / 0.104</td><td>0.063 / 0.049</td><td>0.033 / 0.024</td></tr><tr><td>Qwen-VL-Chat</td><td>0.130 / 0.196</td><td>0.037 / 0.016</td><td>0.242 / 0.262</td><td>0.124 / 0.138</td><td>0.059 / 0.069</td><td>0.030 / 0.036</td></tr><tr><td>LLaVA-V1.5-7B</td><td>0.146 / 0.182</td><td>0.054 / 0.020</td><td>0.309 / 0.275</td><td>0.158 / 0.138</td><td>0.076 / 0.065</td><td>0.037 / 0.032</td></tr><tr><td>LLaVA-V1.5-13B</td><td>0.146 / 0.176</td><td>0.051 / 0.018</td><td>0.312 / 0.274</td><td>0.160 / 0.137</td><td>0.076 / 0.066</td><td>0.037 / 0.034</td></tr><tr><td>mPLUG-Owl-2</td><td>0.132 / 0.184</td><td>0.035 / 0.012</td><td>0.260 / 0.235</td><td>0.126 / 0.116</td><td>0.057 / 0.053</td><td>0.027 / 0.025</td></tr><tr><td>ShareGPT4V-7B</td><td>0.162 / 0.191</td><td>0.017 / 0.014</td><td>0.259 / 0.222</td><td>0.120 / 0.113</td><td>0.050 / 0.053</td><td>0.024 / 0.027</td></tr><tr><td>ShareGPT4V-13B</td><td>0.165 / 0.184</td><td>0.024 / 0.015</td><td>0.278 / 0.254</td><td>0.129 / 0.126</td><td>0.055 / 0.059</td><td>0.026 / 0.030</td></tr><tr><td>LLaVA-v1.6-vicuna-7B</td><td>0.169 / 0.190</td><td>0.026 / 0.000</td><td>0.278 / 0.159</td><td>0.134 / 0.085</td><td>0.060 / 0.041</td><td>0.029 / 0.021</td></tr><tr><td>LLaVA-v1.6-vicuna-13B</td><td>0.172 / 0.195</td><td>0.023 / 0.000</td><td>0.283 / 0.166</td><td>0.137 / 0.090</td><td>0.061 / 0.043</td><td>0.030 / 0.021</td></tr><tr><td>LLaVA-v1.6-34B</td><td>0.172 / 0.199</td><td>0.026 / 0.000</td><td>0.279 / 0.173</td><td>0.134 / 0.092</td><td>0.060 / 0.044</td><td>0.030 / 0.021</td></tr><tr><td>CogVLM-Chat</td><td>0.157 / 0.174</td><td>0.069 / 0.056</td><td>0.297 / 0.326</td><td>0.151 / 0.154</td><td>0.075 / 0.069</td><td>0.039 / 0.034</td></tr><tr><td>CogVLM2-Llama3-Chat</td><td>0.176 / 0.191</td><td>0.059 / 0.032</td><td>0.356 / 0.257</td><td>0.183 / 0.127</td><td>0.091 / 0.059</td><td>0.049 / 0.029</td></tr><tr><td>InternVL2-26B</td><td>0.210 / 0.202</td><td>0.008 / 0.000</td><td>0.250 / 0.166</td><td>0.136 / 0.091</td><td>0.070 / 0.044</td><td>0.037 / 0.023</td></tr><tr><td>Qwen2-VL-7B</td><td>0.205 / 0.183</td><td>0.007 / 0.000</td><td>0.222 / 0.154</td><td>0.121 / 0.084</td><td>0.061 / 0.041</td><td>0.032 / 0.021</td></tr><tr><td>LLaVA-OV-7B-Chat</td><td>0.198 / 0.192</td><td>0.009 / 0.000</td><td>0.245 / 0.138</td><td>0.118 / 0.072</td><td>0.051 / 0.033</td><td>0.024 / 0.016</td></tr><tr><td>GPT-4V</td><td>0.189 / 0.191</td><td>0.013 / 0.000</td><td>0.250 / 0.153</td><td>0.113 / 0.071</td><td>0.047 / 0.028</td><td>0.022 / 0.011</td></tr><tr><td>GPT-4o</td><td>0.182 / 0.189</td><td>0.070 / 0.000</td><td>0.332 / 0.145</td><td>0.161 / 0.074</td><td>0.077 / 0.033</td><td>0.039 / 0.016</td></tr></table>
|
| 453 |
+
|
| 454 |
+
Table 7: Model performance on the Description task evaluated using traditional image captioning metrics. The results of the evaluation under the Spontaneous Description mode and Directed Reasoning mode are presented before and after the “/” in each table cell.
|
| 455 |
+
|
| 456 |
+
We have a description of an image and the description tells a detailed story unfolding in the image. In the process of describing an image, it is often necessary to engage in reasoning about events based on the clues within the image, leading to certain conclusions. For example, when we see the wind is blowing outside, and a man is reading a newspaper in the telephone booth, we can infer that he is actually hiding from the wind in the telephone booth. Therefore, in this task, in addition to the image description, the reasoning processes about events within the image description have also been extracted. For each reasoning process, we use $\mathrm{A1 + A2 + \ldots \rightarrow B}$ to represent it, where A1, A2, ... are clues we observed in the picture and B represents the conclusion about the event we inferred.
|
| 457 |
+
|
| 458 |
+
Thus, given an image description and the reasoning processes about event, our task is:
|
| 459 |
+
|
| 460 |
+
1) Generate a question based on reasoning processes about event.
|
| 461 |
+
2) Generate four options: A, B, C, and D. There is only one correct answer among the four options, which is consistent with the description and reasoning processes provided. The correct answer option should be randomly chosen from A, B, C, and D. For those incorrect options (distractors), you are encouraged to hallucinate some clues that are highly relevant to the question and the description but are not actually consistent with the description. That is, you can distort the facts in the description and reasoning processes using elements related to the question to generate some easily selectable distractors. It would be better if you can generate some distractors that are similar to but different from the correct option. Please avoid situations where the correct option is significantly longer or shorter than the distractors.
|
| 462 |
+
|
| 463 |
+
For example, if the description is "There are some snow on the ground and it is windy, ... We can see it is cold. Inside a phone booth, a man is smiling while looking at a newspaper. He is sheltering from the cold wind in the phone booth..." and the question is "Why can we tell that the man is seeking shelter for warmth?", you can use "newsstand", which is related to "seeking shelter for warmth" in the question, to distort the fact in description "in a phone booth." Then you can get "the man is in the newsstand." Similarly, you can hallucinate a question related distractor "it is raining and a man is smiling and reading a newspaper in a phone booth," which is similar to the correct option "it is windy and a man is smiling and reading a newspaper in a phone booth," but different from it and inconsistent with the description.
|
| 464 |
+
|
| 465 |
+
3) Generate the letter corresponding to the correct answer, that is A, B, C, or D.
|
| 466 |
+
|
| 467 |
+
Here are some examples:
|
| 468 |
+
|
| 469 |
+
[Description]:
|
| 470 |
+
|
| 471 |
+
There are some snow on the ground and it is windy, indicating it is winter. There are two men and two women standing on the roadside. There is a sign that says "NO STANDING BUS STOP", indicating it is near a bus stop. A man is standing on the road side, wrapping his coat tightly around himself, and peering out onto the road. They are probably waiting for a bus here. We can see it is cold. Inside a phone booth, a man is smiling while looking at a newspaper. He is sheltering from the cold wind in the phone booth. He looks happy, because it is warm there. Two women are also wrapping their coats tightly and looking at the man in the phone booth. They are probably friends and standing together. They are unhappy with the man. There are some buildings by the road.
|
| 472 |
+
|
| 473 |
+
[Event Reasoning]:
|
| 474 |
+
|
| 475 |
+
It is windy and cold. + A man is standing in a phone booth reading newspaper. -> The man is sheltering from the cold wind in the phone booth.
|
| 476 |
+
|
| 477 |
+
[Generated Multiple-Choice Questions]:
|
| 478 |
+
|
| 479 |
+
What is the man doing in the phone booth?
|
| 480 |
+
|
| 481 |
+
A. Making a phone call.
|
| 482 |
+
B. Reading a book.
|
| 483 |
+
C. Avoiding someone he doesn't want to see.
|
| 484 |
+
D. Sheltering from the cold wind.
|
| 485 |
+
Correct Answer: [D]
|
| 486 |
+
|
| 487 |
+
Why can we tell that the man is seeking shelter for warmth?
|
| 488 |
+
|
| 489 |
+
A. It is windy and a man is smiling and reading a newspaper in a newsstand.
|
| 490 |
+
B. It is raining and a man is smiling and reading a newspaper in a newsstand.
|
| 491 |
+
C. It is windy and a man is smiling and reading a newspaper in a phone booth.
|
| 492 |
+
D. It is raining and a man is smiling and reading a newspaper in a phone booth.
|
| 493 |
+
|
| 494 |
+
Correct Answer: [C]
|
| 495 |
+
|
| 496 |
+
Please:
|
| 497 |
+
|
| 498 |
+
1). Generate at least one question for each reasoning process.
|
| 499 |
+
2). Generate more diverse questions, try to generate questions from different perspectives or angles and don't limit yourself to the question templates provided in the examples.
|
| 500 |
+
3). Avoid generating repetitive questions with similar meanings.
|
| 501 |
+
|
| 502 |
+
Figure 7: An example prompt of CoR-based GPT-assisted question generation for GPT-4 to generate questions based on [Event Reasoning] CoRs.
|
| 503 |
+
|
| 504 |
+
Given a <DESCRIPTION> and some <KEY POINT>s, please tell me if the <DESCRIPTION> explicitly presents the exact or similar semantics of each <KEY POINT>. The following points are required:
|
| 505 |
+
|
| 506 |
+
1) Instead of reasoning about whether <KEY POINT> is possibly correct based on the <DESCRIPTION>, you only need to determine whether the <DESCRIPTION> mentions the semantics in the <KEY POINT>.
|
| 507 |
+
2) Do not overlook the semantics in the <DESCRIPTION> that are semantically equivalent to the <KEY POINT> but expressed in different ways. For instance, if the <DESCRIPTION> mentions "The woman is playing with her son...," we can tell it successfully includes semantics in the <KEY POINT> "The woman is the mother of the boy."
|
| 508 |
+
3) If several possible scenarios are listed using 'or' at a <KEY POINT>, you only need to determine whether one of these scenarios is mentioned in the <DESCRIPTION>.
|
| 509 |
+
|
| 510 |
+
Assign a score of 0 or 1 to each <KEY POINT>, where 0 represents NO and 1 represents YES.
|
| 511 |
+
|
| 512 |
+
<DESCRIPTION>:
|
| 513 |
+
|
| 514 |
+
{Description generated by a model.}
|
| 515 |
+
|
| 516 |
+
<KEY POINT>:
|
| 517 |
+
|
| 518 |
+
1. {Annotated key point 1.}
|
| 519 |
+
2. {Annotated key point 2.}
|
| 520 |
+
···
|
| 521 |
+
N. {Annotated key point N.}
|
| 522 |
+
|
| 523 |
+
Please write your answers in “[ ]” with 0 or 1 in the following format (number + square brackets):
|
| 524 |
+
|
| 525 |
+
1. [1] 2. [0]
|
| 526 |
+
|
| 527 |
+
Your answers to the $\{\mathbf{N}\}$ <KEY POINT>(s) above:
|
| 528 |
+
|
| 529 |
+
1. [] 2. [] ... N. []
|
| 530 |
+
|
| 531 |
+
Given a <DESCRIPTION> and some <EVENT RELATIONSHIP>s, please tell me whether this <DESCRIPTION> clearly depicts the cause-and-effect relationships between events.
|
| 532 |
+
|
| 533 |
+
The format of a <EVENT RELATIONSHIP> follows the structure "A1 + A2 + ... + An -> B", where A1, A2, ..., An and B are events. Events A1, A2, ..., An are the causes of event B, and event B is the result caused by events A1, A2, ..., An. The criteria for judgment lie in whether the <DESCRIPTION> mentions these events and clearly depicts the causal relationships between them.
|
| 534 |
+
|
| 535 |
+
Assign a score of 0 or 1 to each <EVENT RELATIONSHIP>, where 0 represents NO and 1 represents YES.
|
| 536 |
+
|
| 537 |
+
<DESCRIPTION>:
|
| 538 |
+
|
| 539 |
+
{Description generated by a model.}
|
| 540 |
+
|
| 541 |
+
<EVENT RELATIONSHIP>:
|
| 542 |
+
|
| 543 |
+
1. {Annotated event relationship 1.}
|
| 544 |
+
2. {Annotated event relationship 2.}
|
| 545 |
+
···
|
| 546 |
+
N. {Annotated event relationship N.}
|
| 547 |
+
|
| 548 |
+
Please write your answers in “[ ]” with 0 or 1 in the following format (number + square brackets):
|
| 549 |
+
|
| 550 |
+
1. [1] 2. [0]
|
| 551 |
+
|
| 552 |
+
Your answers to the $\{\mathbf{N}\}$ <EVENT RELATIONSHIP>(s) above:
|
| 553 |
+
|
| 554 |
+
1. [] 2. [] ... N. []
|
| 555 |
+
|
| 556 |
+
Figure 9: Cognition evaluation prompt of [Event Relationship Reasoning].
|
| 557 |
+
|
| 558 |
+
Figure 8: Cognition evaluation prompt of reasoning types other than [Event Relationship Reasoning].
|
acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f708a911f7b652cf3037216b8972f589cd42c0d82686481caacfb0acfd40a2ec
|
| 3 |
+
size 777482
|
acognitiveevaluationbenchmarkofimagereasoninganddescriptionforlargevisionlanguagemodels/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:15b841a2cd90698550ab0fa46a925910007abe8623f6db37fef7cb517e3e5f5a
|
| 3 |
+
size 577494
|
activefewshotlearningfortextclassification/1f71a233-7606-4149-981f-d07f210615f6_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1687e6fa7ace1f0ce3d5eb9ec65156d16c92fc11daffd640225ec7b3a571f834
|
| 3 |
+
size 149274
|
activefewshotlearningfortextclassification/1f71a233-7606-4149-981f-d07f210615f6_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:820ddc1b5d4612296c0c04dc3b63f901ae54326c1af4ca9a63302b06e677b693
|
| 3 |
+
size 168310
|
activefewshotlearningfortextclassification/1f71a233-7606-4149-981f-d07f210615f6_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:00b4ae27fbe42d12c4074fd832a136b1f08c0cb372ce92e8a15cce43781bb3b3
|
| 3 |
+
size 1955478
|
activefewshotlearningfortextclassification/full.md
ADDED
|
@@ -0,0 +1,512 @@
|
| 1 |
+
# Active Few-Shot Learning for Text Classification
|
| 2 |
+
|
| 3 |
+
Saeed Ahmadnia<sup>1</sup> Arash Yousefi Jordehi<sup>2</sup> Mahsa Hosseini Khasheh Heyran<sup>2</sup> Seyed Abolghasem Mirroshandel<sup>2</sup> Owen Rambow<sup>3</sup> Cornelia Caragea<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>University of Illinois Chicago <sup>2</sup>University of Guilan <sup>3</sup>Stony Brook University sahmad67@uic.edu arashy76@phd.guilan.ac.ir mahsahsii@gmail.com mirroshandel@guilan.ac.ir owen.rambow@stonybrook.edu cornelia@uic.edu
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
The rise of Large Language Models (LLMs) has boosted the use of Few-Shot Learning (FSL) methods in natural language processing, achieving acceptable performance even when working with limited training data. The goal of FSL is to effectively utilize a small number of annotated samples in the learning process. However, the performance of FSL suffers when unsuitable support samples are chosen. This problem arises due to the heavy reliance on a limited number of support samples, which hampers consistent performance improvement even when more support samples are added. To address this challenge, we propose an active learning-based instance selection mechanism that identifies effective support instances from the unlabeled pool and can work with different LLMs. Our experiments on five tasks show that our method frequently improves the performance of FSL. We make our implementation available at https://github.com/theSaeed/activefewshot-learning.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Deep learning systems have shown great performance when given enough labeled data, yet they struggle to learn from a small amount of labeled data (Sun et al., 2019). However, constructing a large corpus of annotated data is costly and time-consuming, which hinders the building of supervised classifiers for new domains (Zhu et al., 2009). Few-Shot Learning (FSL) addresses this by seeking to grasp new concepts from limited labeled examples for broader applications (Sun et al., 2019). With recent advances in Large Language Models (LLMs), the capabilities of FSL can be utilized much better than before, and these methods can reach acceptable performance using minimal training data (Gao et al., 2021; Chen et al., 2021; Karimi Mahabadi et al., 2022; Lin et al., 2022).
|
| 14 |
+
|
| 15 |
+
There are two widely adopted approaches to addressing the FSL problem: In-Context Learning
|
| 16 |
+
|
| 17 |
+
(ICL) and Fine-Tuning (FT). Recent autoregressive decoder-only LLMs have demonstrated strong performance in the realm of FSL (Brown et al., 2020), providing the opportunity to achieve satisfactory results with little to no labeled data without requiring any fine-tuning. On the other hand, FT is a more established strategy for text classification, which leverages a model fine-tuned on labeled support data for prediction, and can show strong results as opposed to ICL (Edwards and Camacho-Collados, 2024), but a large amount of training data is required for model fine-tuning.
|
| 18 |
+
|
| 19 |
+
Interestingly, most FSL methods typically select the samples randomly. However, the quality of the samples can have a significant impact on the model's performance. In some scenarios, adding un- or less-informative samples can even decrease the accuracy or may cause a high variance in the model's performance (Zhang et al., 2020; Schick and Schütze, 2021b). Few studies in the field of Natural Language Processing (NLP) have explored sample selection strategies. Nevertheless, prior works are limited by their lack of advanced techniques, such as active learning (Chang et al., 2021), and by compromising performance to achieve efficiency (Müller et al., 2022).
|
| 20 |
+
|
| 21 |
+
Given these challenges, and following the revealed strong performance of FT approaches against ICL (Edwards and Camacho-Collados, 2024), we propose a new FT- and Active Learning (AL)-based Few-Shot (FS) sample selection method to enhance classification performance by choosing the most informative unlabeled samples, thus minimizing annotation costs. Inspired by successful AL algorithms (Settles, 2009), our algorithm selects instances using entropy and clustering methods to ensure uncertainty, diversity, and representativeness in sample selection.
|
| 22 |
+
|
| 23 |
+
More specifically, we propose an iterative approach to progressively choose samples for human annotation and label them to be added to the support set.
|
| 24 |
+
|
| 25 |
+
This process begins with generating an organized depiction of the unlabeled data based on a certain embedding method to capture the requested key features of the data. Following this, a sampling method is applied to strategically select new data points based on the generated embeddings. These data points are then given to a human annotator to be labeled and added to the support set. Afterward, we use this augmented support set for fine-tuning LLMs.
|
| 26 |
+
|
| 27 |
+
To evaluate our method, we conduct experiments on five classification tasks: the news topic, 5-star rating, and private states' type, polarity, and intensity classification. We use two pre-trained language models, BART and FLAN-T5 (Lewis et al., 2019; Chung et al., 2022), as the backbone and fine-tuning models in our approach. The proposed method is compared against weak and strong baselines to support its effectiveness and efficiency. These baselines include random sampling, in-context learning with Gemma 2 (Team et al., 2024), Llama 3.1 (Dubey et al., 2024), and Mistral v0.3 (Jiang et al., 2023), as well as other related work utilizing more advanced sample selection strategies in FSL. Our approach, especially when combining representative and uncertainty-based sampling techniques, exceeds the baselines considerably on average.
|
| 28 |
+
|
| 29 |
+
Our contributions can be summarized as follows: 1) We introduce an Active Learning-based sample selection scenario by combining uncertainty and representativeness measures for FS classification problems, achieving state-of-the-art performance when paired with recent FSL algorithms. 2) We execute comprehensive experiments on a wide variety of tasks using various LLMs. 3) We present a thorough analysis to assess the performance of the models at different iterations, comparing them to the baselines presented in previous work. 4) We make our implementation publicly available.
|
| 30 |
+
|
| 31 |
+
# 2 Related Work
|
| 32 |
+
|
| 33 |
+
In previous studies, the few-shot scenario has been simulated by randomly sampling a subset from the complete training data (Chen et al., 2020; Schick and Schütze, 2021a; Gao et al., 2021; Chen et al., 2021; Lin et al., 2022; Edwards and Camacho-Collados, 2024). Among different FSL methods in NLP, there are few methods that have paid attention to the sample selection strategies. However, some recent studies in the field of image processing have
|
| 34 |
+
|
| 35 |
+
demonstrated the effectiveness of incorporating AL strategies in the context of FSL (Boney et al., 2019; Pezeshkpour et al., 2020; Li et al., 2021; Shin et al., 2022).
|
| 36 |
+
|
| 37 |
+
The study conducted by Chang et al. (2021) is one of the few works that specifically addresses sample selection in NLP. Their research focuses on FS training instance selection and using it in three text generation tasks with BART. Their approach is motivated by the idea that few-shot training instances should exhibit diversity and representativeness. To achieve this, the authors utilized K-Means clustering for choosing data points closer to the center of clusters as important (i.e., informative) samples. Their results demonstrate the success of this method even with this simple non-iterative clustering-based approach. In contrast, our research specifically targets classification tasks. Furthermore, our active learning approach incorporates a wider range of tasks and selection strategies (i.e., uncertainty, diversity, and representativeness) compared to their study. We then extend the usage of this idea to iteratively expand the support set.
|
| 38 |
+
|
| 39 |
+
The only recent study that incorporates FS and AL approaches in NLP is conducted by Müller et al. (2022), using a zero-shot approach for XLM-RoBERTa-based Siamese networks. They use label tuning to fine-tune the label embeddings for faster training. By contrast, we use multiple language models (i.e., BART and FLAN-T5) and fine-tune the entire model on the support set in every iteration, as we prioritize the FS instance selection quality and model performance, which is shown to produce significantly better performance. Also, the related work does not specify how they select samples in the first iteration using an uncertainty approach while the model has never seen any related data before. In contrast, we propose using representative sampling in the first step to boost the initial performance of the model, even in uncertainty sampling methods. Moreover, we introduce four new sampling methods compared to the mentioned work. Importantly, our methods and implementation are open source and publicly available to be freely used by fellow researchers. To the best of our knowledge, the previous work's methods are not freely available to the public.
|
| 40 |
+
|
| 41 |
+
# 3 Active Few-Shot Learning
|
| 42 |
+
|
| 43 |
+
In our definition of the problem, we have a large set of unlabeled training samples to start with. Our
|
| 44 |
+
|
| 45 |
+

|
| 46 |
+
Figure 1: Pipeline of the $i^{th}$ iteration in our approach. Yellow boxes represent different phases of the method. Blue boxes are constant inputs during all iterations. Red boxes are carried over and modified during all consecutive iterations. Green boxes are products of the current iteration that will not be used later in the approach.
|
| 47 |
+
|
| 48 |
+
goal is to select a small number of samples from this unlabeled pool to be labeled and used as a support set in a way that maximizes a model's performance on the test set. In different FS instance selection methods, we may have one or more iterations of sampling and model fine-tuning. We categorize experiments with a single round of this process as 'non-iterative', and those with multiple rounds as 'iterative'.
|
| 49 |
+
|
| 50 |
+
Figure 1 illustrates the full pipeline of a single iteration, which can be the one and only iteration in the non-iterative approaches. At each iteration, we first examine the data to determine which additional samples to include in the support set. This process begins by extracting embeddings from a certain source, specified by an embedding method, to have a structured representation of the unlabeled data. This source is derived from running inference on the fine-tuned model of the last iteration by feeding it the unlabeled training pool. Subsequently, a sampling method is applied to select new samples based on the obtained embeddings, guiding the model's learning toward optimal performance. The embedding and sampling methods are explained in the following subsections in more detail.
The selected samples are then removed from the unlabeled training pool and given to an oracle for labeling. Following that, the newly labeled samples are added to the support set. This augmented support set is next used to fine-tune a new instance of the pre-trained model, with the validation set guiding early stopping. Finally, we evaluate the latest fine-tuned model on the test set to analyze the performance of our approach at the end of each step. However, the test set is solely used for evaluation purposes and is not needed for our approach to function in the defined setup. Moreover, in the initial round, the support set is an empty set, all the training data is included in the unlabeled pool, and a pre-trained model without any previous fine-tuning is used to extract embeddings.
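To make the pipeline concrete, the following is a minimal sketch of the loop in Python. It is not our released implementation; the helper names (`load_pretrained`, `extract_embeddings`, `sampling_method`, `oracle_label`, `fine_tune`, `evaluate`) are hypothetical placeholders for the phases in Figure 1.

```python
# Minimal sketch of one run of the active few-shot loop (Figure 1).
# All helper functions are hypothetical placeholders, not the released code.
support = []                              # labeled support set, empty in the initial round
pool = list(unlabeled_train)              # the full unlabeled training pool
model = load_pretrained()                 # iteration 0 uses the raw pre-trained model

for step in range(num_iterations):
    embeddings = extract_embeddings(model, pool)      # En and/or Sc (Section 3.1)
    picked = sampling_method(embeddings, m=M)         # indices of M new samples (Section 3.2)
    support += [(pool[i], oracle_label(pool[i])) for i in picked]
    pool = [x for i, x in enumerate(pool) if i not in set(picked)]
    model = fine_tune(load_pretrained(), support, val_set)  # fresh pre-trained instance each time
    evaluate(model, test_set)                         # for analysis only; not used by the loop
```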
# 3.1 Embedding Methods
We obtain the embeddings by using two distinct methods:
Encoder (En): In this method, we extract the last hidden states from the encoder of the model and apply mean pooling over them. This embedding serves as a dense representation of the input data, providing a rich feature space that encodes the input sequence.
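As an illustration, mean pooling over the encoder's last hidden states can be computed as below. This is a sketch assuming PyTorch tensors; whether padding positions are excluded is not specified here, so mask-aware pooling is our assumption.

```python
import torch

def encoder_embedding(last_hidden_state: torch.Tensor,
                      attention_mask: torch.Tensor) -> torch.Tensor:
    """Mean-pool encoder hidden states over (assumed) non-padding tokens.

    last_hidden_state: (batch, seq_len, hidden)
    attention_mask:    (batch, seq_len), 1 for real tokens and 0 for padding
    """
    mask = attention_mask.unsqueeze(-1).float()      # (batch, seq_len, 1)
    summed = (last_hidden_state * mask).sum(dim=1)   # sum the real-token vectors
    counts = mask.sum(dim=1).clamp(min=1e-9)         # number of real tokens per sample
    return summed / counts                           # (batch, hidden)
```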
Scores (Sc): Here, we leverage the output logits of the model and apply a softmax function to calculate the probability distribution over the possible labels. This method focuses on the model's confidence in assigning labels to the input data, which can be interpreted as a measure of how well the model understands the instance.
In both cases, we use a pre-trained model without any fine-tuning during the first iteration and use the fine-tuned model of the previous iteration during the subsequent iterations. Moreover, the embedding methods we use in this study make our approach adaptable to any LLM that provides label probabilities and its encoder's last hidden states.
Since we are working with text generation models, additional processing is required to calculate the scores. Specifically, we need to compute the probability $P_{m}^{<t>}[n]$ (Equation 1), which represents the likelihood that the token at position $t$ in sample $m$'s logits corresponds to the $n^{th}$ class out of all $N$ classes.
Each pre-trained model has its own vocabulary that maps distinct numerical indices to the tokens it recognizes. $Logits_{m}^{<t>}[i]$ denotes the model's logit for the $i^{th}$ word in the vocabulary at position $t$ for sample $m$. During this procedure, we need each class to be represented by a single token. For cases where a class name is represented by multiple tokens in the pre-trained model's tokenizer, we manually replace such multi-token labels with semantically close single-token labels. We then disregard all other tokens in the vocabulary that do not correspond to any task-specific class. To manage this, we define and use a dictionary $ClassId(i)$, which maps the $i^{th}$ class to its corresponding index in the vocabulary.
Once the class probabilities are computed, we calculate the score $Score_{m}[n]$ (Equation 2) by taking the maximum probability of the $n^{th}$ class over all $T$ output tokens for sample $m$. This is especially important for multi-label tasks such as the MPQA Type task, where the model may generate multiple tokens in the output to indicate multiple labels. Using this method, we eventually end up with a vector of the same size as the label set ($|L|$ in Section 4.1).
$$
P_{m}^{<t>}[n] = \frac{e^{\text{Logits}_{m}^{<t>}[\text{ClassId}(n)]}}{\sum_{i=1}^{N} e^{\text{Logits}_{m}^{<t>}[\text{ClassId}(i)]}} \tag{1}
$$

$$
\text{Score}_{m}[n] = \max_{1 \leq i \leq T} \left( P_{m}^{<i>}[n] \right) \tag{2}
$$
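Concretely, Equations 1 and 2 reduce to a few tensor operations. The sketch below assumes PyTorch and a precomputed `class_ids` list holding the vocabulary index of each class's single token (the $ClassId$ mapping); the function name and signature are our own.

```python
import torch

def class_scores(logits: torch.Tensor, class_ids: list) -> torch.Tensor:
    """Compute Score_m (Eq. 2) for one sample from its generation logits.

    logits:    (T, V) -- one row of vocabulary logits per output position
    class_ids: vocabulary index of the single token representing each class
    """
    class_logits = logits[:, class_ids]          # (T, N): keep only the N class tokens
    probs = torch.softmax(class_logits, dim=-1)  # Eq. 1: renormalize over the classes
    return probs.max(dim=0).values               # Eq. 2: max over the T positions, shape (N,)
```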
# 3.2 Sampling Methods
Within each iteration, $M$ instances need to be sampled from the training set and added to the support set of size $K$, which is empty at the beginning of the initial round. More precisely, these instances are sampled from the (simulated) unlabeled training set by considering the inputs and their corresponding embeddings. Only after choosing the samples can we look at the labels of the $M$ instances (simulating the human annotation) and use them in the fine-tuning process. $M$ is a small number and designates the whole selection size, unlike typical FSL classification tasks that select $M$ samples for each class (Ren et al., 2018; Chen et al., 2019; Wang et al., 2023), since we do not have access to those classes in our definition of the problem. The sampling methods that we use in this paper are as follows:
Random: With this method, we simply sample $M$ instances randomly without replacement from the unlabeled pool. This sampling method does not require any embedding data.
Representative (Rep): This method uses the embeddings obtained from the chosen embedding method to cluster the unlabeled data into $M$ groups using the K-Means algorithm. Then, inside each cluster, we select the data point whose embedding is closest (in Euclidean distance) to the cluster centroid.
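A minimal sketch of this selection step, using scikit-learn's K-Means (the library we report using in Appendix B); the function name and signature are illustrative, not our released code.

```python
import numpy as np
from sklearn.cluster import KMeans

def representative_sample(embeddings: np.ndarray, m: int, seed: int = 0) -> list:
    """Return the index of the point nearest (Euclidean) to each of m centroids."""
    km = KMeans(n_clusters=m, random_state=seed).fit(embeddings)
    chosen = []
    for c in range(m):
        members = np.where(km.labels_ == c)[0]           # indices of points in cluster c
        dists = np.linalg.norm(embeddings[members] - km.cluster_centers_[c], axis=1)
        chosen.append(int(members[np.argmin(dists)]))    # closest point to the centroid
    return chosen
```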
Uncertainty (Un): This method can only use the $Sc$ embeddings, selecting the $M$ samples about which the model has the most doubts. We use entropy (Shannon, 1948; Settles, 2009) as our uncertainty measure throughout this paper.
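A sketch of entropy-based selection follows; since the $Sc$ vector of per-class maxima need not sum to one, renormalizing it before computing entropy is our assumption rather than a stated detail.

```python
import numpy as np

def uncertainty_sample(scores: np.ndarray, m: int) -> list:
    """Pick the m samples with the highest entropy over their class scores (Sc)."""
    p = scores / scores.sum(axis=1, keepdims=True)      # renormalize each row (assumption)
    entropy = -(p * np.log(p + 1e-12)).sum(axis=1)      # Shannon entropy per sample
    return np.argsort(entropy)[-m:].tolist()            # the m most uncertain samples
```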
Uncertainty Representative (UnRep): Using this technique, we first choose the $\alpha \times M$ most uncertain samples based on the $Sc$ embeddings. Thereafter, we perform representative sampling on these candidates, based on the $En$ embeddings, to select the final $M$ unlabeled samples.
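Reusing the two sketches above, UnRep might look as follows (again a sketch under our assumptions, not the released code):

```python
def unrep_sample(scores, en_embeddings, m, alpha=10, seed=0):
    """UnRep: keep the alpha*m most uncertain points (Sc), then pick m
    representatives among them by clustering their En embeddings."""
    candidates = uncertainty_sample(scores, alpha * m)             # uncertainty filter
    local = representative_sample(en_embeddings[candidates], m, seed)
    return [candidates[i] for i in local]                          # map back to pool indices
```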
Cluster Uncertainty (CIUn): This strategy first splits the data into $M$ clusters based on the given embeddings using the K-Means algorithm. It then picks, inside each cluster, the data point the model is least confident about, based on the $Sc$ embeddings.
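A corresponding sketch for CIUn, assuming entropy as the uncertainty measure (as in Un) and reusing the imports above:

```python
def ciun_sample(embeddings, scores, m, seed=0):
    """CIUn: cluster into m groups, then take the most uncertain point per cluster."""
    km = KMeans(n_clusters=m, random_state=seed).fit(embeddings)
    p = scores / scores.sum(axis=1, keepdims=True)
    entropy = -(p * np.log(p + 1e-12)).sum(axis=1)
    chosen = []
    for c in range(m):
        members = np.where(km.labels_ == c)[0]
        chosen.append(int(members[np.argmax(entropy[members])]))  # least confident member
    return chosen
```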
All of these methods can be used from the second iteration onwards, but only the ones that do not involve uncertainty (Random and Rep) can be used within the first iteration and/or non-iterative approaches, since there is no previous step in which the model could learn enough about the task to decide which data it has doubts about.
# 4 Experimental Setup
# 4.1 Datasets
We use the MPQA Opinion Corpus, which is annotated at the word or phrase level to extract the following features of the expressed attitudes: type, polarity, and intensity (Wiebe et al., 2005; Wilson, 2008). Refer to Appendix A for more details. Additionally, we use the AG News Corpus (Gulli, 2005) for news topic classification and the English portion of The Multilingual Amazon Reviews Corpus (Keung et al., 2020) for the 5-star rating classification task.
<table><tr><td>Dataset</td><td>Multi-Label</td><td>Train</td><td>Val</td><td>Test</td><td>|L|</td><td>U%</td></tr><tr><td>MPQA Type</td><td>✓</td><td>4,248</td><td>1,060</td><td>1,327</td><td>4</td><td>85.1</td></tr><tr><td>MPQA Polarity</td><td>X</td><td>4,505</td><td>1,123</td><td>1,404</td><td>2</td><td>8.9</td></tr><tr><td>MPQA Intensity</td><td>X</td><td>4,505</td><td>1,123</td><td>1,404</td><td>5</td><td>34.6</td></tr><tr><td>AG News</td><td>X</td><td>118,800</td><td>1,200</td><td>7,600</td><td>4</td><td>0.0</td></tr><tr><td>Amazon Reviews</td><td>X</td><td>200,000</td><td>1,200</td><td>5,000</td><td>5</td><td>0.0</td></tr></table>
Table 1: Dataset statistics. It specifies whether each dataset is multi-label, the sizes of the training, validation, and test splits, the number of classes, and their uniformity.
Table 1 exhibits the statistics and diversity of the datasets. The MPQA dataset does not offer separate training, validation, and test data, so we use the splits of MPQA provided in previous work (Ahmadnia et al., 2024). Although the AG News dataset provides training and test sets, it does not include a validation set. Therefore, we randomly sample the validation data from the training set without replacement. Amazon Reviews offers all three splits, but the given validation set is relatively large. We generate a new set by randomly down-sampling the original validation set to avoid overfitting too easily on the validation data, as suggested by a previous study (Gunel et al., 2020). We maintain the original sets' label distributions for these new validation sets. Following Müller et al. (2022), we define $|L|$ as the cardinality of the label set $L$, and $U = \sum_{l\in L}|f(l) - \frac{1}{|L|}|$ as a uniformity metric ($U = 0$ is uniform), where $f(l)$ is the relative frequency of label $l$.
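For reference, a small sketch of the uniformity metric $U$ (the function name is ours):

```python
from collections import Counter

def uniformity(labels: list) -> float:
    """U = sum_l |f(l) - 1/|L||, where f(l) is the relative label frequency."""
    counts = Counter(labels)
    num_classes = len(counts)
    total = len(labels)
    return sum(abs(c / total - 1 / num_classes) for c in counts.values())

# A perfectly balanced label set yields U = 0:
print(uniformity(["a", "b", "c", "d"] * 25))  # 0.0
```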
# 4.2 Fine-Tuning Experiments
We evaluate our method across a wide range of tasks, comparing it to various baselines and related work leveraging several large language models. Specifically, for FT, we use the base versions of BART and FLAN-T5 with 139M and 248M parameters, respectively (Lewis et al., 2019; Chung et al., 2022).
To get a better intuition about the tasks, we first calculate the majority baselines, which we expect to beat. Additionally, we fine-tune the models using the entire training set as our support set ($K =$ full training set size). These results serve as a top-line, which we do not expect to beat in our FS experiments.
Next, we fine-tune the pre-trained models with varying support set sizes, $K \in \{10, 20, 50, 100\}$, using random sampling, representative sampling, and our proposed iterative approaches. In the iterative approaches, within each iteration, we sample $M = 10$ new data points to be progressively added to our support set, and we present the results when the model has been fine-tuned using support sets of size $K \in \{10, 20, 50, 100\}$. For these experiments, we assign $\alpha$ (Section 3.2) the value of 10. Appendix D includes additional experiments evaluating the impact of different values of $\alpha$.
We further repeat FT experiments, this time with $K \in \{5, 10, 25, 50\}$ while having $M = 5$ in iterative approaches on the MPQA tasks to assess the impact of different selection sizes with the same number of iterations. Additionally, we choose the best-performing models from the AG News and Amazon Reviews tasks and fine-tune them with $M = 16$ over 16 iterations, resulting in a total support set size of $K = 256$ . We compare our results to the best-performing models provided by Müller et al. (2022), namely LT margin and LT k-means.
# 4.3 In-Context Learning Experiments
For ICL, we utilize instruction-tuned models and prompt templates similar to related work (Reid et al., 2024; Dubey et al., 2024), and describe each task alongside a list of possible labels. These prompts can be applied directly in 0-shot settings as the system prompt. For FS settings, we append labeled instances to the system prompt to provide additional context. In the end, we query the model for predictions on each test instance in the user prompt and match the predicted output strings with the corresponding labels. The prompt template for each task is included in Appendix C. If a model does not support system prompts, we concatenate and merge the system and user prompts and treat them as a single (user) prompt. We employ two methods to sample training data for ICL:
Random: Similar to the random sampling method in FT, we select $M$ instances randomly without replacement, without relying on any additional information.
Custom: This approach takes advantage of the instance selection used in the FT methods by reusing the support samples constructed by the encoder-decoder models. This allows decoder-only models to leverage the encoder of the encoder-decoder models. The goal is to assess the performance gain from using samples that the FT models identified as informative and selected. However, the decoder-only models themselves do not participate in identifying or selecting these informative samples, and the encoder-decoder models operate entirely independently of the decoder-only models.
<table><tr><td rowspan="2">Model Name</td><td colspan="5">MPQA Type</td><td colspan="5">MPQA Polarity</td><td colspan="5">MPQA Intensity</td><td></td><td></td><td></td></tr><tr><td>0</td><td>10</td><td>20</td><td>50</td><td>100</td><td>Full</td><td>0</td><td>10</td><td>20</td><td>50</td><td>100</td><td>Full</td><td>0</td><td>10</td><td>20</td><td>50</td><td>100</td><td>Full</td></tr><tr><td>Majority Baseline</td><td>56.6</td><td></td><td></td><td></td><td></td><td>54.8</td><td></td><td></td><td></td><td></td><td>37.2</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Random Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Random</td><td>56.81.4</td><td>56.71.5</td><td>59.51.8</td><td>63.33.3</td><td>80.3</td><td>73.25.1</td><td>78.94.3</td><td>82.91.6</td><td>86.81.7</td><td>92.5</td><td>36.02.4</td><td>37.00.2</td><td>37.10.1</td><td>35.22.1</td><td>47.0</td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Random</td><td>56.24.6</td><td>60.02.9</td><td>65.32.6</td><td>66.72.7</td><td>80.7</td><td>76.52.4</td><td>80.62.4</td><td>85.31.4</td><td>88.40.9</td><td>94.2</td><td>33.03.8</td><td>34.03.6</td><td>35.51.6</td><td>35.51.5</td><td>50.0</td><td></td><td></td><td></td></tr><tr><td>Representative Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)</td><td>56.50.2</td><td>57.11.7</td><td>59.82.7</td><td>64.23.5</td><td></td><td>71.40.0</td><td>76.12.6</td><td>81.81.0</td><td>87.11.2</td><td></td><td>37.00.0</td><td>35.22.1</td><td>37.00.4</td><td>37.30.3</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)</td><td>53.66.2</td><td>63.72.3</td><td>65.11.5</td><td>67.81.9</td><td></td><td>77.55.1</td><td>79.31.8</td><td>85.32.7</td><td>89.21.6</td><td></td><td>33.92.2</td><td>35.40.9</td><td>36.31.1</td><td>35.61.1</td><td></td><td></td><td></td><td></td></tr><tr><td>Iterative 
Approaches</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td>53.66.2</td><td>59.81.4</td><td>63.72.6</td><td>66.92.3</td><td></td><td>77.55.1</td><td>81.25.8</td><td>88.21.9</td><td>91.40.8</td><td></td><td>33.92.2</td><td>36.80.3</td><td>37.40.7</td><td>39.22.1</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td>53.66.2</td><td>61.31.1</td><td>65.81.9</td><td>68.50.8</td><td></td><td>77.55.1</td><td>80.83.4</td><td>87.40.8</td><td>90.61.4</td><td></td><td>33.92.2</td><td>35.32.0</td><td>37.42.5</td><td>38.01.4</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td>53.66.2</td><td>61.42.9</td><td>64.72.1</td><td>68.91.3</td><td></td><td>77.55.1</td><td>80.42.0</td><td>85.60.6</td><td>88.11.4</td><td></td><td>33.92.2</td><td>34.52.3</td><td>36.92.2</td><td>37.81.2</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td>53.66.2</td><td>60.64.6</td><td>63.22.7</td><td>68.82.0</td><td></td><td>77.55.1</td><td>82.42.5</td><td>87.52.0</td><td>90.10.5</td><td></td><td>33.92.2</td><td>36.21.4</td><td>37.40.6</td><td>38.82.2</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td>53.66.2</td><td>59.62.8</td><td>64.42.9</td><td>68.01.9</td><td></td><td>77.55.1</td><td>83.22.4</td><td>88.30.9</td><td>90.40.7</td><td></td><td>33.92.2</td><td>36.30.8</td><td>36.42.0</td><td>38.21.4</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td>53.66.2</td><td>59.23.6</td><td>64.62.0</td><td>69.30.8</td><td></td><td>77.55.1</td><td>81.51.8</td><td>87.51.4</td><td>90.80.8</td><td></td><td>33.92.2</td><td>35.12.9</td><td>38.01.6</td><td>39.71.8</td><td></td><td></td><td></td><td></td></tr><tr><td>In-Context Learning</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Random</td><td>49.3</td><td>50.96.8</td><td>55.23.3</td><td>-</td><td>-</td><td>67.0</td><td>73.02.1</td><td>75.03.3</td><td>-</td><td>-</td><td>-</td><td>32.0</td><td>32.82.6</td><td>33.22.6</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>Gemma 2-Custom</td><td>50.57</td><td>52.95.0</td><td>-</td><td>-</td><td></td><td>73.50.7</td><td>76.12.3</td><td>-</td><td>-</td><td></td><td>33.54.9</td><td>33.84.8</td><td>-</td><td>-</td><td></td><td></td><td></td><td></td></tr><tr><td>Llama 3.1-Random</td><td>43.4</td><td>57.24.6</td><td>58.23.9</td><td>59.53.0</td><td>60.12.9</td><td>64.9</td><td>69.73.7</td><td>70.42.9</td><td>68.36.4</td><td>75.71.9</td><td>-</td><td>23.9</td><td>31.32.2</td><td>31.62.5</td><td>30.61.7</td><td>31.01.9</td><td>-</td><td></td></tr><tr><td>Llama 3.1-Custom</td><td>60.90.9</td><td>61.01.1</td><td>61.81.1</td><td>61.51.1</td><td></td><td>63.33.5</td><td>67.64.7</td><td>72.13.4</td><td>67.24.4</td><td></td><td>-</td><td>32.42.3</td><td>33.02.9</td><td>31.43.3</td><td>31.44.4</td><td></td><td></td><td></td></tr><tr><td>Mistral v0.3-Random</td><td>39.4</td><td>54.54.3</td><td>57.72.6</td><td>58.70.9</td><td>58.52.1</td><td>72.4</td><td>73.82.1</td><td>75.21.3</td><td>76.52.1</td><td>78.13.4</td><td>-</td><td>22.9</td><td>28.92.4</td><td>28.52.4</td><td>29.30.6</td><td>28.72.1</td><td>-</td><td></td></tr><tr><td>Mistral 
v0.3-Custom</td><td>53.94.1</td><td>56.92.0</td><td>58.62.7</td><td>57.84.4</td><td></td><td>72.11.8</td><td>74.11.8</td><td>73.61.2</td><td>73.94.3</td><td></td><td>-</td><td>29.47.2</td><td>30.77.7</td><td>32.65.3</td><td>29.75.0</td><td></td><td></td><td></td></tr><tr><td rowspan="2">Model Name</td><td colspan="5">AG News</td><td colspan="5">Amazon Reviews</td><td colspan="5">Mean</td><td></td><td></td><td></td></tr><tr><td>0</td><td>10</td><td>20</td><td>50</td><td>100</td><td>Full</td><td>0</td><td>10</td><td>20</td><td>50</td><td>100</td><td>Full</td><td>0</td><td>10</td><td>20</td><td>50</td><td>100</td><td>Full</td></tr><tr><td>Majority Baseline</td><td>25.0</td><td></td><td></td><td></td><td></td><td>20.0</td><td></td><td></td><td></td><td></td><td>38.7</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Random Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Random</td><td>75.17.3</td><td>80.94.2</td><td>85.11.0</td><td>86.81.2</td><td>94.2</td><td>32.13.0</td><td>35.72.7</td><td>41.11.8</td><td>45.32.7</td><td>63.2</td><td>54.6</td><td>57.8</td><td>61.1</td><td>63.5</td><td>75.4</td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Random</td><td>71.86.3</td><td>87.31.2</td><td>88.41.2</td><td>89.30.7</td><td>94.4</td><td>47.24.8</td><td>52.74.3</td><td>55.90.7</td><td>58.81.5</td><td>65.7</td><td>56.9</td><td>62.9</td><td>66.1</td><td>67.7</td><td>77.0</td><td></td><td></td><td></td></tr><tr><td>Representative Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)</td><td>61.40.3</td><td>75.911.9</td><td>86.30.4</td><td>86.61.5</td><td></td><td>29.90.0</td><td>34.91.7</td><td>38.72.6</td><td>45.92.7</td><td></td><td>51.2</td><td>55.8</td><td>60.7</td><td>64.2</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)</td><td>86.90.7</td><td>86.31.4</td><td>88.42.1</td><td>89.10.3</td><td></td><td>51.50.2</td><td>51.51.8</td><td>55.02.9</td><td>59.30.9</td><td></td><td>60.7</td><td>63.2</td><td>66.0</td><td>68.2</td><td></td><td></td><td></td><td></td></tr><tr><td>Iterative 
Approaches</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td>86.90.7</td><td>87.71.0</td><td>88.50.6</td><td>88.71.4</td><td></td><td>51.50.2</td><td>54.73.1</td><td>57.01.7</td><td>57.52.8</td><td></td><td>60.7</td><td>64.0</td><td>67.0</td><td>68.7</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td>86.90.7</td><td>87.51.2</td><td>88.71.1</td><td>89.70.2</td><td></td><td>51.50.2</td><td>52.82.2</td><td>57.41.1</td><td>59.51.4</td><td></td><td>60.7</td><td>63.5</td><td>67.3</td><td>69.3</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td>86.90.7</td><td>86.62.4</td><td>88.60.4</td><td>89.30.2</td><td></td><td>51.50.2</td><td>52.62.9</td><td>57.31.0</td><td>58.70.9</td><td></td><td>60.7</td><td>63.1</td><td>66.6</td><td>68.6</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td>86.90.7</td><td>87.60.3</td><td>88.51.2</td><td>89.30.5</td><td></td><td>51.50.2</td><td>45.74.2</td><td>52.43.2</td><td>55.03.1</td><td></td><td>60.7</td><td>62.5</td><td>65.8</td><td>68.4</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td>86.90.7</td><td>86.41.8</td><td>87.61.0</td><td>88.80.7</td><td></td><td>51.50.2</td><td>54.71.8</td><td>58.40.9</td><td>59.40.4</td><td></td><td>60.7</td><td>64.0</td><td>67.0</td><td>69.0</td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td>86.90.7</td><td>87.50.7</td><td>88.120</td><td>89.10.8</td><td></td><td>51.50.2</td><td>52.03.5</td><td>58.31.6</td><td>59.91.1</td><td></td><td>60.7</td><td>63.1</td><td>67.3</td><td>69.8</td><td></td><td></td><td></td><td></td></tr><tr><td>In-Context Learning</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Random</td><td>84.6</td><td>85.21.6</td><td>86.81.4</td><td>-</td><td>-</td><td>62.2</td><td>60.61.6</td><td>60.11.9</td><td>-</td><td>-</td><td>-</td><td>59.0</td><td>60.5</td><td>62.1</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>Gemma 2-Custom</td><td>87.20.5</td><td>88.10.6</td><td>-</td><td>-</td><td></td><td>61.90.6</td><td>60.50.6</td><td>-</td><td>-</td><td></td><td>61.3</td><td>62.3</td><td>-</td><td>-</td><td></td><td></td><td></td><td></td></tr><tr><td>Llama 3.1-Random</td><td>82.5</td><td>85.81.4</td><td>85.11.4</td><td>86.31.3</td><td>86.51.3</td><td>-59.4</td><td>53.35.4</td><td>54.74.8</td><td>57.02.7</td><td>55.63.1</td><td>-54.8</td><td>59.5</td><td>60.0</td><td>60.3</td><td>61.8</td><td>-</td><td></td><td></td></tr><tr><td>Llama 3.1-Custom</td><td>84.81.0</td><td>85.70.7</td><td>86.00.9</td><td>85314</td><td></td><td>50.42.4</td><td>52.22.6</td><td>56.12.4</td><td>54.512</td><td></td><td>-58.4</td><td>59.9</td><td>61.5</td><td>60.0</td><td></td><td></td><td></td><td></td></tr><tr><td>Mistral v0.3-Random</td><td>84.9</td><td>82.92.6</td><td>85.31.4</td><td>86.10.8</td><td>86414</td><td>-54.3</td><td>59.41.2</td><td>58.52.2</td><td>49.77.9</td><td>46.19.0</td><td>-54.8</td><td>59.9</td><td>61.0</td><td>60.1</td><td>59.6</td><td>-</td><td></td><td></td></tr><tr><td>Mistral 
v0.3-Custom</td><td>80.42.8</td><td>86.20.7</td><td>83.22.9</td><td>81936</td><td></td><td>60.00.9</td><td>56.92.2</td><td>45.66.6</td><td>47.72.8</td><td></td><td>59.2</td><td>61.0</td><td>58.7</td><td>58.2</td><td></td><td></td><td></td><td></td></tr></table>
Table 2: The average micro-F1 (\%) results for MPQA Type, MPQA Polarity, MPQA Intensity, AG News, and Amazon Reviews when $M = 10$ (i.e., selection size) in iterative approaches, calculated over five different seeds for the sampling phase. The sub-columns denote $K$ (i.e., total support set size), and the subscripts indicate the standard deviation. Any experiment that encountered out-of-memory errors is marked with a “-” symbol.
In ICL, we apply both random and custom sampling methods on the instruction-tuned models, i.e., Gemma 2, Llama 3.1, and Mistral v0.3 with 9B, 8B, and 7B parameters, respectively. We report results for support sets of sizes $K \in \{0, 10, 20, 50, 100\}$ . Notably, each step's support set is a subset of the support set from the subsequent step. In custom sampling, we only use instances selected by the overall best-performing model in FT experiments.
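To make the prompting setup concrete, the sketch below assembles a $K$-shot chat prompt in the spirit of the Appendix C templates (shown here for MPQA Type). The function and its message format are illustrative assumptions, not the exact code we used.

```python
def build_messages(task_description, support, test_text, has_system_role=True):
    """Assemble a K-shot chat prompt; `support` is a list of (text, label) pairs."""
    system = task_description
    if support:  # few-shot: append the labeled support samples to the system prompt
        shots = "\n\n".join(f"Text: {t}\nType: {l}" for t, l in support)
        system += " Here are some samples:\n\n" + shots
    user = f"Text: {test_text}\nType:"
    if has_system_role:
        return [{"role": "system", "content": system},
                {"role": "user", "content": user}]
    # models without a system role get a single merged user prompt
    return [{"role": "user", "content": system + "\n\n" + user}]
```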
# 5 Discussion of Results
To better understand the impact of different approaches, we analyze the experimental results in this section. Table 2 summarizes the main experimental results for the FT and ICL approaches. The outcomes for the smaller selection size and comparisons with other related work are available in Tables 3 and 4, respectively. Additional fine-grained results, including more intermediate support set sizes, are provided in Appendix E.
The model names in the tables indicate the employed pre-trained model, followed by the sampling method. If an iterative approach is used, the name reflects the sampling method for the first iteration, followed by the method used in the subsequent iterations. Whenever a sampling method can make use of both embedding methods, we specify the method used in parentheses.
# 5.1 Active Few-Shot Learning
Table 2 reveals significant differences in task performance, particularly when fine-tuning on the full dataset.
<table><tr><td rowspan="2">Model Name</td><td colspan="4">MPQA Type</td><td colspan="4">MPQA Polarity</td><td colspan="4">MPQA Intensity</td></tr><tr><td>5</td><td>10</td><td>25</td><td>50</td><td>5</td><td>10</td><td>25</td><td>50</td><td>5</td><td>10</td><td>25</td><td>50</td></tr><tr><td>Random Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Random</td><td>55.03.8</td><td>57.22.4</td><td>58.02.1</td><td>59.31.3</td><td>68.09.0</td><td>72.84.2</td><td>76.83.3</td><td>81.93.4</td><td>32.76.2</td><td>36.01.9</td><td>36.11.5</td><td>36.61.2</td></tr><tr><td>FLAN-T5-Random</td><td>46.88.5</td><td>55.64.1</td><td>59.73.6</td><td>64.52.3</td><td>67.28.9</td><td>74.44.9</td><td>80.51.7</td><td>84.32.2</td><td>28.05.0</td><td>31.04.6</td><td>34.64.9</td><td>36.01.2</td></tr><tr><td>Representative Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)</td><td>52.70.0</td><td>56.20.7</td><td>56.91.6</td><td>59.22.3</td><td>62.915.3</td><td>71.40.0</td><td>78.93.4</td><td>82.52.7</td><td>35.91.7</td><td>37.00.0</td><td>36.41.2</td><td>37.00.6</td></tr><tr><td>FLAN-T5-Rep(En)</td><td>59.32.4</td><td>52.05.6</td><td>62.22.7</td><td>64.52.0</td><td>72.11.3</td><td>78.34.1</td><td>80.61.4</td><td>85.82.6</td><td>29.20.6</td><td>34.32.3</td><td>35.41.4</td><td>36.50.9</td></tr><tr><td>Iterative Approaches</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td>59.32.4</td><td>59.45.2</td><td>63.51.9</td><td>65.71.8</td><td>72.11.3</td><td>73.63.1</td><td>84.72.1</td><td>88.61.6</td><td>29.20.6</td><td>33.32.8</td><td>35.72.7</td><td>38.02.2</td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td>59.32.4</td><td>61.23.2</td><td>61.03.5</td><td>65.12.1</td><td>72.11.3</td><td>81.21.7</td><td>83.52.2</td><td>87.72.1</td><td>29.20.6</td><td>34.02.2</td><td>35.71.9</td><td>37.41.5</td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td>59.32.4</td><td>62.22.0</td><td>63.23.0</td><td>65.42.4</td><td>72.11.3</td><td>78.23.2</td><td>81.92.2</td><td>84.11.7</td><td>29.20.6</td><td>31.92.8</td><td>33.83.1</td><td>34.71.8</td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td>59.32.4</td><td>57.24.7</td><td>62.74.3</td><td>65.01.3</td><td>72.11.3</td><td>79.12.8</td><td>84.31.4</td><td>87.51.5</td><td>29.20.6</td><td>32.62.7</td><td>35.12.5</td><td>38.91.0</td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td>59.32.4</td><td>61.83.3</td><td>63.52.8</td><td>65.02.4</td><td>72.11.3</td><td>80.32.3</td><td>84.01.8</td><td>88.51.8</td><td>29.20.6</td><td>33.71.5</td><td>36.61.0</td><td>37.61.5</td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td>59.32.4</td><td>60.71.7</td><td>63.22.4</td><td>65.12.6</td><td>72.11.3</td><td>78.23.0</td><td>84.51.7</td><td>87.81.3</td><td>29.20.6</td><td>34.11.9</td><td>35.23.8</td><td>37.42.4</td></tr></table>
Table 3: The average micro-F1 (\%) results of MPQA Type, MPQA Polarity, and MPQA Intensity tasks when $M = 5$ (i.e., selection size) in iterative approaches, calculated over ten seeds for the sampling phase. The sub-columns denote $K$ (i.e., total support set size), and the subscripts indicate the standard deviation.
Figure 2: The range (min and max) and average of micro-F1 $(\%)$ scores of all tasks with steps of 10 over five runs.
These variations stem from the distinct nature of the tasks, which span binary, multi-class, and multi-label classification problems. Notably, the MPQA Type and MPQA Intensity tasks involve imbalanced independent label sets, while the remaining tasks are more balanced. Additionally, the MPQA Intensity and Amazon Reviews tasks have a greater number of labels, which are mostly semantically close, adding complexity to the classification. Thus, we believe the chosen tasks offer a diverse and representative spectrum of classification challenges.
As anticipated, the majority baseline generally yields the poorest performance. Both BART-Random and FLAN-T5-Random provide reasonable starting points, with improvements observed as the support set size $K$ increases. A common trend across FT approaches is the progressive enhancement in performance as the support set grows from 10 samples to the full dataset.
Furthermore, the table demonstrates that FLAN-T5-based models work better than BART-based models in most cases. This is why we focus our iterative experiments on FLAN-T5; additional experiments with BART can be found in Appendix E. The results also suggest that simple representative sampling is more effective than random sampling when $K = 100$, even in a non-iterative setup. Nevertheless, the iterative approaches outperform most non-iterative methods when $K \geq 20$. The MPQA Intensity task, in particular, surpasses the majority baseline in FSL experiments only when iterative methods are applied.
Although the five tasks produce distinct outcomes, the iterative approach 'FLAN-T5-Rep(En)-CIUn(En)' usually outperforms the random and representative approaches, doing so consistently when $K \in \{50, 100\}$ in three tasks and when $K = 100$ in four tasks. All of the iterative approaches manage to surpass the best non-iterative approaches on average when $K = 100$. In particular, 'FLAN-T5-Rep(En)-CIUn(En)' stands out, as it beats the best non-iterative methods on average by $1.2\%$ points at $K = 50$ and $1.6\%$ points at $K = 100$. Hence, we recommend using this method in new use cases. Figure 2 captures the contrast between the non-iterative FLAN-T5-based models and the best-performing model at $K = 100$ for each task in greater detail, including the intermediate step results.
# 5.2 Impact of Smaller Selection Size
Table 3 presents additional experimental results for $K \in \{5,10,25,50\}$ on the MPQA dataset across 10 different seeds, using $M = 5$ in the iterative approaches. In our problem context, $K$ represents the overall size of the support set, unlike conventional FSL classification tasks where $K$ refers to the number of samples per class; this makes it challenging to ensure equitable representation of all labels in tasks like MPQA Intensity. Even so, the iterative approaches still surpass the non-iterative methods in most cases. Moreover, the iterative approach 'FLAN-T5-Rep(En)-CIUn(En)' still holds up and beats all the non-iterative approaches at $K = 50$. An interesting insight emerges from comparing the $K = 50$ columns in Tables 2 and 3: $M = 5$ leads to improved performance across nearly all iterative approaches compared to $M = 10$ at the same support set size. This suggests that a smaller $M$ can result in a more effective sample selection process.
# 5.3 In-Context Learning
An examination of the ICL results reveals that these models generally not only underperform the iterative approaches but also struggle to beat the non-iterative FT methods. They even fail to exceed the majority baseline in the MPQA Intensity task. Although ICL models can usually deliver reasonable performance when $K$ is small and may outperform iterative methods on Amazon Reviews, their performance stagnates or even declines as the number of support samples ($K$) increases. This inability of ICL approaches to leverage larger support sets has been noted in previous work as well (Pecher et al., 2024). Additionally, ICL methods tend to exhibit higher standard deviations than FT approaches, indicating greater performance variability.
Comparing the pre-trained models in ICL shows there is no definitive winner between Llama 3.1 and Mistral v0.3. Generally, Mistral v0.3 performs better with smaller $K$ values, while Llama 3.1 surpasses Mistral v0.3 as $K$ increases. However, Gemma 2 stands out as the clear leader, outperforming the other two models in most cases. Interestingly, custom sampling fails to help Llama 3.1 and Mistral v0.3 outperform random sampling, suggesting that these two models do not benefit from instances identified as informative by FLAN-T5. Gemma 2, however, is the only pre-trained model that benefits from custom sampling in most experiments.
# 5.4 Other Related Work
<table><tr><td>Model Name</td><td>AG News</td><td>Amazon Reviews</td></tr><tr><td>LT margin</td><td>86.20.7</td><td>46.61.4</td></tr><tr><td>LT k-means</td><td>82.81.2</td><td>48.60.9</td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td>90.70.2</td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td></td><td>61.01.0</td></tr></table>
Table 4: The average macro-F1 $(\%)$ results for AG News and Amazon Reviews when $M = 16$ and $K = 256$ , calculated over five different seeds in the sampling phase. The subscripts denote standard deviation.
Table 4 compares our best-performing models on AG News and Amazon Reviews datasets with the best-performing models reported in previous work on the same datasets, i.e., LT margin for AG News and LT k-means for Amazon Reviews (Müller et al., 2022). Our models outperform these baselines by a significant margin of $4.5\%$ points on AG News and $12.4\%$ points on Amazon Reviews, demonstrating the effectiveness of our methods. Figure 3 illustrates that LT margin and LT k-means at $K = 256$ struggle to beat the best iterative FT approaches even at small $K$ values. The figure also shows how the performance gains of iterative approaches diminish as $K$ increases. For example, there is a performance improvement of $3.3\%$ points in AG News and $8.2\%$ points in Amazon Reviews when $K$ increases from 16 to 96. Meanwhile, there is only a $1.1\%$ and $1.6\%$ points improvement when $K$ increases from 96 to 256 in AG News and Amazon Reviews, respectively. These results suggest the proposed methods are most effective in FS settings.
Figure 3: The range (min and max) and average of macro-F1 $(\%)$ scores of AG News and Amazon Reviews tasks with steps of 16 over five runs.
# 5.5 Performance-Efficiency Trade-off
In designing our methodology, we prioritized the quality of few-shot instance selection and model performance, as these are critical in applications where model accuracy outweighs time efficiency constraints. However, here we analyze run-time performance to enable practical deployment in time-sensitive scenarios. We present a breakdown of execution times for the different phases of the process, allowing for informed decisions when tuning key parameters to meet efficiency requirements.
<table><tr><td>Model Name</td><td>Embedding</td><td>Sampling</td><td>FT</td><td>Overall</td></tr><tr><td>FLAN-T5-Random</td><td>0:00:00</td><td>0:00:00</td><td>0:10:30</td><td>0:10:30</td></tr><tr><td>FLAN-T5-Rep(En)</td><td>0:00:16</td><td>0:00:03</td><td>0:08:59</td><td>0:09:18</td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td>0:01:58</td><td>0:00:01</td><td>1:48:06</td><td>1:50:04</td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td>0:02:14</td><td>0:00:02</td><td>1:55:16</td><td>1:57:32</td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td>0:01:38</td><td>0:00:07</td><td>1:56:44</td><td>1:58:28</td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td>0:02:03</td><td>0:00:03</td><td>2:08:03</td><td>2:10:09</td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td>0:02:17</td><td>0:00:02</td><td>1:50:20</td><td>1:52:39</td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td>0:01:56</td><td>0:00:04</td><td>1:49:36</td><td>1:51:37</td></tr></table>
Table 5: Accumulated execution time (h:mm:ss) of embedding extraction, sampling, and fine-tuning across different models.
Table 5 presents execution times for embedding extraction, sampling, and fine-tuning phases across different approaches on the MPQA Polarity dataset over a single seed. Durations are accumulated over ten iterations in iterative approaches. Annotation, support set augmentation, and evaluation times are excluded, as they are either negligible or independent of the approach. As expected, non-iterative methods exhibit shorter execution times compared to iterative ones. The results confirm that increasing the number of iterations directly extends the overall run time in a linear fashion, as it naturally requires more computational steps, though performance gains diminish, as illustrated in Figure 2. Thus, tuning the number of iterations is crucial for balancing performance and efficiency.
The embedding extraction time is also influenced by the embedding method used. Method $En$ processes data through the encoder, while $Sc$ requires both encoder and decoder passes, increasing computational cost. If both methods are applied, embeddings can still be obtained in a single model pass. Another key factor is the size of the unlabeled training pool; for large datasets (e.g., Amazon Reviews), random downsampling can significantly reduce execution time when necessary.
The sampling phase is relatively insignificant compared to embedding extraction and fine-tuning. Interestingly, iterative sampling does not take ten times as long as non-iterative representative sampling, since choosing 10 samples per iteration involves simpler computations than choosing all 100 samples in a single iteration. The choice of sampling method also impacts execution time, with non-K-Means approaches typically being faster. As with embedding extraction, downsampling the training pool can further enhance efficiency.
Fine-tuning times vary due to early stopping, with no clear correlation between approach type and fine-tuning duration beyond iteration count. However, fine-tuning time is highly dependent on validation set size, and reducing the validation set can significantly shorten run time when needed. Random downsampling of the validation set offers an effective strategy for optimizing efficiency.
Overall, our findings highlight the key factors (e.g., iteration count, training pool size, and validation set size) that influence execution time. Proper tuning of these parameters enables a flexible trade-off between model performance and computational efficiency.
# 6 Conclusion and Future Work
We propose a novel fine-tuning-based method for sampling data to be used in a few-shot setting with active learning, whereas many previous approaches sample data randomly. We show how different embedding and sampling methods help us achieve better results in classification tasks by choosing and labeling the most informative unlabeled samples: those that represent the variety of the data or that the model is most uncertain about. These methods unleash their full potential when used iteratively, using the fine-tuned model from the previous iteration, surpassing in-context learning approaches and the fine-tuning-based sampling strategies of previous studies.
Future work will expand on new embedding and sampling methods in classification tasks as well as other types of NLP tasks, such as text generation. It will also explore the effect of semi-supervised learning methods on top of our approach in a pipeline, making use of the rest of the unlabeled data to improve performance.
# Acknowledgment
This research is funded in part by NSF IIS and Discovery Partners Institute (DPI) at the University of Illinois Chicago. Any opinions, findings, and conclusions expressed here are those of the authors and do not necessarily reflect the views of NSF or DPI. Rambow gratefully acknowledges support from the Institute for Advanced Computational Science at Stony Brook University.
# Limitations
In the current study, we have centered our attention on English. In the future, we plan to focus on other natural languages and alternative datasets. Furthermore, our proposed methods cannot be directly used in non-classification or non-NLP tasks; they require some modification to be applied to these types of tasks. These experiments also require substantial computational resources, like other AL approaches, since we have to iteratively run the same experiment 10 times with an incrementally augmented support set.
# Ethics Statement
Our current study is fundamental research in the field of NLP and computational linguistics. These fields have many applications, for instance, understanding users' tweets on Twitter, e-commerce applications, and question answering. Although many research projects in these fields have accomplished remarkable results, we do not explicitly recommend using these systems standalone, because there are open issues regarding their robustness and fairness. Hence, we see a need for human experts in interpreting the results. From our point of view, there are no ethical concerns about the platforms, technologies, tools, and algorithms used or proposed in this study. We should also note that the datasets, language models, tools, and libraries we have utilized in this work are all publicly available.
# References
Saeed Ahmadnia, Arash Yousefi Jordehi, Mahsa Hosseini Khasheh Heyran, SeyedAbolghasem Mirroshandel, and Owen Rambow. 2024. Opinion mining using pre-trained large language models: Identifying the type, polarity, intensity, expression, and source of private states. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 12481-12495, Torino, Italia. ELRA and ICCL.
Rinu Boney, Alexander Ilin, et al. 2019. Active one-shot learning with prototypical networks. In ESANN.
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems, volume 33, pages 1877-1901. Curran Associates, Inc.
Ernie Chang, Xiaoyu Shen, Hui-Syuan Yeh, and Vera Demberg. 2021. On training instance selection for few-shot neural text generation. arXiv preprint arXiv:2107.03176.
Wei-Yu Chen, Yen-Cheng Liu, Zsolt Kira, Yu-Chiang Frank Wang, and Jia-Bin Huang. 2019. A closer look at few-shot classification. arXiv preprint arXiv:1904.04232.
Yiming Chen, Yan Zhang, Chen Zhang, Grandee Lee, Ran Cheng, and Haizhou Li. 2021. Revisiting self-training for few-shot learning of language model. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 9125-9135, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
Zhiyu Chen, Harini Eavani, Wenhu Chen, Yinyin Liu, and William Yang Wang. 2020. Few-shot NLG with pre-trained language model. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 183-190, Online. Association for Computational Linguistics.
Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. 2022. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416.
Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
Aleksandra Edwards and Jose Camacho-Collados. 2024. Language models for text classification: Is in-context learning enough? In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 10058-10072, Torino, Italia. ELRA and ICCL.
Tianyu Gao, Adam Fisch, and Danqi Chen. 2021. Making pre-trained language models better few-shot learners. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3816-3830, Online. Association for Computational Linguistics.
Antonio Gulli. 2005. AG's corpus of news articles.
Belize Gunel, Jingfei Du, Alexis Conneau, and Ves Stoyanov. 2020. Supervised contrastive learning for pretrained language model fine-tuning. arXiv preprint arXiv:2011.01403.
AQ Jiang, A Sablayrolles, A Mensch, C Bamford, DS Chaplot, D de las Casas, F Bressand, G Lengyel, G Lample, L Saulnier, et al. 2023. Mistral 7B. arXiv preprint arXiv:2310.06825.
Rabeeh Karimi Mahabadi, Luke Zettlemoyer, James Henderson, Lambert Mathias, Marzieh Saeidi, Veselin Stoyanov, and Majid Yazdani. 2022. Prompt-free and efficient few-shot learning with language models. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3638-3652, Dublin, Ireland. Association for Computational Linguistics.
Phillip Keung, Yichao Lu, György Szarvas, and Noah A. Smith. 2020. The multilingual Amazon reviews corpus. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4563-4568, Online. Association for Computational Linguistics.
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461.
Xiaorun Li, Zeyu Cao, Liaoying Zhao, and Jianfeng Jiang. 2021. Alpn: Active-learning-based prototypical network for few-shot hyperspectral imagery classification. IEEE Geoscience and Remote Sensing Letters, 19:1-5.
Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, et al. 2022. Few-shot learning with multilingual generative language models. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 9019-9052.
Thomas Müller, Guillermo Pérez-Torró, Angelo Basile, and Marc Franco-Salvador. 2022. Active few-shot learning with FASL. In International Conference on Applications of Natural Language to Information Systems, pages 98-110. Springer.
Branislav Pecher, Ivan Srba, Maria Bielikova, and Joaquin Vanschoren. 2024. Automatic combination of sample selection strategies for few-shot learning. arXiv preprint arXiv:2402.03038.
Pouya Pezeshkpour, Zhengli Zhao, and Sameer Singh. 2020. On the utility of active instance selection for few-shot learning. NeurIPS HAMLETS.
Machel Reid, Nikolay Savinov, Denis Teplyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-Baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530.
Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell, Kevin Swersky, Joshua B Tenenbaum, Hugo Larochelle, and Richard S Zemel. 2018. Meta-learning for semi-supervised few-shot classification. arXiv preprint arXiv:1803.00676.
Timo Schick and Hinrich Schütze. 2021a. Few-shot text generation with natural language instructions. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 390-402.
Timo Schick and Hinrich Schütze. 2021b. It's not just size that matters: Small language models are also few-shot learners. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2339-2352, Online. Association for Computational Linguistics.
Burr Settles. 2009. Active learning literature survey. Computer Sciences Technical Report 1648, University of Wisconsin-Madison.
Claude E Shannon. 1948. A mathematical theory of communication. The Bell system technical journal, 27(3):379-423.
Junsup Shin, Youngwook Kang, Seungjin Jung, and Jongwon Choi. 2022. Active instance selection for few-shot classification. IEEE Access.
Qianru Sun, Yaoyao Liu, Tat-Seng Chua, and Bernt Schiele. 2019. Meta-transfer learning for few-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR).
Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. 2024. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118.
Xixi Wang, Xiao Wang, Bo Jiang, and Bin Luo. 2023. Few-shot learning meets transformer: Unified query-support transformers for few-shot classification. IEEE Transactions on Circuits and Systems for Video Technology.
Janyce Wiebe, Theresa Wilson, and Claire Cardie. 2005. Annotating expressions of opinions and emotions in language. Language resources and evaluation, 39(2):165-210.
Theresa Ann Wilson. 2008. Fine-grained subjectivity and sentiment analysis: recognizing the intensity, polarity, and attitudes of private states. University of Pittsburgh.
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Peter Liu. 2020. Pegasus: Pre-training with extracted gap-sentences for abstractive summarization. In International Conference on Machine Learning, pages 11328-11339. PMLR.
Jingbo Zhu, Huizhen Wang, Benjamin K Tsou, and Matthew Ma. 2009. Active learning with sampling by uncertainty and density for data annotations. IEEE Transactions on Audio, Speech, and Language Processing, 18(6):1323-1331.
# A MPQA Opinion Corpus
A sentence may contain expressions that reflect different private states with various attitudes. These attitudes can belong to different types, and each type can express negative or positive opinions (polarity) toward targets with varying degrees of strength (intensity) (Wiebe et al., 2005; Wilson, 2008).
<table><tr><td>Task</td><td>Attitude Type</td><td>Input Sentence</td><td>Output</td></tr><tr><td>T</td><td>-</td><td>The new US policy deserves to be closely analyzed and monitored.</td><td>arguing sentiment</td></tr><tr><td>P</td><td>intention</td><td>Canada is among the countries that have pledged to ratify the accord.</td><td>positive</td></tr><tr><td>I</td><td>sentiment</td><td>There is a deep faith here, however, in the power of democracy.</td><td>high</td></tr></table>

Table 6: The examples for Type (T), Polarity (P), and Intensity (I) tasks. The expressions within the sentences are in bold.
The original MPQA annotation scheme comprises six types of attitudes. We remove the other and speculation types in our experiments, as these types of attitudes do not hold a polarity. That leaves us with a 4-class classification task for the type. Furthermore, an expression in a sentence may have zero to four attitude-type labels, based on the expression itself and the sentence that contains it. This makes type identification a multi-label classification task. Subsequently, we identify polarity and intensity using the attitude type, the expression that holds the attitude, and the expression's containing sentence as the input. This input can have only one specific polarity and one intensity, which makes these tasks binary and 5-class multi-class classification tasks, respectively.
An example for each task is available in Table 6, and all labels and their distributions are as follows: type: agreement (×284), arguing (×2,466), intention (×420), and sentiment (×3,862); polarity: negative (×3,200) and positive (×3,832); and intensity: low (×658), low-medium (×1,262), medium (×2,615), medium-high (×1,258), and high (×1,239).
# B Implementation Details
Our models were implemented with the PyTorch neural network framework. Furthermore, we utilized the scikit-learn, NumPy, and Matplotlib packages. We used the facebook/bart-base and google/flan-t5-base models in FT, and google/gemma-2-9b-it, meta-llama/Llama-3.1-8B-Instruct, and mistralai/Mistral-7B-Instruct-v0.3 in ICL, along with their tokenizers, from the Hugging Face Transformers library (Wolf et al., 2020). For the ICL tasks, non-MPQA tasks, performance-efficiency trade-off experiments, BART-based MPQA Polarity iterative experiments, and the analysis of the impact of parameter $\alpha$, the models were executed on a single NVIDIA RTX A5000 24 GB GPU and an AMD EPYC 7662 3.28 GHz 64-core CPU. The rest of the tasks (many of the FT MPQA tasks) were executed on a single NVIDIA A100 40 GB GPU and dual AMD Rome 7742 CPUs (each with 64 cores at 2.25 GHz). The maximum amount of GPU memory used by our approaches was 18 GB. They also required a maximum of 16 GB of RAM.

<table><tr><td>Parameter</td><td>BART</td><td>FLAN-T5</td></tr><tr><td>Batch Size</td><td>10</td><td>10</td></tr><tr><td>Learning Rate</td><td>5e-5</td><td>1e-4</td></tr><tr><td>Dropout Rate</td><td>0.1</td><td>0.1</td></tr><tr><td>Optimizer</td><td>AdamW</td><td>AdamW</td></tr><tr><td>Early Stopping</td><td>20 epochs</td><td>20 epochs</td></tr></table>

Table 7: The hyperparameters for BART- and FLAN-T5-based models.

All results in this paper are reproducible by setting the random seeds to fixed numbers. The hyperparameters used in our experiments are listed in Table 7. In the present study, we utilized only publicly available datasets; hence, we did not use any human annotators.
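
For concreteness, the sketch below shows how a FLAN-T5 classifier could be fine-tuned with the Table 7 hyperparameters (batch size 10, learning rate 1e-4, AdamW, early stopping after 20 epochs without improvement). This is a minimal illustration, not the authors' exact code; the seed value, data loading, and label verbalization are assumptions.

```python
import random
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

def set_seed(seed: int) -> None:
    # Fix all random seeds so repeated runs are reproducible, as in the paper.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

set_seed(42)  # hypothetical seed value

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)  # Table 7

def train_epoch(batches):
    """Run one epoch over (texts, labels) batches of size 10 (Table 7)."""
    model.train()
    for texts, labels in batches:  # labels are verbalized class names
        inputs = tokenizer(texts, padding=True, truncation=True,
                           return_tensors="pt")
        targets = tokenizer(labels, padding=True, return_tensors="pt").input_ids
        targets[targets == tokenizer.pad_token_id] = -100  # ignore pad in loss
        loss = model(**inputs, labels=targets).loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# Early stopping: stop once validation micro-F1 has not improved for
# 20 consecutive epochs (Table 7), then restore the best checkpoint.
```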

# C ICL Templates

This section contains the prompt templates that we used in the ICL experiments: MPQA Type in Figure 4, MPQA Polarity in Figure 5, MPQA Intensity in Figure 6, AG News in Figure 7, and Amazon Reviews in Figure 8.

# D Impact of Parameter $\alpha$

Table 8 presents the results of the "FLAN-T5-Rep(En)-UnRep" approach for $\alpha \in \{1,2,5,10,20,50\}$ with $M = 10$ using the MPQA Polarity dataset.

# E Fine-Grained Experiments

Tables 9 (MPQA Type), 10 (MPQA Polarity), 11 (MPQA Intensity), 12 (AG News), and 13 (Amazon Reviews) present the main experiments from Table 2, along with intermediate steps for better comparison. Table 10 also includes additional experiments for BART-based iterative approaches using the MPQA Polarity dataset. The results are reported for support set sizes of $K \in \{0,5,10,20,30,40,50,60,70,80,90,100,\text{Full}\}$. Some of the experiments encountered out-of-memory errors.

# System:

You are a sentiment classifier. For each message, predict the type of an expressed private state as (sentiment, intention, arguing, or agreement) given the whole sentence for context, followed by the expression itself. Each entry may belong to one or more classes. Return the predicted classes with exactly one space between each if there is more than one class. Do not add any additional information, comment, or formatting.

# User:

Text: [Test Sample]

Type:

# System:

You are a sentiment classifier. For each message, predict the type of an expressed private state as (sentiment, intention, arguing, or agreement) given the whole sentence for context, followed by the expression itself. Each entry may belong to one or more classes. Return the predicted classes with exactly one space between each if there is more than one class. Do not add any additional information, comment, or formatting. Here are some samples:

Text: [Support Sample #1]

Type: [Label of Support Sample #1]

···

Text: [Support Sample #K]

Type: [Label of Support Sample #K]

# User:

Text: [Test Sample]

Type:

Figure 4: Prompt templates for the MPQA Type task. The upper box shows a sample for zero-shot learning, and the lower one shows a sample for $K$-shot learning.
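
These templates map naturally onto a chat-style message list. The following is a minimal sketch, assuming the Hugging Face chat-template API, of how the $K$-shot MPQA Type prompt of Figure 4 could be assembled; the function and variable names are illustrative, and the other tasks follow analogously with their own system instructions and field names.

```python
from transformers import AutoTokenizer

SYSTEM = (
    "You are a sentiment classifier. For each message, predict the type of an "
    "expressed private state as (sentiment, intention, arguing, or agreement) "
    "given the whole sentence for context, followed by the expression itself. "
    "Each entry may belong to one or more classes. Return the predicted classes "
    "with exactly one space between each if there is more than one class. Do not "
    "add any additional information, comment, or formatting."
)

def build_prompt(tokenizer, support_set, test_text):
    """Assemble the prompt of Figure 4 (zero-shot when support_set is empty)."""
    system = SYSTEM
    if support_set:
        # Few-shot: append the labeled support samples to the system message.
        demos = "\n".join(f"Text: {t}\nType: {y}" for t, y in support_set)
        system += " Here are some samples:\n" + demos
    # Note: some chat templates (e.g., Gemma 2) have no system role; there the
    # instruction would need to be folded into the user turn instead.
    messages = [
        {"role": "system", "content": system},
        {"role": "user", "content": f"Text: {test_text}\nType:"},
    ]
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

# Example usage with a hypothetical one-sample support set:
tok = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
print(build_prompt(tok, [("an example sentence", "sentiment")], "a test sentence"))
```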

# System:

You are a sentiment classifier. For each message, predict the polarity of an expressed private state as (negative or positive) given the type of the private state, followed by the whole sentence for context, followed by the expression itself at the end. Return your prediction without adding any additional information, comment, or formatting.

# User:

Text: [Test Sample]

Polarity:

# System:

You are a sentiment classifier. For each message, predict the polarity of an expressed private state as (negative or positive) given the type of the private state, followed by the whole sentence for context, followed by the expression itself at the end. Return your prediction without adding any additional information, comment, or formatting. Here are some samples:

Text: [Support Sample #1]

Polarity: [Label of Support Sample #1]

···

Text: [Support Sample #K]

Polarity: [Label of Support Sample #K]

# User:

Text: [Test Sample]

Polarity:

Figure 5: Prompt templates for the MPQA Polarity task. The upper box shows a sample for zero-shot learning, and the lower one shows a sample for $K$-shot learning.

# System:

You are a sentiment classifier. For each message, predict the intensity of an expressed private state as (low, low-medium, medium, medium-high, or high) given the type of the private state, followed by the whole sentence for context, followed by the expression itself at the end. Return your prediction without adding any additional information, comment, or formatting.

# User:

Text: [Test Sample]

Intensity:

# System:

You are a sentiment classifier. For each message, predict the intensity of an expressed private state as (low, low-medium, medium, medium-high, or high) given the type of the private state, followed by the whole sentence for context, followed by the expression itself at the end. Return your prediction without adding any additional information, comment, or formatting. Here are some samples:

Text: [Support Sample #1]

Intensity: [Label of Support Sample #1]

···

Text: [Support Sample #K]

Intensity: [Label of Support Sample #K]

# User:

Text: [Test Sample]

Intensity:

Figure 6: Prompt templates for the MPQA Intensity task. The upper box shows a sample for zero-shot learning, and the lower one shows a sample for $K$-shot learning.

# System:

You are a news article classifier. For each message, predict the category of a news article as (World, Sports, Business, or Sci/Tech) given the news article title and description. Return the predicted class without any additional comment.

# User:

Article: [Test Sample]

Class:

# System:

You are a news article classifier. For each message, predict the category of a news article as (World, Sports, Business, or Sci/Tech) given the news article title and description. Return the predicted class without any additional comment. Here are some samples:

Article: [Support Sample #1]

Class: [Label of Support Sample #1]

···

Article: [Support Sample #K]

Class: [Label of Support Sample #K]

# User:

Article: [Test Sample]

Class:

Figure 7: Prompt templates for the AG News task. The upper box shows a sample for zero-shot learning, and the lower one shows a sample for $K$-shot learning.

# System:

You are a sentiment classifier. For each message, predict the number of stars (1, 2, 3, 4, or 5) given by a user based on the given review text. Return the predicted number of stars without any additional comment.

# User:

Review: [Test Sample]

Stars:

# System:

You are a sentiment classifier. For each message, predict the number of stars (1, 2, 3, 4, or 5) given by a user based on the given review text. Return the predicted number of stars without any additional comment. Here are some samples:

Review: [Support Sample #1]

Stars: [Label of Support Sample #1]

···

Review: [Support Sample #K]

Stars: [Label of Support Sample #K]

# User:

Review: [Test Sample]

Stars:

Figure 8: Prompt templates for the Amazon Reviews task. The upper box shows a sample for zero-shot learning, and the lower one shows a sample for $K$-shot learning.

<table><tr><td>α</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td></tr><tr><td>1</td><td>76.4<sub>3.7</sub></td><td>80.3<sub>1.0</sub></td><td>82.9<sub>3.1</sub></td><td>84.3<sub>2.8</sub></td><td>86.3<sub>1.6</sub></td><td>88.6<sub>1.2</sub></td><td>88.1<sub>1.3</sub></td><td>90.3<sub>1.5</sub></td><td>90.2<sub>0.5</sub></td><td>90.6<sub>1.2</sub></td></tr><tr><td>2</td><td>76.4<sub>3.7</sub></td><td>81.6<sub>1.1</sub></td><td>83.5<sub>1.4</sub></td><td>85.2<sub>1.9</sub></td><td>86.5<sub>0.6</sub></td><td>87.8<sub>2.5</sub></td><td>88.9<sub>1.6</sub></td><td>90.5<sub>1.4</sub></td><td>90.7<sub>1.0</sub></td><td>91.0<sub>0.6</sub></td></tr><tr><td>5</td><td>76.4<sub>3.7</sub></td><td>81.3<sub>3.3</sub></td><td>83.4<sub>2.2</sub></td><td>86.7<sub>1.9</sub></td><td>87.4<sub>2.6</sub></td><td>87.5<sub>2.1</sub></td><td>88.9<sub>1.7</sub></td><td>90.2<sub>0.9</sub></td><td>91.0<sub>1.1</sub></td><td>91.1<sub>0.9</sub></td></tr><tr><td>10</td><td>76.4<sub>3.7</sub></td><td>80.7<sub>2.6</sub></td><td>83.1<sub>0.5</sub></td><td>84.9<sub>1.6</sub></td><td>86.7<sub>1.3</sub></td><td>89.5<sub>1.7</sub></td><td>89.8<sub>1.0</sub></td><td>90.0<sub>0.4</sub></td><td>91.0<sub>0.6</sub></td><td>91.1<sub>0.5</sub></td></tr><tr><td>20</td><td>76.4<sub>3.7</sub></td><td>80.4<sub>1.4</sub></td><td>80.4<sub>5.6</sub></td><td>83.3<sub>2.7</sub></td><td>84.5<sub>1.3</sub></td><td>86.5<sub>1.8</sub></td><td>88.2<sub>0.6</sub></td><td>89.4<sub>1.1</sub></td><td>90.3<sub>1.0</sub></td><td>91.2<sub>0.7</sub></td></tr><tr><td>50</td><td>76.4<sub>3.7</sub></td><td>79.8<sub>3.0</sub></td><td>83.2<sub>1.8</sub></td><td>84.8<sub>1.9</sub></td><td>85.9<sub>1.8</sub></td><td>88.3<sub>2.1</sub></td><td>89.1<sub>2.5</sub></td><td>90.0<sub>1.8</sub></td><td>90.7<sub>1.3</sub></td><td>91.3<sub>1.4</sub></td></tr></table>

Table 8: The average micro-F1 (%) results for MPQA Polarity when $M = 10$ (i.e., selection size) in the "FLAN-T5-Rep(En)-UnRep" approach, calculated over five different seeds for the sampling phase. The sub-columns denote $K$ (i.e., total support set size), and the subscripts indicate the standard deviation.
<table><tr><td>Model Name</td><td>0</td><td>5</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td><td>Full</td></tr><tr><td>Majority Baseline</td><td>56.6</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Random Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Random</td><td></td><td>56.81.4</td><td>56.71.5</td><td></td><td></td><td>59.51.8</td><td></td><td></td><td></td><td></td><td></td><td>63.33.3</td><td>80.3</td></tr><tr><td>FLAN-T5-Random</td><td></td><td>56.24.6</td><td>60.02.9</td><td>62.52.1</td><td>62.23.2</td><td>65.32.6</td><td>65.81.6</td><td>67.61.3</td><td>67.50.9</td><td>67.41.8</td><td>66.72.7</td><td>80.7</td><td></td></tr><tr><td>Representative Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)</td><td></td><td>56.50.2</td><td>57.11.7</td><td></td><td></td><td>59.82.7</td><td></td><td></td><td></td><td></td><td>64.23.5</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)</td><td></td><td>53.66.2</td><td>63.72.3</td><td>62.51.8</td><td>62.51.9</td><td>65.11.5</td><td>67.02.0</td><td>66.12.6</td><td>67.11.1</td><td>67.31.4</td><td>67.81.9</td><td></td><td></td></tr><tr><td>Iterative Approaches</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td></td><td>53.66.2</td><td>59.81.4</td><td>59.92.2</td><td>61.41.9</td><td>63.72.6</td><td>64.01.9</td><td>66.32.8</td><td>66.52.5</td><td>67.42.4</td><td>66.92.3</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td></td><td>53.66.2</td><td>61.31.1</td><td>63.42.1</td><td>64.71.4</td><td>65.81.9</td><td>68.21.3</td><td>67.11.7</td><td>67.80.9</td><td>68.21.8</td><td>68.50.8</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td></td><td>53.66.2</td><td>61.42.9</td><td>63.12.2</td><td>63.42.0</td><td>64.72.1</td><td>66.41.4</td><td>67.12.2</td><td>69.01.5</td><td>68.71.5</td><td>68.91.3</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td></td><td>53.66.2</td><td>60.64.6</td><td>62.52.8</td><td>62.03.1</td><td>63.22.7</td><td>65.82.2</td><td>66.81.3</td><td>66.71.9</td><td>67.40.5</td><td>68.82.0</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td></td><td>53.66.2</td><td>59.62.8</td><td>63.32.5</td><td>64.81.0</td><td>64.42.9</td><td>65.61.8</td><td>65.41.3</td><td>66.91.7</td><td>68.02.2</td><td>68.01.9</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td></td><td>53.66.2</td><td>59.23.6</td><td>61.23.2</td><td>63.41.3</td><td>64.62.0</td><td>67.02.3</td><td>67.91.3</td><td>67.71.2</td><td>68.01.4</td><td>69.30.8</td><td></td><td></td></tr><tr><td>In-Context Learning</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Random</td><td>49.3</td><td>48.46.2</td><td>50.96.8</td><td>55.23.3</td><td>63.01.5</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Custom</td><td></td><td></td><td>50.55.7</td><td>52.95.0</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Llama 
3.1-Random</td><td>43.4</td><td>57.04.7</td><td>57.24.6</td><td>58.23.9</td><td>59.42.9</td><td>58.93.6</td><td>59.53.0</td><td></td><td></td><td></td><td>60.12.9</td><td></td><td></td></tr><tr><td>Llama 3.1-Custom</td><td></td><td></td><td>60.90.9</td><td>61.01.1</td><td></td><td></td><td>61.81.1</td><td></td><td></td><td></td><td>61.51.1</td><td></td><td></td></tr><tr><td>Mistral v0.3-Random</td><td>39.4</td><td>49.44.5</td><td>54.54.3</td><td>57.72.6</td><td>58.00.8</td><td>57.61.8</td><td>58.70.9</td><td></td><td></td><td></td><td>58.52.1</td><td></td><td></td></tr><tr><td>Mistral v0.3-Custom</td><td></td><td></td><td>53.94.1</td><td>56.92.0</td><td></td><td></td><td>58.62.7</td><td></td><td></td><td></td><td>57.84.4</td><td></td><td></td></tr></table>

Table 9: The average micro-F1 (%) results for MPQA Type when $M = 10$ (i.e., selection size) in iterative approaches, calculated over five different seeds for the sampling phase. The sub-columns denote $K$ (i.e., total support set size), and the subscripts indicate the standard deviation.
<table><tr><td>Model Name</td><td>0</td><td>5</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td><td>Full</td></tr><tr><td>Majority Baseline</td><td>54.8</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Random Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Random</td><td></td><td>73.25.1</td><td>78.94.3</td><td></td><td></td><td></td><td>82.91.6</td><td></td><td></td><td></td><td></td><td>86.81.7</td><td>92.5</td></tr><tr><td>FLAN-T5-Random</td><td></td><td>76.52.4</td><td>80.62.4</td><td>81.72.7</td><td>82.91.2</td><td>85.31.4</td><td>86.80.9</td><td>86.51.3</td><td>87.21.3</td><td>88.31.4</td><td>88.40.9</td><td>88.40.9</td><td>94.2</td></tr><tr><td>Representative Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)</td><td></td><td>71.40.0</td><td>76.12.6</td><td></td><td></td><td></td><td>81.81.0</td><td></td><td></td><td></td><td></td><td>87.11.2</td><td></td></tr><tr><td>FLAN-T5-Rep(En)</td><td></td><td>77.55.1</td><td>79.31.8</td><td>80.80.6</td><td>82.61.7</td><td>85.32.7</td><td>87.70.7</td><td>87.72.0</td><td>87.71.1</td><td>88.72.6</td><td>89.21.6</td><td></td><td></td></tr><tr><td>Iterative Approaches</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)-Un</td><td></td><td>77.85.5</td><td>80.72.0</td><td>81.73.0</td><td>80.54.2</td><td>84.62.4</td><td>83.52.6</td><td>84.74.8</td><td>87.01.3</td><td>87.82.9</td><td>87.52.5</td><td></td><td></td></tr><tr><td>BART-Rep(En)-Rep(Sc)</td><td></td><td>77.85.5</td><td>77.54.9</td><td>77.43.4</td><td>82.14.4</td><td>81.75.4</td><td>82.93.9</td><td>84.92.0</td><td>84.32.5</td><td>86.02.0</td><td>86.61.9</td><td></td><td></td></tr><tr><td>BART-Rep(En)-Rep(En)</td><td></td><td>77.85.5</td><td>79.83.9</td><td>81.23.1</td><td>80.43.9</td><td>84.13.3</td><td>84.62.7</td><td>87.41.2</td><td>86.41.4</td><td>85.72.5</td><td>88.20.8</td><td></td><td></td></tr><tr><td>BART-Rep(En)-UnRep</td><td></td><td>77.85.5</td><td>78.34.3</td><td>80.12.5</td><td>80.74.5</td><td>83.54.2</td><td>82.94.5</td><td>85.11.9</td><td>83.94.9</td><td>82.55.5</td><td>86.21.5</td><td></td><td></td></tr><tr><td>BART-Rep(En)-CIUn(Sc)</td><td></td><td>77.85.5</td><td>80.14.5</td><td>81.13.4</td><td>83.62.5</td><td>84.62.2</td><td>83.52.6</td><td>85.31.6</td><td>83.23.4</td><td>86.32.1</td><td>87.22.5</td><td></td><td></td></tr><tr><td>BART-Rep(En)-CIUn(En)</td><td></td><td>77.85.5</td><td>79.24.4</td><td>83.02.8</td><td>83.42.4</td><td>84.01.6</td><td>84.12.2</td><td>82.23.3</td><td>84.84.6</td><td>86.62.1</td><td>87.21.6</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td></td><td>77.55.1</td><td>81.25.8</td><td>84.13.0</td><td>85.82.7</td><td>88.21.9</td><td>88.41.6</td><td>89.71.0</td><td>90.80.8</td><td>90.71.1</td><td>91.40.8</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td></td><td>77.55.1</td><td>80.83.4</td><td>82.61.2</td><td>84.92.3</td><td>87.40.8</td><td>86.52.6</td><td>87.42.4</td><td>89.61.1</td><td>89.91.5</td><td>90.61.4</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td></td><td>77.55.1</td><td>80.42.0</td><td>82.21.7</td><td>84.02.3</td><td>85.60.6</td><td>85.51.2</td><td>87.01.5</td><
td>86.42.0</td><td>87.40.7</td><td>88.11.4</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td></td><td>77.55.1</td><td>82.42.5</td><td>84.12.4</td><td>85.31.0</td><td>87.52.0</td><td>88.41.7</td><td>89.01.2</td><td>89.21.5</td><td>89.70.8</td><td>90.10.5</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td></td><td>77.55.1</td><td>83.22.4</td><td>85.42.6</td><td>85.52.0</td><td>88.30.9</td><td>89.10.5</td><td>89.60.8</td><td>89.50.8</td><td>90.40.7</td><td>90.40.7</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td></td><td>77.55.1</td><td>81.51.8</td><td>85.02.2</td><td>85.51.4</td><td>87.51.4</td><td>88.81.5</td><td>89.81.0</td><td>89.71.2</td><td>91.00.5</td><td>90.80.8</td><td></td><td></td></tr><tr><td>In-Context Learning</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Random</td><td>67.0</td><td>71.91.9</td><td>73.02.1</td><td>75.03.3</td><td>76.13.7</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Custom</td><td></td><td></td><td>73.50.7</td><td>76.12.3</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Llama 3.1-Random</td><td>64.9</td><td>67.03.9</td><td>69.73.7</td><td>70.42.9</td><td>72.93.1</td><td>75.42.8</td><td>68.36.4</td><td></td><td></td><td></td><td>75.71.9</td><td></td><td></td></tr><tr><td>Llama 3.1-Custom</td><td></td><td></td><td>63.33.5</td><td>67.64.7</td><td></td><td></td><td>72.13.4</td><td></td><td></td><td></td><td>67.24.4</td><td></td><td></td></tr><tr><td>Mistral v0.3-Random</td><td>72.4</td><td>71.23.0</td><td>73.82.1</td><td>75.21.3</td><td>75.81.1</td><td>75.81.7</td><td>76.52.1</td><td></td><td></td><td></td><td>78.13.4</td><td></td><td></td></tr><tr><td>Mistral v0.3-Custom</td><td></td><td></td><td>72.11.8</td><td>74.11.8</td><td></td><td></td><td>73.61.2</td><td></td><td></td><td></td><td>73.94.3</td><td></td><td></td></tr></table>

Table 10: The average micro-F1 (%) results for MPQA Polarity when $M = 10$ (i.e., selection size) in iterative approaches, calculated over five different seeds for the sampling phase. The sub-columns denote $K$ (i.e., total support set size), and the subscripts indicate the standard deviation.
<table><tr><td>Model Name</td><td>0</td><td>5</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td><td>Full</td></tr><tr><td>Majority Baseline</td><td>37.2</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Random Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Random</td><td></td><td></td><td>36.02.4</td><td>37.00.2</td><td></td><td></td><td>37.10.1</td><td></td><td></td><td></td><td></td><td>35.22.1</td><td>47.0</td></tr><tr><td>FLAN-T5-Random</td><td></td><td></td><td>33.03.8</td><td>34.03.6</td><td>36.20.9</td><td>36.90.7</td><td>35.51.6</td><td>36.11.7</td><td>36.01.4</td><td>35.51.5</td><td>35.61.4</td><td>35.51.5</td><td>50.0</td></tr><tr><td>Representative Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)</td><td></td><td></td><td>37.00.0</td><td>35.22.1</td><td></td><td></td><td>37.00.4</td><td></td><td></td><td></td><td></td><td>37.30.3</td><td></td></tr><tr><td>FLAN-T5-Rep(En)</td><td></td><td></td><td>33.92.2</td><td>35.40.9</td><td>36.10.8</td><td>35.51.6</td><td>36.31.1</td><td>35.92.0</td><td>36.71.4</td><td>36.81.3</td><td>36.81.0</td><td>35.61.1</td><td></td></tr><tr><td>Iterative Approaches</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td></td><td></td><td>33.92.2</td><td>36.80.3</td><td>36.81.2</td><td>37.21.8</td><td>37.40.7</td><td>37.02.4</td><td>37.22.6</td><td>38.42.7</td><td>38.51.9</td><td>39.22.1</td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td></td><td></td><td>33.92.2</td><td>35.32.0</td><td>35.52.5</td><td>36.92.5</td><td>37.42.5</td><td>37.53.1</td><td>37.61.2</td><td>37.50.7</td><td>38.41.3</td><td>38.01.4</td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td></td><td></td><td>33.92.2</td><td>34.52.3</td><td>35.13.1</td><td>36.21.5</td><td>36.92.2</td><td>37.10.7</td><td>37.81.6</td><td>37.21.3</td><td>37.00.7</td><td>37.81.2</td><td></td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td></td><td></td><td>33.92.2</td><td>36.21.4</td><td>36.70.7</td><td>36.80.5</td><td>37.40.6</td><td>37.32.7</td><td>37.22.8</td><td>36.51.0</td><td>38.42.4</td><td>38.82.2</td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td></td><td></td><td>33.92.2</td><td>36.30.8</td><td>35.91.7</td><td>36.21.0</td><td>36.42.0</td><td>37.21.1</td><td>37.52.9</td><td>37.61.3</td><td>36.81.3</td><td>38.21.4</td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td></td><td></td><td>33.92.2</td><td>35.12.9</td><td>36.01.7</td><td>37.02.9</td><td>38.01.6</td><td>38.71.7</td><td>38.32.0</td><td>39.51.5</td><td>38.71.9</td><td>39.71.8</td><td></td></tr><tr><td>In-Context Learning</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Random</td><td>32.0</td><td>32.34.1</td><td>32.82.6</td><td>33.22.6</td><td>34.61.9</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Custom</td><td></td><td></td><td>33.54.9</td><td>33.84.8</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Llama 
3.1-Random</td><td>23.9</td><td>26.91.0</td><td>31.32.2</td><td>31.62.5</td><td>31.82.2</td><td>31.20.7</td><td>30.61.7</td><td></td><td></td><td></td><td></td><td>31.01.9</td><td></td></tr><tr><td>Llama 3.1-Custom</td><td></td><td></td><td>32.42.3</td><td>33.02.9</td><td></td><td></td><td>31.43.3</td><td></td><td></td><td></td><td></td><td>31.44.4</td><td></td></tr><tr><td>Mistral v0.3-Random</td><td>22.9</td><td>30.84.8</td><td>28.92.4</td><td>28.52.4</td><td>29.62.1</td><td>29.70.6</td><td>29.30.6</td><td></td><td></td><td></td><td></td><td>28.72.1</td><td></td></tr><tr><td>Mistral v0.3-Custom</td><td></td><td></td><td>29.47.2</td><td>30.77.7</td><td></td><td></td><td>32.65.3</td><td></td><td></td><td></td><td></td><td>29.75.0</td><td></td></tr></table>

Table 11: The average micro-F1 (%) results for MPQA Intensity when $M = 10$ (i.e., selection size) in iterative approaches, calculated over five different seeds for the sampling phase. The sub-columns denote $K$ (i.e., total support set size), and the subscripts indicate the standard deviation.
<table><tr><td>Model Name</td><td>0</td><td>5</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td><td>Full</td></tr><tr><td>Majority Baseline</td><td>25.0</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Random Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Random</td><td></td><td>75.17,3</td><td>80.94,2</td><td>84.03,0</td><td>84.32,5</td><td>85.11,0</td><td>85.31,5</td><td>86.71,6</td><td>86.51,0</td><td>86.50,9</td><td>86.81,2</td><td>94.2</td><td></td></tr><tr><td>FLAN-T5-Random</td><td></td><td>71.86,3</td><td>87.31,2</td><td>87.81,5</td><td>88.31,0</td><td>88.41,2</td><td>88.70,9</td><td>88.91,0</td><td>89.40,7</td><td>89.40,5</td><td>89.30,7</td><td>94.4</td><td></td></tr><tr><td>Representative Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)</td><td></td><td>61.40,3</td><td>75.911,9</td><td>79.27,3</td><td>86.50,8</td><td>86.30,4</td><td>87.10,5</td><td>86.41,8</td><td>86.50,4</td><td>86.81,0</td><td>86.61,5</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)</td><td></td><td>86.90,7</td><td>86.31,4</td><td>87.80,6</td><td>89.30,4</td><td>88.42,1</td><td>89.60,3</td><td>89.00,4</td><td>89.50,4</td><td>89.40,4</td><td>89.10,3</td><td></td><td></td></tr><tr><td>Iterative Approaches</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td></td><td>86.90,7</td><td>87.71,0</td><td>88.10,9</td><td>88.00,9</td><td>88.50,6</td><td>88.20,9</td><td>88.90,4</td><td>88.71,1</td><td>88.61,0</td><td>88.71,4</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td></td><td>86.90,7</td><td>87.51,2</td><td>88.50,8</td><td>88.90,7</td><td>88.71,1</td><td>89.00,8</td><td>89.10,5</td><td>89.20,4</td><td>89.80,5</td><td>89.70,2</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td></td><td>86.90,7</td><td>86.62,4</td><td>88.11,1</td><td>87.42,1</td><td>88.60,4</td><td>89.10,5</td><td>89.20,3</td><td>89.10,4</td><td>89.30,4</td><td>89.30,2</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td></td><td>86.90,7</td><td>87.60,3</td><td>88.21,0</td><td>89.01,1</td><td>88.51,2</td><td>89.01,0</td><td>89.11,4</td><td>89.20,5</td><td>89.30,8</td><td>89.30,5</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td></td><td>86.90,7</td><td>86.41,8</td><td>86.21,8</td><td>87.61,3</td><td>87.61,0</td><td>87.31,6</td><td>88.41,1</td><td>88.31,1</td><td>88.50,7</td><td>88.80,7</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td></td><td>86.90,7</td><td>87.50,7</td><td>86.92,1</td><td>87.31,8</td><td>88.12,0</td><td>88.41,3</td><td>88.80,9</td><td>88.81,3</td><td>88.90,7</td><td>89.10,8</td><td></td><td></td></tr><tr><td>In-Context Learning</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Random</td><td>84.6</td><td>85.71,3</td><td>85.21,6</td><td>86.81,4</td><td>87.70,9</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Custom</td><td></td><td></td><td>87.20,5</td><td>88.10,6</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Llama 
3.1-Random</td><td>82.5</td><td>84.41,5</td><td>85.81,4</td><td>85.11,4</td><td>86.00,6</td><td>86.10,8</td><td>86.31,3</td><td></td><td></td><td></td><td>86.51,3</td><td></td><td></td></tr><tr><td>Llama 3.1-Custom</td><td></td><td></td><td>84.81,0</td><td>85.70,7</td><td></td><td></td><td>86.00,9</td><td></td><td></td><td></td><td>85.31,4</td><td></td><td></td></tr><tr><td>Mistral v0.3-Random</td><td>84.9</td><td>82.33,9</td><td>82.92,6</td><td>85.31,4</td><td>85.51,7</td><td>84.92,3</td><td>86.10,8</td><td></td><td></td><td></td><td>86.41,4</td><td></td><td></td></tr><tr><td>Mistral v0.3-Custom</td><td></td><td></td><td>80.42,8</td><td>86.20,7</td><td></td><td></td><td>83.22,9</td><td></td><td></td><td></td><td>81.93,6</td><td></td><td></td></tr></table>

Table 12: The average micro-F1 (%) results for AG News when $M = 10$ (i.e., selection size) in iterative approaches, calculated over five different seeds for the sampling phase. The sub-columns denote $K$ (i.e., total support set size), and the subscripts indicate the standard deviation.
<table><tr><td>Model Name</td><td>0</td><td>5</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td><td>Full</td></tr><tr><td>Majority Baseline</td><td>20.0</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Random Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Random</td><td></td><td>32.13.0</td><td>35.72.7</td><td>37.12.7</td><td>37.82.7</td><td>41.11.8</td><td>42.33.0</td><td>44.02.2</td><td>46.02.3</td><td>47.01.8</td><td>45.32.7</td><td>63.2</td><td></td></tr><tr><td>FLAN-T5-Random</td><td></td><td>47.24.8</td><td>52.74.3</td><td>54.33.5</td><td>55.62.1</td><td>55.90.7</td><td>56.80.8</td><td>57.41.0</td><td>58.00.8</td><td>58.50.9</td><td>58.81.5</td><td>65.7</td><td></td></tr><tr><td>Representative Sampling</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BART-Rep(En)</td><td></td><td>29.90.0</td><td>34.91.7</td><td>35.31.5</td><td>35.52.6</td><td>38.72.6</td><td>42.12.5</td><td>43.82.4</td><td>43.03.3</td><td>43.82.1</td><td>45.92.7</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)</td><td></td><td>51.50.2</td><td>51.51.8</td><td>50.91.9</td><td>51.62.4</td><td>55.02.9</td><td>55.72.7</td><td>57.30.7</td><td>57.61.2</td><td>57.91.3</td><td>59.30.9</td><td></td><td></td></tr><tr><td>Iterative Approaches</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Un</td><td></td><td>51.50.2</td><td>54.73.1</td><td>53.93.3</td><td>56.23.7</td><td>57.01.7</td><td>56.13.6</td><td>57.61.7</td><td>57.52.1</td><td>57.41.9</td><td>57.52.8</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(Sc)</td><td></td><td>51.50.2</td><td>52.82.2</td><td>55.71.4</td><td>56.61.1</td><td>57.41.1</td><td>58.00.8</td><td>58.70.6</td><td>59.41.6</td><td>59.70.6</td><td>59.51.4</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-Rep(En)</td><td></td><td>51.50.2</td><td>52.62.9</td><td>55.41.0</td><td>56.70.9</td><td>57.31.0</td><td>57.21.1</td><td>57.30.9</td><td>57.70.6</td><td>57.70.6</td><td>58.70.9</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-UnRep</td><td></td><td>51.50.2</td><td>45.74.2</td><td>47.84.9</td><td>50.04.6</td><td>52.43.2</td><td>52.84.4</td><td>52.34.2</td><td>53.03.1</td><td>54.33.2</td><td>55.03.1</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(Sc)</td><td></td><td>51.50.2</td><td>54.71.8</td><td>55.51.7</td><td>58.11.4</td><td>58.40.9</td><td>58.41.6</td><td>58.91.2</td><td>58.42.0</td><td>59.50.8</td><td>59.40.4</td><td></td><td></td></tr><tr><td>FLAN-T5-Rep(En)-CIUn(En)</td><td></td><td>51.50.2</td><td>52.03.5</td><td>55.32.3</td><td>57.10.7</td><td>58.31.6</td><td>58.00.7</td><td>59.20.8</td><td>59.90.6</td><td>59.11.3</td><td>59.91.1</td><td></td><td></td></tr><tr><td>In-Context Learning</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Random</td><td>62.2</td><td>60.31.5</td><td>60.61.6</td><td>60.11.9</td><td>60.52.6</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Gemma 2-Custom</td><td></td><td></td><td>61.90.6</td><td>60.50.6</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Llama 
3.1-Random</td><td>59.4</td><td>52.23.7</td><td>53.35.4</td><td>54.74.8</td><td>55.54.5</td><td>55.34.4</td><td>57.02.7</td><td></td><td></td><td></td><td>55.63.1</td><td></td><td></td></tr><tr><td>Llama 3.1-Custom</td><td></td><td></td><td>50.42.4</td><td>52.22.6</td><td></td><td></td><td>56.12.4</td><td></td><td></td><td></td><td>54.51.2</td><td></td><td></td></tr><tr><td>Mistral v0.3-Random</td><td>54.3</td><td>59.81.1</td><td>59.41.2</td><td>58.52.2</td><td>54.04.9</td><td>49.65.0</td><td>49.77.9</td><td></td><td></td><td></td><td>46.19.0</td><td></td><td></td></tr><tr><td>Mistral v0.3-Custom</td><td></td><td></td><td>60.00.9</td><td>56.92.2</td><td></td><td></td><td>45.66.6</td><td></td><td></td><td></td><td>47.72.8</td><td></td><td></td></tr></table>

Table 13: The average micro-F1 (%) results for Amazon Reviews when $M = 10$ (i.e., selection size) in iterative approaches, calculated over five different seeds for the sampling phase. The sub-columns denote $K$ (i.e., total support set size), and the subscripts indicate the standard deviation.

activefewshotlearningfortextclassification/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ef382c6a835511ab6182356355d588448c352cd309e8bcac6aea810d7225682
size 1503997
activefewshotlearningfortextclassification/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d7b3d3994d05b2b7334b398ed16b282aeaf3e3d1fccbcdd3238f74cd13beb6be
size 578007
adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/ccbfbf24-6776-43af-9715-49834a214263_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1fb2fd40cb5d20240fcc6f578a12739dfb32c849575bb502239a5ce642919cd1
size 118764
adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/ccbfbf24-6776-43af-9715-49834a214263_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1346af542e7c2de4f54ef4d593d56331d13efaa9bbcb47e0480a7cab39ac440c
size 139486
adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/ccbfbf24-6776-43af-9715-49834a214263_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8dbc57abf64b9cd3f8aa242a13243940780b527955e705efd9e45617393c86aa
size 790142
adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/full.md
ADDED
@@ -0,0 +1,471 @@
# ADACAD: Adaptively Decoding to Balance Conflicts between Contextual and Parametric Knowledge

Han Wang, Archiki Prasad, Elias Stengel-Eskin, Mohit Bansal

UNC Chapel Hill

{hwang, archiki, esteng, mbansal}@cs.unc.edu

# Abstract

Knowledge conflict arises from discrepancies between information in the context of a large language model (LLM) and the knowledge stored in its parameters. This can hurt performance when using standard decoding techniques, which tend to ignore the context. Existing test-time contrastive methods seek to address this by comparing the LLM's output distribution with and without the context and adjust the model according to the contrast between them. However, we find that these methods frequently misjudge the degree of conflict and struggle to handle instances that vary in their amount of conflict, with static methods over-adjusting when conflict is absent. We propose a fine-grained, instance-level approach called ADACAD, which dynamically infers the weight of adjustment based on the degree of conflict, as measured by the Jensen-Shannon divergence between distributions representing contextual and parametric knowledge. Across four LLMs, six question-answering (QA) and three summarization datasets, we demonstrate that ADACAD consistently outperforms other decoding baselines with average QA accuracy gains of $14.21\%$ (absolute) over a static contrastive baseline, and improves the factuality of summaries by 6.19 (AlignScore). Lastly, we show that while contrastive baselines hurt performance when conflict is absent, ADACAD mitigates these losses, making it more applicable to real-world datasets in which some examples have conflict and others do not.

# 1 Introduction

Large language models (LLMs) encode vast amounts of information from pretraining in their parameters (Petroni et al., 2019; Roberts et al., 2020), giving them remarkable capabilities in knowledge-intensive NLP tasks. However, LLMs also hallucinate plausible but factually incorrect responses due to outdated knowledge (Lazaridou et al., 2021; Dhingra et al., 2022; Kasai et al., 2023), lesser-known facts (Mallen et al., 2023), and even misinformation in the pre-training corpus. A popular line of prior work aims to improve answers and reduce hallucination by augmenting LLMs' context with external knowledge, including knowledge from retrieved documents (Guu et al., 2020; Lewis et al., 2020), web search results (Nakano et al., 2022), and the outputs of tools (Schick et al., 2023). However, discrepancies between the added contextual knowledge and the model's pretrained parametric knowledge can cause knowledge conflict. In these cases, models often overlook the provided context and rely overly on the parametric knowledge (Longpre et al., 2021; Chen et al., 2022; Zhou et al., 2023; Wan et al., 2023). For example, in Fig. 1, the LLM's pretraining data (and thus its parametric knowledge) has a cutoff of September 2022, at which point France had hosted the Summer Olympics twice. This conflicts with the latest contextual knowledge (from July 2024), when France had hosted three times, and leads the model to answer incorrectly when using greedy decoding.

Figure 1: In cases of high knowledge conflict, greedy decoding fails to attend to the context, resulting in incorrect answers. Contrastive decoding allows the context to be incorporated, but must be done with care: in low-conflict cases, excessive contrast can over-correct (e.g., by CAD with $\alpha = 1$), resulting in incorrect outputs. ADACAD dynamically adjusts the degree of contrast, allowing it to handle both high and low-conflict cases.

One promising direction for handling knowledge conflict uses inference-time decoding strategies that adjust the model's output probability distribution without the need for additional training. Shi et al. (2024) propose context-aware decoding (CAD), which seeks to correct the model's output based on the difference between output probability distributions with and without the context. However, in practice, we find that while CAD works well when there is a uniformly high degree of conflict between the parametric knowledge and external context, it struggles with scenarios in which different examples have varying degrees of knowledge conflict. Empirically, we observe that CAD can in fact degrade performance on low-conflict examples by overcorrecting the output distribution. For example, in Fig. 1, when the context is sourced from a document before July 2024, there is no conflict between the parametric knowledge and the contextual knowledge; both state that France has hosted the Olympics twice. Here, CAD overcorrects the distribution, leading to an invalid answer.

In this work, we present a simple and effective dynamic decoding method, Adaptive Context-Aware Decoding (ADACAD), aimed at automatically modeling the degree of conflict between the context and parametric knowledge and dynamically inferring the degree of adjustment needed for every token. We use the Jensen-Shannon divergence (JSD) between output distributions with and without the context to measure the degree of knowledge conflict, using the resulting value to reweight the combination of distributions. A higher JSD indicates a greater degree of conflict and signals the need for higher adjustment (more weight on the contextual knowledge), while a lower JSD reflects a smaller degree of conflict requiring a smaller adjustment (more weight on the parametric knowledge). As illustrated in Fig. 1, this leads to correct answers both for high and low-conflict examples by helping the model adaptively decide how to weigh contextual vs. parametric knowledge.

We demonstrate ADACAD's effectiveness on a diverse range of tasks, covering question-answering (QA) and summarization, with six QA datasets (Natural Questions (NQ; Kwiatkowski et al., 2019), NQ-SWAP (Longpre et al., 2021), TriviaQA (Joshi et al., 2017), PopQA (Mallen et al., 2023), HotpotQA (Yang et al., 2018), TabMWP (Lu et al., 2023)) and three summarization datasets (CNN-DM (See et al., 2017), XSum (Narayan et al., 2018), and TofuEval (Tang et al., 2024)). We test a range of base LLMs, examining Llama-2 (Touvron et al., 2023), Llama-3 (AI@Meta, 2024), and Mistral (Jiang et al., 2023). We consider different sizes of these models and also test both the base and instruction-tuned variants. Our results and analyses show that decoding with a uniform level of contrast benefits high-conflict scenarios but generally hurts performance, while the adaptive contrast of ADACAD results in improvements across the board. Overall, ADACAD generally achieves superior performance compared to the baselines, with an absolute gain of $14.21\%$ over CAD (a static baseline), $4.82\%$ over COIECD (Yuan et al., 2024, a baseline that classifies instances as conflicting or not), $5.86\%$ over ConfCD (Zhao et al., 2024, a method that makes dynamic token-level adjustments based on LLM confidence), and $2.41\%$ over greedy decoding when averaged across models and QA datasets. On summarization, ADACAD improves summary quality and factuality, with an average AlignScore (Zha et al., 2023) gain of 4.16 over greedy decoding, 2.19 over CAD, 10.44 over COIECD, and 7.96 over ConfCD.

Furthermore, in our analyses, we explore why ADACAD improves over the baselines. We first validate the hypothesis that ADACAD is able to balance contextual and parametric knowledge by assigning lower weights to lower-conflict instances, testing each method on datasets designed to have high and low conflict, finding that ADACAD's inferred weight is much lower when there is no conflict. We also compare the amount by which CAD and ADACAD adjust the base model's distribution on examples with and without conflict, finding that while ADACAD changes the distribution less when there is no conflict (i.e., when the base model's distribution is already sufficient), CAD adjusts by roughly the same amount whether there is conflict or not, explaining its lower QA performance. Additionally, for summarization tasks, ADACAD generates more faithful summaries whereas other methods tend to hallucinate details.

# 2 Related Work

Knowledge Conflict Integrating external knowledge as context into LLMs enables them to keep abreast of current world knowledge (Kasai et al., 2023), reduce hallucination (Shuster et al., 2021), and improve factuality. However, a recent line of work focuses on discrepancies between external contextual knowledge and the model's parametric knowledge, such as LLMs' over-reliance on their parametric knowledge on entity-based QA tasks (Longpre et al., 2021), ignoring retrieved contexts (Tan et al., 2024), and exhibiting confirmation bias (Xie et al., 2024). Zhou et al. (2023) demonstrate that LLMs' faithfulness to the context can be significantly improved using carefully designed prompting strategies – this is orthogonal to our work, which is compatible with different prompts. Zhang et al. (2023) address how to combine retrieved and parametric knowledge to improve open-domain QA, but require further training discriminators with silver labels, whereas our method is training-free.

Contrast in Text Generation Contrastive approaches for text generation have been widely studied and used to enhance response diversity in conversations (Li et al., 2016), steer model generations towards desired attributes while maintaining fluency and diversity (Liu et al., 2021), contrast larger and relatively smaller language models to generate high-quality text (Li et al., 2023), and improve visually-grounded generation tasks (Wan et al., 2024). Context-aware decoding (CAD; Shi et al., 2024) leverages a contrastive output distribution that amplifies the differences between the output probabilities predicted by a model with and without the context, promoting greater attention to the input context for more faithful and reliable text generation. Unlike ADACAD, these past contrastive approaches do not adapt the weight on distributions to varying degrees of knowledge conflict. To address this, Yuan et al. (2024) introduce COIECD, a decoding-time method that categorizes instances into two discrete bins – high and low conflict – based on a complex information-entropy constraint governed by tuned hyperparameters, and employs different decoding strategies (by altering CAD) for each. Zhao et al. (2024) use LLM confidence to adjust the output probabilities dynamically (denoted as ConfCD) and also rely on additional noisy and irrelevant contexts. In contrast, ADACAD employs a single dynamic instance-level strategy that automatically models (based on Jensen-Shannon divergence) a continuous degree of conflict without imposing rigid categories or requiring additional noisy and irrelevant contexts, accommodating more general knowledge conflict settings. In addition to these conceptual differences, in Section 4.2, we show that ADACAD outperforms CAD, COIECD, and ConfCD on QA and summarization.

# 3 Methodology

Task and Notation Given an input query $\boldsymbol{x}$ with a relevant context $\boldsymbol{c}$, a language model parameterized by $\theta$ is tasked with generating a correct response $\boldsymbol{y} = y_1, \dots, y_n$ of length $n$ that respects the context. At each decoding step $t$, a token $y_t$ can be sampled autoregressively from a probability distribution conditioned on the query $\boldsymbol{x}$ and context $\boldsymbol{c}$ as $y_t \sim p_\theta(y \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t})$. However, when there is conflict between the knowledge in the context $\boldsymbol{c}$ and the parametric knowledge encoded in the LLM, the model can struggle to pay enough attention to $\boldsymbol{c}$ and overly rely on the parametric knowledge (Longpre et al., 2021; Chen et al., 2022), i.e., sample from a distribution more akin to $p_\theta(y \mid \boldsymbol{x}, \boldsymbol{y}_{<t})$.

Background: Context-aware Decoding To mitigate knowledge conflicts, Shi et al. (2024) introduce context-aware decoding (CAD), which samples from a contrastive output distribution that amplifies the difference between output probabilities with and without the context. CAD measures the parametric knowledge via $p_\theta(y \mid \boldsymbol{x}, \boldsymbol{y}_{<t})$ and prioritizes relevant contextual knowledge over the model's parametric knowledge by using the pointwise mutual information (PMI) between the context $\boldsymbol{c}$ and the generation $y$, conditioned on $\boldsymbol{x}$ and $\boldsymbol{y}_{<t}$, to modify the model's original output distribution:

$$
y_t \sim \tilde{p}_\theta(y \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t}) \propto p_\theta(y \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t}) \left[ \frac{p_\theta(y \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t})}{p_\theta(y \mid \boldsymbol{x}, \boldsymbol{y}_{<t})} \right]^{\alpha} \tag{1}
$$

where the PMI term $\frac{p_\theta(y \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t})}{p_\theta(y \mid \boldsymbol{x}, \boldsymbol{y}_{<t})}$ is a scaling factor used to adjust the parametric knowledge, and $\alpha$ governs the weight or degree of adjustment. A larger $\alpha$ means a greater adjustment, and $\alpha = 0$ reduces to no adjustment, i.e., greedy decoding.
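
Concretely, Eq. (1) can be applied in logit space at each decoding step, since multiplying probability ratios corresponds to a linear combination of log-probabilities, and per-distribution normalization constants are absorbed by the final softmax. The following is a minimal PyTorch sketch of one such step (not the authors' released code); `logits_with_ctx` and `logits_without_ctx` are assumed to be the model's next-token logits with and without the context prepended.

```python
import torch
import torch.nn.functional as F

def cad_step(logits_with_ctx: torch.Tensor,
             logits_without_ctx: torch.Tensor,
             alpha: float = 1.0) -> torch.Tensor:
    """One CAD step, Eq. (1) in logit space.

    p_ctx * (p_ctx / p_no_ctx)^alpha is, in log space,
    (1 + alpha) * log p_ctx - alpha * log p_no_ctx up to an additive
    constant, which the softmax normalization removes.
    """
    adjusted = (1 + alpha) * logits_with_ctx - alpha * logits_without_ctx
    return F.softmax(adjusted, dim=-1)
```

Greedy CAD decoding then takes the argmax of this adjusted distribution and appends the chosen token to both the with-context and without-context inputs before the next step.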

ADACAD: Handling Variable Conflict In test-time contrastive methods – such as those presented by Li et al. (2023) and Shi et al. (2024) – $\alpha$ is a fixed hyperparameter set for an entire dataset, requiring tuning on a validation set. However, every instance in the dataset may need a different weight for adjustment; furthermore, in longer-form generation, individual timesteps may require different weights, making a single $\alpha$ value suboptimal. For instance, in the presence of a high degree of conflict, e.g., Fig. 2 (top), a larger $\alpha$ can perturb the LLM's output distribution to mitigate over-reliance on parametric knowledge, whereas in cases with low or no conflict (as in Fig. 2 (bottom)), the adjustment to the LLM's output distribution is minimal. Therefore, a fixed $\alpha$ may fail on scenarios where there are heterogeneous examples with and without conflict, i.e., on realistic datasets.

Figure 2: Comparison of greedy decoding (Context+Question), CAD, and ADACAD on high-conflict and low-conflict examples. Greedy decoding struggles to incorporate contextual knowledge in high-conflict examples. CAD tends to overemphasize irrelevant tokens in the vocabulary, leading to incorrect answers in low-conflict examples. ADACAD uses dynamic adaptation to effectively balance between context and parametric knowledge.

To address variable conflict, we introduce a different $\alpha_t$ for each timestep and each instance. Specifically, we automatically infer $\alpha_t$ dynamically based on the degree of knowledge conflict for each instance (and decoding step) without supervision, enabling automatic adaptation. To accomplish this, we use the Jensen-Shannon divergence (JSD; Lin, 1991) to model the degree of conflict between the context and parametric knowledge. While similar to the Kullback-Leibler divergence, JSD is symmetric and bounded within the range $[0, 1]$, making it more suitable for modeling conflicts, as it provides a more interpretable and normalized measure of divergence (details in Appendix A). A larger JSD between $p_\theta(y_t \mid \boldsymbol{x}, \boldsymbol{y}_{<t})$ and $p_\theta(y_t \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t})$ reflects a greater conflict between contextual and parametric knowledge, suggesting that we need a larger $\alpha$ to encourage the LM to rely more on the context, while a smaller JSD reflects a smaller conflict, suggesting that a smaller $\alpha$ is required to maintain the LM's adherence to its parametric knowledge. Therefore, we set $\alpha_t^{\mathrm{JSD}}$ at each decoding step $t$ to:

$$
\alpha_t^{\mathrm{JSD}} = \mathrm{JSD}\left(p_\theta(y_t \mid \boldsymbol{x}, \boldsymbol{y}_{<t}) \,\big\|\, p_\theta(y_t \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t})\right)
$$
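
For reference, the Jensen-Shannon divergence used here is the standard symmetrized, smoothed form of the KL divergence; with base-2 logarithms it is bounded in $[0, 1]$, which is what allows it to serve directly as a weight:

$$
\mathrm{JSD}(P \,\|\, Q) = \tfrac{1}{2}\,\mathrm{KL}(P \,\|\, M) + \tfrac{1}{2}\,\mathrm{KL}(Q \,\|\, M), \qquad M = \tfrac{1}{2}(P + Q)
$$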

This enables both coarse-grained instance-level and fine-grained token-level adjustments. Finally, we sample outputs from the probability distribution:

$$
y_t \sim p_\theta(y \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t}) \left[ \frac{p_\theta(y \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t})}{p_\theta(y \mid \boldsymbol{x}, \boldsymbol{y}_{<t})} \right]^{\alpha_t^{\mathrm{JSD}}}
$$
This dynamic adaptation allows our approach to effectively balance between context and parametric knowledge, ensuring robust performance across varying degrees of conflict without the need for extensive manual tuning, thereby enhancing both flexibility and accuracy in diverse scenarios.
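To make the procedure concrete, the following is a minimal PyTorch sketch of one ADACAD decoding step under the equations above; the function names, the $\epsilon$-smoothing, and the final renormalization are our own assumptions rather than the authors' reference implementation. We use base-2 logarithms in the JSD so that $\alpha_t \in [0, 1]$.

```python
import torch
import torch.nn.functional as F

def jsd(p: torch.Tensor, q: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    # Jensen-Shannon divergence with base-2 logs, so the value lies in [0, 1].
    m = 0.5 * (p + q)
    kl_pm = (p * torch.log2((p + eps) / (m + eps))).sum(dim=-1)
    kl_qm = (q * torch.log2((q + eps) / (m + eps))).sum(dim=-1)
    return 0.5 * (kl_pm + kl_qm)

def adacad_step(logits_ctx: torch.Tensor, logits_noctx: torch.Tensor) -> torch.Tensor:
    """One decoding step: logits_ctx are the model's next-token logits given
    (c, x, y_<t); logits_noctx are the logits given only (x, y_<t)."""
    p_ctx = F.softmax(logits_ctx, dim=-1)    # p(y | c, x, y_<t)
    p_par = F.softmax(logits_noctx, dim=-1)  # p(y | x, y_<t)
    alpha_t = jsd(p_par, p_ctx)              # degree of conflict at this step
    # Scale the contextual distribution by the PMI term raised to alpha_t,
    # then renormalize over the vocabulary before sampling (or taking argmax).
    adjusted = p_ctx * (p_ctx / (p_par + 1e-12)) ** alpha_t
    return adjusted / adjusted.sum(dim=-1, keepdim=True)
```

When the two distributions agree, $\alpha_t \approx 0$ and the step reduces to ordinary decoding with context; when they diverge, $\alpha_t$ grows and the PMI term is weighted more heavily.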
ADACAD for Long-form Generation In long-form generation tasks, we find that the JSD values tend to be low initially (cf. Fig. 5 in Appendix A.1). This may be due to the model's tendency to produce generic, low-information outputs at the start of each sequence, so the divergence between $p_{\theta}(y_t \mid \boldsymbol{x}, \boldsymbol{y}_{<t})$ and $p_{\theta}(y_t \mid \boldsymbol{c}, \boldsymbol{x}, \boldsymbol{y}_{<t})$ is minimal. To mitigate this issue and ensure more consistent performance throughout the generation process, we introduce a warmup operation: $\alpha_t^{\mathrm{JSD}} = \max(\alpha_t^{\mathrm{JSD}}, \lambda)$, where $\lambda$ is a lower bound that adjusts for the initially low JSD values, ensuring a more robust and stable starting point. We set $\lambda = 0.3$ for long-form generation tasks.
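In code, the warmup is a single clamp on $\alpha_t$, continuing the sketch above (again an assumption about implementation details, not the authors' code):

```python
lam = 0.3  # lower bound lambda used for long-form generation
alpha_t = torch.clamp(alpha_t, min=lam)  # alpha_t^JSD = max(alpha_t^JSD, lambda)
```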
# 4 Experiments and Results
# 4.1 Experimental Setup
Datasets and Metrics We evaluate on several QA datasets: Natural Questions (NQ; Kwiatkowski et al., 2019), TriviaQA (Joshi et al., 2017), PopQA (Mallen et al., 2023), and HotpotQA (Yang et al., 2018). We use these datasets to simulate real scenarios with varying degrees of conflict for each instance. Additionally, we evaluate on an existing knowledge conflict dataset, NQ-SWAP (Longpre et al., 2021), which is based on the NQ dataset and consists of synthetic conflicting data. Lastly, we also test on a popular tabular question-answering dataset, TabMWP (Lu et al., 2023), that requires LLMs to use reasoning skills over tabular contexts. We report exact match accuracy on all QA datasets.
To test ADACAD on longer-form generation tasks, we evaluate on three standard summarization tasks: CNN-DM (See et al., 2017), XSum (Narayan et al., 2018), and TofuEval (Tang et al., 2024). While many documents from older datasets (such as CNN-DM and XSum) are present in LLMs' pretraining data, TofuEval is a recent, more challenging benchmark on topic-focused dialogue summarization (especially for marginal or secondary topics in the document). We use two reference-based metrics, ROUGE-L (Lin, 2004) and BERT-P (Zhang et al., 2020), to evaluate summarization quality. As TofuEval does not support reference-based evaluation (Tang et al., 2024), we use the recommended AlignScore (Zha et al., 2023) to measure the factual consistency of summaries on both main (central to the document) and marginal (lesser explored) topics. For additional details and examples of all datasets, refer to Appendix B.
Source of Context We use the gold context provided by NQ, NQ-SWAP, TriviaQA, and HotpotQA as the relevant contexts. Since PopQA does not provide gold contexts, we employ BM25 (Robertson and Zaragoza, 2009) to retrieve relevant contexts from Wikipedia. For TabMWP, we take the semi-structured table as the relevant context. In summarization tasks, the source document serves as the relevant context, while the instruction is used as the input query. A summary of input query $x$ and context $c$ for all datasets is shown in Table 10, with corresponding prompts in Appendix F.
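For readers who want to reproduce the PopQA setup, a BM25 lookup can be sketched as follows. We use the third-party rank_bm25 package purely for illustration (the paper does not specify its BM25 implementation), and the two-document corpus stands in for Wikipedia paragraphs:

```python
from rank_bm25 import BM25Okapi  # third-party package, used here for illustration

corpus = [
    "The 2012 Uzbekistan First League was the 21st season of 2nd level football ...",
    "Superdrag was an American alternative rock band from Knoxville, Tennessee ...",
]
tokenized_corpus = [doc.lower().split() for doc in corpus]
bm25 = BM25Okapi(tokenized_corpus)

query = "What sport does 2012 Uzbekistan First League play?"
# Retrieve the highest-scoring paragraph to serve as the context c.
context = bm25.get_top_n(query.lower().split(), corpus, n=1)[0]
```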
Models We test ADACAD on different pretrained base language models, including Llama2 (13B) (Touvron et al., 2023), Llama3 (8B, 70B) (AI@Meta, 2024), and Mistral (7B) (Jiang et al., 2023); we measure ADACAD's effectiveness both on the base and instruction-tuned model variants.
Baselines We compare ADACAD to standard decoding, context-aware decoding (CAD; Shi et al., 2024) – which has a fixed $\alpha$ – COIECD (Yuan et al., 2024) – which classifies whether there is knowledge conflict using a method controlled by tuned thresholds and then operates in two different decoding modes, each with the same fixed $\alpha$ – and ConfCD (Zhao et al., 2024) – which dynamically sets $\alpha$ based on LLM confidence. Across all tasks and baselines, we use greedy decoding under a zero-shot setting. For CAD, we set $\alpha = 1$ for the QA datasets and $\alpha = 0.5$ for the summarization datasets, following prior work (Shi et al., 2024). For COIECD, the values of $\lambda$ and $\alpha$ are set to 0.25 and 1 for QA datasets, and 0.25 and 0.5 for the summarization datasets, respectively, following Yuan et al. (2024). For ConfCD, $\alpha$ is set to the maximum token probability with context, $C_R = \max_{y' \in V} p_\theta(y'|c, x, y_{<t})$, if $C_R$ exceeds the maximum token probability without context (i.e., $C_R > C = \max_{y' \in V} p_\theta(y'|x, y_{<t})$); otherwise, it is given by $1 - C$. In ADACAD, the $\alpha$ values are dynamically adjusted based on the degree of knowledge conflict for each instance.
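As a point of reference, the ConfCD rule described above can be transcribed directly (a sketch; the function and variable names are ours):

```python
def confcd_alpha(c_r: float, c: float) -> float:
    # c_r = max_y p(y | c, x, y_<t): max token probability with context
    # c   = max_y p(y | x, y_<t):    max token probability without context
    return c_r if c_r > c else 1.0 - c
```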
# 4.2 Main Results
QA Tasks From Table 1, we observe that ADACAD consistently outperforms greedy decoding, CAD, COIECD, and ConfCD. For instance, on Llama3-70B, ADACAD achieves an average score improvement of $2.18\%$ (absolute) over greedy decoding, $12.91\%$ over CAD, $3.52\%$ over COIECD, and $2.44\%$ over ConfCD. Note that while CAD performs quite well on NQ-SWAP (containing only high-conflict examples), it often degrades performance (relative to greedy decoding) on other QA datasets, resulting in an $18.58\%$ accuracy drop on average across all models and tasks; in contrast, ADACAD performs well across datasets, whether they have conflict or not.
<table><tr><td>Model</td><td>Decoding</td><td>NQ</td><td>NQ-SWAP</td><td>TriviaQA</td><td>PopQA</td><td>HotpotQA</td><td>TabMWP</td><td>Avg</td></tr><tr><td rowspan="5">Llama2-13B</td><td>Greedy</td><td>44.26</td><td>54.89</td><td>85.50</td><td>76.65</td><td>38.27</td><td>38.30</td><td>56.31</td></tr><tr><td>CAD</td><td>37.91</td><td>80.35</td><td>71.40</td><td>76.83</td><td>31.92</td><td>19.30</td><td>52.95</td></tr><tr><td>COIECD</td><td>44.60</td><td>59.84</td><td>87.00</td><td>81.05</td><td>42.81</td><td>38.80</td><td>59.02</td></tr><tr><td>ConfCD</td><td>45.81</td><td>76.89</td><td>81.70</td><td>79.08</td><td>35.11</td><td>29.10</td><td>57.95</td></tr><tr><td>ADACAD</td><td>46.73</td><td>67.84</td><td>85.40</td><td>78.79</td><td>37.83</td><td>37.50</td><td>59.02</td></tr><tr><td rowspan="5">Llama3-8B</td><td>Greedy</td><td>44.63</td><td>47.81</td><td>85.70</td><td>80.51</td><td>51.42</td><td>52.20</td><td>60.38</td></tr><tr><td>CAD</td><td>35.96</td><td>77.94</td><td>40.20</td><td>74.27</td><td>39.53</td><td>26.60</td><td>49.08</td></tr><tr><td>COIECD</td><td>43.36</td><td>51.16</td><td>83.10</td><td>78.49</td><td>45.63</td><td>49.70</td><td>58.57</td></tr><tr><td>ConfCD</td><td>42.90</td><td>72.44</td><td>71.20</td><td>79.80</td><td>47.13</td><td>46.20</td><td>59.95</td></tr><tr><td>ADACAD</td><td>45.47</td><td>62.34</td><td>82.50</td><td>81.34</td><td>50.53</td><td>53.00</td><td>62.53</td></tr><tr><td rowspan="5">Llama3-70B</td><td>Greedy</td><td>44.13</td><td>55.74</td><td>90.20</td><td>86.10</td><td>56.11</td><td>66.70</td><td>66.50</td></tr><tr><td>CAD</td><td>34.05</td><td>81.32</td><td>54.60</td><td>75.16</td><td>40.86</td><td>48.60</td><td>55.77</td></tr><tr><td>COIECD</td><td>45.09</td><td>57.26</td><td>88.60</td><td>83.60</td><td>52.03</td><td>64.40</td><td>65.16</td></tr><tr><td>ConfCD</td><td>41.44</td><td>79.34</td><td>81.00</td><td>82.00</td><td>50.14</td><td>63.50</td><td>66.24</td></tr><tr><td>ADACAD</td><td>45.43</td><td>70.07</td><td>88.80</td><td>85.68</td><td>55.00</td><td>67.10</td><td>68.68</td></tr><tr><td rowspan="5">Mistral-7B</td><td>Greedy</td><td>42.56</td><td>56.86</td><td>80.40</td><td>67.56</td><td>40.89</td><td>38.90</td><td>57.65</td></tr><tr><td>CAD</td><td>20.98</td><td>66.89</td><td>24.20</td><td>48.54</td><td>18.49</td><td>20.10</td><td>35.82</td></tr><tr><td>COIECD</td><td>29.00</td><td>58.09</td><td>71.60</td><td>64.59</td><td>35.83</td><td>31.60</td><td>48.45</td></tr><tr><td>ConfCD</td><td>23.99</td><td>59.29</td><td>58.70</td><td>54.19</td><td>29.83</td><td>31.30</td><td>42.88</td></tr><tr><td>ADACAD</td><td>45.09</td><td>67.27</td><td>80.20</td><td>67.26</td><td>41.35</td><td>39.70</td><td>60.23</td></tr></table>
Table 1: Under a zero-shot setting, we show that on average (across tasks and models) ADACAD improves accuracy by $14.21\%$ over CAD, $4.82\%$ over COIECD, and $5.86\%$ over ConfCD (results with instruction-tuned models in Appendix C).
<table><tr><td rowspan="2">Decoding</td><td colspan="3">CNN-DM</td><td colspan="3">XSum</td><td colspan="2">TofuEval (AlignScore)</td></tr><tr><td>ROUGE-L</td><td>BERT-P</td><td>AlignScore</td><td>ROUGE-L</td><td>BERT-P</td><td>AlignScore</td><td>Overall</td><td>Main / Marginal</td></tr><tr><td>Greedy</td><td>24.93</td><td>95.41</td><td>91.44</td><td>14.36</td><td>94.05</td><td>85.28</td><td>76.66</td><td>81.64 / 61.19</td></tr><tr><td>CAD</td><td>24.76</td><td>94.45</td><td>91.01</td><td>14.59</td><td>93.65</td><td>84.34</td><td>83.93</td><td>87.26 / 73.58</td></tr><tr><td>COIECD</td><td>23.47</td><td>92.06</td><td>85.49</td><td>14.51</td><td>91.04</td><td>73.81</td><td>75.24</td><td>80.68 / 58.31</td></tr><tr><td>ConfCD</td><td>23.94</td><td>93.37</td><td>87.03</td><td>14.78</td><td>92.71</td><td>77.98</td><td>76.97</td><td>78.17 / 73.23</td></tr><tr><td>ADACAD</td><td>25.42</td><td>94.91</td><td>94.97</td><td>14.91</td><td>94.29</td><td>85.81</td><td>85.07</td><td>88.06 / 75.79</td></tr></table>
Table 2: Results on summarization datasets with Llama3-70B, showing that ADACAD yields the best performance on factuality metrics (AlignScore) and overall summarization quality (ROUGE-L and BERT-P). The full results with other language models are shown in Table 8 of Appendix E.
Furthermore, ADACAD consistently outperforms COIECD across various QA datasets, highlighting the strength of our continuous JSD-based approach over COIECD's binary classification approach, which splits instances into those with and without conflict. For instance, ADACAD outperforms COIECD by a large average margin of $10.29\%$ on NQ-SWAP across all models. Additionally, on more complex datasets like TabMWP with newer LLMs, ADACAD also shows superior performance against all baselines, e.g., achieving average improvements of $6.30\%$ with Llama3-70B and $9.23\%$ with Mistral-7B. These results indicate that ADACAD is better able to combine the advantages of greedy decoding and CAD, performing well in scenarios without knowledge conflict (as greedy decoding does) as well as those with conflict (as CAD does).
Summarization Tasks In Table 2, we investigate how ADACAD can improve performance on longer-form generation, showing results on three summarization tasks: CNN-DM, XSum, and TofuEval. For TofuEval, ADACAD demonstrates substantial improvements, particularly excelling on marginal topics (i.e., topics not central to the document), where it outperforms greedy decoding, CAD, COIECD, and ConfCD by 14.60, 2.21, 17.48, and 2.56 points in terms of AlignScore – a measure of faithfulness – respectively. This highlights ADACAD's ability to handle diverse topics and maintain factual consistency, especially when prompted to focus on a marginal topic; qualitatively, we see in Fig. 4 that these improvements are driven by less hallucination on the part of ADACAD.
On CNN-DM, ADACAD achieves the highest ROUGE-L score of 25.42, surpassing greedy decoding, CAD, COIECD, and ConfCD by 0.49, 0.66, 1.95, and 1.48 points, respectively.
<table><tr><td>Decoding</td><td>NQ-SWAP</td><td>NQ-SYNTH</td><td>Overall</td></tr><tr><td>Greedy</td><td>51.60</td><td>88.20</td><td>69.90</td></tr><tr><td>CAD</td><td>79.60</td><td>64.00</td><td>71.80</td></tr><tr><td>COIECD</td><td>50.80</td><td>83.60</td><td>67.20</td></tr><tr><td>ADACAD</td><td>62.80</td><td>86.40</td><td>74.60</td></tr></table>

Table 3: Accuracy on conflicting data (NQ-SWAP) and non-conflicting data (NQ-SYNTH) with Llama3-70B.
In terms of factual consistency, ADACAD also leads with an AlignScore of 94.97. On XSum, ADACAD likewise outperforms all baselines across all metrics; for instance, it achieves average improvements of 1.43 and 5.46 points in BERT-P and AlignScore, respectively. For the BERT-P metric on CNN-DM, ADACAD outperforms all contrastive decoding baselines and is only slightly lower than greedy decoding; as mentioned in Section 4.1, this may be a result of a lack of conflict in these datasets, which are at least partly included in large pretraining corpora. These improvements indicate that ADACAD's dynamic adjustment mechanism is effective in long-form generation, allowing it to balance context and parametric knowledge.
# 5 Analysis
# 5.1 Performance comparison on instances with higher and lower degrees of conflict
Setup In Table 1, we find that CAD underperforms ADACAD, COIECD, as well as greedy decoding on most QA datasets, except NQ-SWAP, wherein every instance by design has a high degree of conflict (Longpre et al., 2021). We hypothesize that on more realistic datasets, the trailing performance of CAD stems from its inability to account for instances with low or minimal conflict. To test this hypothesis, we evaluate all methods on examples designed to have minimal conflict, i.e., where the model's internal representation aligns well with the context. Specifically, we generate a dataset of synthetic non-conflicting data called NQ-SYNTH: we sample 500 questions from Natural Questions and then prompt Llama-3-70B to generate the answer for each question. We replace the gold answer entity in the context with the generated answer via regex, thus making the context consistent with the LLM's internal knowledge. Finally, we evaluate Llama-3-70B on NQ-SYNTH and on NQ-SWAP. See Table 6 in Appendix B for examples of NQ-SWAP and NQ-SYNTH.
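The substitution step of NQ-SYNTH can be sketched as follows; the helper name and its exact-match behavior are our assumptions, since the paper states only that the replacement is done by regex:

```python
import re

def make_nq_synth_context(context: str, gold_answer: str, model_answer: str) -> str:
    # Replace every mention of the gold answer entity with the model's
    # closed-book answer, so the context agrees with parametric knowledge.
    return re.sub(re.escape(gold_answer), model_answer, context)

ctx = "The fourth season of Chicago Fire contained 23 episodes."
print(make_nq_synth_context(ctx, "23", "22"))
# -> The fourth season of Chicago Fire contained 22 episodes.
```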
<table><tr><td>Decoding</td><td>ρ (NQ-SWAP)</td><td>ρ (NQ-SYNTH)</td><td>|Δρ|</td></tr><tr><td>CAD</td><td>0.56</td><td>0.57</td><td>0.01</td></tr><tr><td>ADACAD</td><td>0.86</td><td>0.94</td><td>0.08</td></tr></table>
Table 4: Spearman rank-order correlation coefficient between original and adjusted output distributions for CAD and ADACAD on NQ-SWAP and NQ-SYNTH. The difference $|\Delta \rho|$ measures the sensitivity of a decoding method to the degree of conflict (higher is better).
Result: CAD hurts performance when conflict is low, while ADACAD can handle both cases. Consistent with our hypothesis, in Table 3, we observe that in the absence of conflict (on NQ-SYNTH), CAD substantially degrades performance by $\approx 24\%$ relative to greedy decoding, while ADACAD maintains comparable performance. Although COIECD seeks to detect conflict and operates in two distinct decoding modes for high and low conflict, it also underperforms in non-conflict scenarios, falling $2.8\%$ behind ADACAD. However, in cases of high conflict (NQ-SWAP), where greedy decoding yields dramatically lower accuracy, ADACAD improves over greedy decoding by $11.2\%$, while COIECD cannot handle high-conflict examples as well, lagging behind ADACAD by $12\%$. To further investigate how ADACAD balances instances with lower and higher degrees of conflict, we compute $\alpha_{\mathrm{max}}^{\mathrm{JSD}}$, the maximum $\alpha_{t}^{\mathrm{JSD}}$ value across tokens, for both datasets. Indeed, we find that $\alpha_{\mathrm{max}}^{\mathrm{JSD}}$ adapts to the amount of conflict, with an average value of 0.45 on NQ-SWAP, which has a higher level of conflict, and a substantially lower value ($\alpha_{\mathrm{max}}^{\mathrm{JSD}} = 0.28$) on NQ-SYNTH, which contains no conflict by design.
# 5.2 PMI does not adequately address conflict
As described in Section 3, both CAD and ADACAD compute the PMI between the LLM's output distributions with and without external context $c$. However, CAD relies solely on the PMI term to balance the level of conflict, whereas in ADACAD, we compute $(\mathrm{PMI})^{\alpha_t^{\mathrm{JSD}}}$, where both PMI and $\alpha_t^{\mathrm{JSD}}$ adapt to the degree of conflict. In cases of low conflict, the LLM's distributions should in principle be the same with and without context, rendering $\mathrm{PMI} \approx 1$, i.e., resorting to greedy decoding for any value of $\alpha$ (cf. Eq. (1)). However, in practice, we find that, even with minimal conflict, the PMI term reranks the tokens in the head of the LLM's distribution, resulting in poor performance for CAD.
<table><tr><td>Datasets</td><td>CAD (tuned α)</td><td>ADACAD</td></tr><tr><td>NQ</td><td>44.35 (0.25)</td><td>45.47</td></tr><tr><td>TriviaQA</td><td>79.60 (0.25)</td><td>82.50</td></tr><tr><td>PopQA</td><td>78.19 (0.25)</td><td>81.34</td></tr><tr><td>HotpotQA</td><td>46.81 (0.50)</td><td>50.53</td></tr><tr><td>TabMWP</td><td>46.90 (0.50)</td><td>53.00</td></tr><tr><td>Average</td><td>59.17</td><td>62.57</td></tr></table>
Table 5: Performance of CAD with tuned $\alpha$ and ADACAD on QA datasets with Llama3-8B.
Setup To test how well the PMI term accounts for conflict, we measure the amount of reranking (among tokens) done by CAD and ADACAD relative to the greedy distribution. We compute the Spearman rank-order correlation coefficient $\rho$ between the greedy distribution and the output distributions from CAD and ADACAD (with scaling factors PMI and $(\mathrm{PMI})^{\alpha_t^{\mathrm{JSD}}}$, respectively). We restrict the measurement to the top-20 tokens (averaged across decoding steps) on NQ-SWAP and NQ-SYNTH. Intuitively, a method sensitive to the degree of conflict should yield a lower rank correlation (more perturbation) when the amount of conflict is high (on NQ-SWAP), and a higher rank correlation (less perturbation) in cases of low conflict (on NQ-SYNTH). To this end, we compute the absolute difference, or sensitivity, $|\Delta \rho|$, between the two $\rho$ values on NQ-SWAP and NQ-SYNTH. A larger $|\Delta \rho|$ indicates that the method is more effective at distinguishing between conflicting and non-conflicting data, i.e., more sensitive to the degree of conflict in instances.
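One way to compute this statistic at a single decoding step is sketched below; restricting to the greedy distribution's top-20 tokens is our reading of the setup:

```python
import numpy as np
from scipy.stats import spearmanr

def topk_rank_correlation(p_greedy: np.ndarray, p_adjusted: np.ndarray, k: int = 20) -> float:
    # Take the k highest-probability tokens under the greedy distribution,
    # then correlate the two methods' probabilities over those tokens.
    top = np.argsort(p_greedy)[::-1][:k]
    rho, _ = spearmanr(p_greedy[top], p_adjusted[top])
    return float(rho)
```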
Result: PMI over-perturbs the greedy distribution in low-conflict settings; ADACAD is adaptive. Results in Table 4 demonstrate that CAD, which relies only on the PMI term to offset conflicts, perturbs the greedy distribution to roughly the same extent $(\rho)$ in the presence or absence of conflict, i.e., on NQ-SWAP and NQ-SYNTH, respectively. This minimal difference in $|\Delta \rho|$ suggests that CAD is agnostic to the amount of conflict, leading to over-correction for non-conflicting examples. On the other hand, the correlation coefficient of ADACAD is higher on NQ-SYNTH than on NQ-SWAP (0.94 vs. 0.86), indicating more perturbation of the greedy distribution in the presence of conflict. Additionally, the sensitivity to conflict $(|\Delta \rho|)$ of ADACAD is substantially larger $(8\times)$ than that of CAD, highlighting ADACAD's superior ability to distinguish between conflicting and non-conflicting examples. Note that ADACAD has a higher $\rho$ in both settings, indicating that overall, it perturbs the LLM's distribution to a lesser extent.
# 5.3 Tuning $\alpha$ of CAD for each dataset
Since ADACAD does not require validation data to tune the value of $\alpha$ , we set CAD's $\alpha = 1$ (tuned on NQ-SWAP) for QA datasets following Shi et al. (2024), which may explain the strong performance of CAD on NQ-SWAP and low performance on other datasets. To further underscore the advantages of ADACAD over CAD, we compare ADACAD (untuned) to a CAD baseline with a tuned $\alpha$ value. Specifically, we tune CAD's $\alpha$ using a validation set of 500 instances (randomly sampled from the train set) for each dataset.
Table 5 shows that ADACAD achieves an average improvement of $3.4\%$ (absolute) over CAD even when $\alpha$ is tuned. We hypothesize that ADACAD's superior performance stems from varying the level of adjustment adaptively depending on the underlying instance, whereas a tuned- $\alpha$ CAD still uses the same $\alpha$ uniformly for all instances and does not adjust according to varying degrees of conflict among instances. Moreover, while tuning CAD's $\alpha$ for each dataset might improve performance in a controlled setting, such tuning does not scale well to real-world scenarios wherein models encounter a mix of user queries – some with high conflict and others with low or no conflict – and this categorization is not known a priori.
# 5.4 Qualitative Examples
QA Tasks To highlight the importance of adaptively capturing the degree of conflict between the context and parametric knowledge, we show a qualitative example from the NQ dataset in Fig. 3. ADACAD outperforms other methods by correctly generating the "CN Tower's 2,579 steps". We also observe that CAD tends to produce unrelated outputs due to over-correction, which over-amplifies the influence of irrelevant tokens within the vocabulary. COIECD fails to detect the conflict and generates the same incorrect answer as greedy decoding. We find that ADACAD strikes the best balance, providing contrast in scenarios with high knowledge conflict while not suffering from over-correction on low-conflict instances.
Summarization We also show a qualitative example from TofuEval in Fig. 4.

Figure 3: Qualitative example from NQ. Informative text is highlighted in blue, while text about unrelated facts and wrong answers is highlighted in red. ADACAD produces the correct answer, while CAD generates unrelated outputs and COIECD fails to detect the conflict and generates the same incorrect answer as greedy decoding.

Figure 4: TofuEval: Text unsupported by the transcripts is highlighted in red, while consistent and relevant text is highlighted in blue. ADACAD generates a faithful summary whereas other methods tend to hallucinate details.
Given a meeting transcript centered on "report on air quality impacts from Paramount's metal processing facilities on Long Beach residents", we prompt the LLMs to generate a summary focused on the marginal topic "Fiscal Year 2016". Baselines like greedy decoding, CAD, and COIECD tend to hallucinate details, such as fabricated financial data or names of individuals not mentioned in the transcript, which are highlighted in red in Fig. 4. In contrast, ADACAD generates a more accurate and faithful summary without introducing unverified information.
# 6 Discussion and Conclusion
In naturalistic scenarios with mixed datasets containing examples with and without knowledge conflicts, existing decoding methods, including CAD, fail to adapt to changing amounts of conflict and in fact can lead to reduced performance. Although larger and more performant models can store more information in their parametric knowledge – thus leading to less and less conflict as models improve – there will still always be gaps between the model and the actual state of the world (e.g., because of time cutoffs). This means that models will encounter both low- and high-conflict scenarios, no matter their strength.
To this end, we introduce ADACAD, a simple yet effective decoding method that uses Jensen-Shannon divergence to dynamically model the degree of conflict for a given example (and timestep) and automatically balance the contrast between contextual and parametric knowledge. On diverse QA datasets, we show that ADACAD combines the best of greedy decoding and context-aware decoding, improving performance. Additionally, experiments on summarization demonstrate that ADACAD enhances both the quality and factuality of generated text, while other methods tend to hallucinate details. Lastly, ADACAD consistently outperforms COIECD, another hybrid decoding strategy that detects conflict. Our analysis reveals that ADACAD mitigates the over-correction seen in CAD by dynamically adjusting the weight of contextual knowledge based on the degree of conflict.
# Limitations
Since our proposed method ADACAD is based on CAD, it requires access to output logits from LLMs to calculate the difference between output probabilities with and without context. However, API-based LLMs like GPT-4 often do not provide output logits, making it challenging to directly apply logit-based methods like ADACAD and CAD to fully black-box models. Additionally, our experiments focus on English datasets and pre-trained models; as LLMs become available for other languages, future research will be needed to explore the interactions between language and knowledge conflict. We do not foresee any particular risks associated with the application of our method.
# Acknowledgements
We would like to thank David Wan for feedback on our summarization experiments and the anonymous reviewers for their feedback. This work was supported by DARPA ECOLE Program No. HR00112390060, NSF-AI Engage Institute DRL2112635, DARPA Machine Common Sense (MCS) Grant N66001-19-2-4031, and NSF-CAREER Award 1846185. The views contained in this article are those of the authors and not of the funding agency.
# References
AI@Meta. 2024. Llama 3 model card.
Hung-Ting Chen, Michael Zhang, and Eunsol Choi. 2022. Rich knowledge sources bring complex knowledge conflicts: Recalibrating models to reflect conflicting evidence. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 2292-2307, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
Bhuwan Dhingra, Jeremy R. Cole, Julian Martin Eisenschlos, Daniel Gillick, Jacob Eisenstein, and William W. Cohen. 2022. Time-aware language models as temporal knowledge bases. Transactions of the Association for Computational Linguistics, 10:257-273.
Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. 2020. The Pile: An 800GB dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027.
Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In Proceedings of the 37th International Conference on Machine Learning.
Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. The curious case of neural text degeneration. In International Conference on Learning Representations.
Yebowen Hu, Timothy Ganter, Hanieh Deilamsalehy, Franck Dernoncourt, Hassan Foroosh, and Fei Liu. 2023. MeetingBank: A benchmark dataset for meeting summarization. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16409-16423, Toronto, Canada. Association for Computational Linguistics.
Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. 2023. Mistral 7b. arXiv preprint arXiv:2310.06825.
Mandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. 2017. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1601-1611, Vancouver, Canada. Association for Computational Linguistics.
Jungo Kasai, Keisuke Sakaguchi, Yoichi Takahashi, Ronan Le Bras, Akari Asai, Xinyan Velocity Yu, Dragomir Radev, Noah A. Smith, Yejin Choi, and Kentaro Inui. 2023. Realtime QA: What's the answer right now? In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track.
Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, Kristina Toutanova, Llion Jones, Matthew Kelcey, Ming-Wei Chang, Andrew M. Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:452-466.
Angeliki Lazaridou, Adhiguna Kuncoro, Elena Gribovskaya, Devang Agrawal, Adam Liska, Tayfun Terzi, Mai Gimenez, Cyprien de Masson d'Autume, Tomáš Kočisky, Sebastian Ruder, Dani Yogatama, Kris Cao, Susannah Young, and Phil Blunsom. 2021. Mind the gap: Assessing temporal generalization in neural language models. In Advances in Neural Information Processing Systems.
Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, and Douwe Kiela. 2020. Retrieval-augmented generation for knowledge-intensive NLP tasks. In Advances in Neural Information Processing Systems.
Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity-promoting objective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 110-119, San Diego, California. Association for Computational Linguistics.
Xiang Lisa Li, Ari Holtzman, Daniel Fried, Percy Liang, Jason Eisner, Tatsunori Hashimoto, Luke Zettlemoyer, and Mike Lewis. 2023. Contrastive decoding: Open-ended text generation as optimization. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 12286-12312, Toronto, Canada. Association for Computational Linguistics.
Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.
Jianhua Lin. 1991. Divergence measures based on the Shannon entropy. IEEE Transactions on Information Theory, 37(1):145-151.
Alisa Liu, Maarten Sap, Ximing Lu, Swabha Swayamdipta, Chandra Bhagavatula, Noah A. Smith, and Yejin Choi. 2021. DExperts: Decoding-time controlled text generation with experts and anti-experts. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6691-6706, Online. Association for Computational Linguistics.
Shayne Longpre, Kartik Perisetla, Anthony Chen, Nikhil Ramesh, Chris DuBois, and Sameer Singh. 2021. Entity-based knowledge conflicts in question answering. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7052-7063, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
Pan Lu, Liang Qiu, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, Tanmay Rajpurohit, Peter Clark, and Ashwin Kalyan. 2023. Dynamic prompt learning via policy gradient for semi-structured mathematical reasoning. In International Conference on Learning Representations (ICLR).
Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. 2023. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9802-9822, Toronto, Canada. Association for Computational Linguistics.
Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, Xu Jiang, Karl Cobbe, Tyna Eloundou, Gretchen Krueger, Kevin Button, Matthew Knight, Benjamin Chess, and John Schulman. 2022. WebGPT: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332.
Shashi Narayan, Shay B. Cohen, and Mirella Lapata. 2018. Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1797-1807, Brussels, Belgium. Association for Computational Linguistics.
Fabio Petroni, Tim Rocktäschel, Sebastian Riedel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, and Alexander Miller. 2019. Language models as knowledge bases? In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2463-2473, Hong Kong, China. Association for Computational Linguistics.
Adam Roberts, Colin Raffel, and Noam Shazeer. 2020. How much knowledge can you pack into the parameters of a language model? In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5418-5426, Online. Association for Computational Linguistics.
Stephen E. Robertson and Hugo Zaragoza. 2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends in Information Retrieval.
Timo Schick, Jane Dwivedi-Yu, Roberto Dessi, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. In Thirty-seventh Conference on Neural Information Processing Systems.
Abigail See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summarization with pointer-generator networks. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1073-1083, Vancouver, Canada. Association for Computational Linguistics.
Weijia Shi, Xiaochuang Han, Mike Lewis, Yulia Tsvetkov, Luke Zettlemoyer, and Wen-tau Yih. 2024. Trusting your evidence: Hallucinate less with context-aware decoding. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pages 783-791, Mexico City, Mexico. Association for Computational Linguistics.
Kurt Shuster, Spencer Poff, Moya Chen, Douwe Kiela, and Jason Weston. 2021. Retrieval augmentation reduces hallucination in conversation. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 3784-3803, Punta Cana, Dominican Republic. Association for Computational Linguistics.
Hexiang Tan, Fei Sun, Wanli Yang, Yuanzhuo Wang, Qi Cao, and Xueqi Cheng. 2024. Blinded by generated contexts: How language models merge generated and retrieved contexts when knowledge conflicts? arXiv preprint arXiv:2401.11911.
Liyan Tang, Igor Shalyminov, Amy Wong, Jon Burnsky, Jake Vincent, Yu'an Yang, Siffi Singh, Song Feng, Hwanjun Song, Hang Su, Lijia Sun, Yi Zhang, Saab Mansour, and Kathleen McKeown. 2024. TofuEval: Evaluating hallucinations of LLMs on topic-focused dialogue summarization. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 4455-4480, Mexico City, Mexico. Association for Computational Linguistics.
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinez, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. 2023. Llama 2: Open foundation and finetuned chat models. arXiv preprint arXiv:2307.09288.
David Wan, Jaemin Cho, Elias Stengel-Eskin, and Mohit Bansal. 2024. Contrastive region guidance: Improving grounding in vision-language models without training. arXiv preprint arXiv:2403.02325.
David Wan, Shiyue Zhang, and Mohit Bansal. 2023. HistAlign: Improving context dependency in language generation by aligning with history. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2941-2960, Singapore. Association for Computational Linguistics.
Jian Xie, Kai Zhang, Jiangjie Chen, Renze Lou, and Yu Su. 2024. Adaptive chameleon or stubborn sloth: Revealing the behavior of large language models in knowledge conflicts. In The Twelfth International Conference on Learning Representations.
Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William Cohen, Ruslan Salakhutdinov, and Christopher D. Manning. 2018. HotpotQA: A dataset for diverse, explainable multi-hop question answering. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2369-2380, Brussels, Belgium. Association for Computational Linguistics.
Xiaowei Yuan, Zhao Yang, Yequan Wang, Shengping Liu, Jun Zhao, and Kang Liu. 2024. Discerning and resolving knowledge conflicts through adaptive decoding with contextual information-entropy constraint. In Findings of the Association for Computational Linguistics ACL 2024, pages 3903-3922, Bangkok, Thailand and virtual meeting. Association for Computational Linguistics.
Yuheng Zha, Yichi Yang, Ruichen Li, and Zhiting Hu. 2023. AlignScore: Evaluating factual consistency with a unified alignment function. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 11328-11348, Toronto, Canada. Association for Computational Linguistics.
Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. BERTScore: Evaluating text generation with BERT. In International Conference on Learning Representations.
Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, and Lu Wang. 2023. Merging generated and retrieved knowledge for open-domain QA. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 4710-4728, Singapore. Association for Computational Linguistics.
Zheng Zhao, Emilio Monti, Jens Lehmann, and Haytham Assem. 2024. Enhancing contextual understanding in large language models through contrastive decoding. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 4225-4237. Association for Computational Linguistics.
Wenxuan Zhou, Sheng Zhang, Hoifung Poon, and Muhao Chen. 2023. Context-faithful prompting for large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 14544-14556, Singapore. Association for Computational Linguistics.
Chenguang Zhu, Yang Liu, Jie Mei, and Michael Zeng. 2021. MediaSum: A large-scale media interview dataset for dialogue summarization. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5927-5934, Online. Association for Computational Linguistics.
# A Jensen-Shannon Divergence
Jensen-Shannon divergence (JSD) is a symmetric measure of the similarity between two probability distributions, defined as the average of the Kullback-Leibler divergences from their mean distribution. The JSD between two probability distributions $P$ and $Q$ is defined as:
$$
\operatorname{JSD}(P \parallel Q) = \frac{1}{2}\left(\operatorname{KL}(P \parallel M) + \operatorname{KL}(Q \parallel M)\right)
$$
where $M = \frac{1}{2}(P + Q)$ is the mixture distribution of $P$ and $Q$, and:
$$
\operatorname{KL}(P \parallel M) = \sum_{x} P(x) \log \frac{P(x)}{M(x)}
$$
$$
\operatorname{KL}(Q \parallel M) = \sum_{x} Q(x) \log \frac{Q(x)}{M(x)}
$$
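A small numeric check of these definitions (assuming base-2 logarithms, which gives the [0, 1] bound used in the main text):

```python
import numpy as np

def jsd(p, q, eps=1e-12):
    m = 0.5 * (p + q)
    kl = lambda a, b: float(np.sum(a * np.log2((a + eps) / (b + eps))))
    return 0.5 * (kl(p, m) + kl(q, m))

p = np.array([0.9, 0.1])
q = np.array([0.1, 0.9])
print(jsd(p, p))  # 0.0   -> identical distributions, no conflict
print(jsd(p, q))  # ~0.53 -> substantial conflict, still bounded by 1
```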

Figure 5: Plot of JSD values for the first 5 decoding steps using Llama3-70B on TofuEval. The JSD values tend to be lower and show less variance at the start of decoding.
# A.1 JSD Value Trend for Summarization
Fig. 5 illustrates the trend of JSD values over the initial decoding steps when using Llama3-70B on the TofuEval dataset. We observe that JSD values start relatively low and exhibit less variation in the early steps of decoding. This may be due to the model's tendency to produce generic, low-information outputs at the start of each sequence. As decoding progresses, the JSD values increase and become more sensitive, indicating that the dynamic adjustment in ADACAD behaves as intended.
# B Dataset Details
We use six question answering datasets and three summarization datasets for evaluation. We also present one example from each dataset, as detailed in Table 10. For the synthetically generated QA datasets NQ-SWAP and NQ-SYNTH, we provide examples in Table 6.
# B.1 QA Datasets
Some QA datasets, such as NQ, TriviaQA, and HotpotQA, do not have public test sets. For these, we report the performance of baselines and ADACAD on the dev set. Further, following Shi et al. (2024), to expedite inference, we sub-sample datasets whose test sets are very large ($>8\mathrm{K}$ instances).
- Natural Questions (NQ; Kwiatkowski et al., 2019) is a large-scale QA dataset consisting of real user questions issued to Google search, with answers found in Wikipedia. We test on 3231 instances from the NQ validation set, which originally contained 7.83K examples. These instances were selected because they have short answers, making them suitable for evaluating all baselines and our method.
- NQ-SWAP (Longpre et al., 2021) introduces synthetic conflicts by swapping entities in the context to challenge the model's ability to manage conflicting information. Specifically, Longpre et al. (2021) first identify instances with named entity answers, then substitute mentions of the entity in the gold document with an alternate entity. NQ-SWAP consists of 4K instances derived from the NQ dataset.
- TriviaQA (Joshi et al., 2017) is a realistic QA dataset that includes a wide variety of trivia questions, requiring models to deal with large amounts of text from various sources and handle inference over multiple sentences. We randomly sample 1K instances from the TriviaQA Wiki validation set, which contains a total of 8K examples.
- PopQA (Mallen et al., 2023) is a dataset designed to test models' performance on questions about long-tail entities. We choose 1.6K instances from the PopQA test set for which we are able to successfully retrieve contexts containing the gold answer (cf. Section 4.1).
- HotpotQA (Yang et al., 2018) is a QA dataset that requires multi-hop reasoning, where the model needs to find and combine information from multiple sources to answer complex questions. We use the entire development set of HotpotQA, consisting of 7.4K instances.
- TabMWP (Lu et al., 2023) is a dataset focused on open-domain grade-level problems that require mathematical reasoning on both textual and tabular data. We use an official "lite" subset of TabMWP called "test1k" which contains 1K instances.
# B.2 Summarization Datasets
- CNN-DM (See et al., 2017) is a widely used dataset for training and evaluating models on abstractive summarization tasks, involving news articles and their summaries. We randomly sample 500 examples from the original 11.5K test set.
- XSum (Narayan et al., 2018) is an abstractive summarization dataset known for its highly challenging nature, where the goal is to generate concise, one-sentence summaries from longer documents. We used 500 instances from the XSum dataset's 11.3K test set.
Question: How many episodes are in Chicago Fire season 4?

NATURAL QUESTION
Original Context: The fourth season of Chicago Fire contained 23 episodes. It is an American drama television series with ...
Original Answer: 23

NQ-SWAP
Substitute Context: The fourth season of Chicago Fire contained 10 episodes. It is an American drama television series with ...
Substitute Answer: 10

NQ-SYNTH
Substitute Context: The fourth season of Chicago Fire contained 22 episodes. It is an American drama television series with ...
Substitute Answer (generated from LLM): 22
Table 6: Example from NQ-SWAP and NQ-SYNTH. A substitute example for NQ-SWAP is made from the original example by replacing the original answer, 23, with a similar but conflicting answer, i.e., 10. A substitute example for NQ-SYNTH is made from the original example by replacing the original answer, 23, with one generated by Llama3-70B without context, i.e., 22.
- TofuEval (Tang et al., 2024) is a benchmark for evaluating the factual consistency and topic relevance of summaries, especially in scenarios involving dialogue or meeting transcriptions. The benchmark draws 50 test set documents from each of two datasets: MediaSum (Zhu et al., 2021) and MeetingBank (Hu et al., 2023). For each document, three topics were generated, resulting in a total of 300 topic-focused summaries. Approximately $75\%$ of these are main topics, which cover the central information under discussion in a document, and the rest are marginal topics, which cover information that is part of the document's context but not its main focus.
# B.3 Licenses
Datasets are released under the following licenses:
- Natural Questions: Apache-2.0 license
- NQ-Swap: MIT license
- TriviaQA: Apache-2.0 license
- PopQA: MIT license
- HotpotQA: Apache-2.0 license
- TabMWP: MIT license
- CNN-DM: Apache-2.0 license
- XSum: MIT license
- TofuEval: MIT license
The models we use have the following licenses:
- Llama 2: custom license https://ai.meta.com/llama/legal/
- Llama 3: custom license https://www.llama.com/llama3/licenses/
- Mistral: Apache-2.0 license
# C Instruction-tuned LLMs Experiments
We compare ADACAD against the baselines on all datasets using instruction-tuned language models and show the results in Table 7. We find that ADACAD achieves comparable or better performance than all baselines when applied to instruction-tuned models.
# D Results of Different Decoding Methods on CNN-DM
Table 9 shows the results of different base decoding methods on CNN-DM with Llama3-70B. Here, we see that greedy decoding performs better than top-$p$ sampling (Holtzman et al., 2020), motivating our use of greedy decoding in Table 2.
<table><tr><td>Decoding</td><td>ROUGE-L</td><td>BERT-P</td></tr><tr><td>Top-p Sampling</td><td>17.48</td><td>86.79</td></tr><tr><td>Greedy Decoding</td><td>23.47</td><td>92.06</td></tr></table>
Table 9: Comparison of greedy decoding and top-$p$ sampling ($p = 0.9$) with Llama3-70B on CNN-DM.
# E Full Results with Different Base LMs on Summarization Tasks
Table 8 shows the full results with all base language models on three summarization tasks: CNN-DM, XSum, and TofuEval. ADACAD achieves comparable or better performance than all baselines across different LLMs.
<table><tr><td>Model</td><td>Decoding</td><td>NQ</td><td>NQ-SWAP</td><td>TriviaQA</td><td>PopQA</td><td>HotpotQA</td><td>TabMWP</td><td>Avg</td></tr><tr><td rowspan="3">Llama2-13B-Chat</td><td>Greedy</td><td>35.75</td><td>50.24</td><td>54.40</td><td>72.61</td><td>32.15</td><td>50.40</td><td>49.26</td></tr><tr><td>CAD</td><td>39.49</td><td>71.24</td><td>59.40</td><td>68.81</td><td>30.14</td><td>48.70</td><td>52.96</td></tr><tr><td>ADACAD</td><td>37.08</td><td>57.69</td><td>61.20</td><td>72.31</td><td>32.34</td><td>52.10</td><td>52.12</td></tr><tr><td rowspan="3">Llama3-8B-Inst</td><td>Greedy</td><td>40.27</td><td>60.89</td><td>64.00</td><td>70.89</td><td>39.66</td><td>68.50</td><td>57.37</td></tr><tr><td>CAD</td><td>39.43</td><td>71.19</td><td>52.30</td><td>70.35</td><td>37.27</td><td>63.10</td><td>55.61</td></tr><tr><td>ADACAD</td><td>39.65</td><td>67.37</td><td>61.50</td><td>70.41</td><td>39.43</td><td>66.10</td><td>57.41</td></tr><tr><td rowspan="3">Llama3-70B-Inst</td><td>Greedy</td><td>40.82</td><td>59.16</td><td>64.10</td><td>64.41</td><td>47.70</td><td>70.40</td><td>57.77</td></tr><tr><td>CAD</td><td>42.31</td><td>66.37</td><td>58.40</td><td>64.23</td><td>47.21</td><td>69.30</td><td>57.97</td></tr><tr><td>ADACAD</td><td>41.35</td><td>60.77</td><td>64.60</td><td>65.78</td><td>48.21</td><td>71.90</td><td>58.77</td></tr><tr><td rowspan="3">Mistral-7B-Inst</td><td>Greedy</td><td>42.93</td><td>64.74</td><td>77.20</td><td>76.59</td><td>50.26</td><td>50.20</td><td>60.32</td></tr><tr><td>CAD</td><td>42.56</td><td>67.89</td><td>71.70</td><td>74.45</td><td>47.12</td><td>46.40</td><td>58.35</td></tr><tr><td>ADACAD</td><td>42.87</td><td>63.99</td><td>75.40</td><td>76.89</td><td>49.49</td><td>47.30</td><td>59.32</td></tr></table>
Table 7: Results on QA datasets with different instruction-tuned language models. When averaged across datasets, ADACAD is better than or comparable to the baselines.
# F Prompts
We provide the prompts used with pre-trained base language models, with and without context, for both QA and summarization tasks.
# Question Answering
# With Context:
{context}
Using only the references listed above, answer the following question:
Question: {question}
Answer:
# Without Context:
Answer the following question:
Question: {question}
Answer:
# Summarization - XSum
# With Context:
Document: {document}
Summarize the document in one sentence.
Summary:
# Without Context:
Summarize the document in one sentence.
Summary:
# Summarization - CNN-DM
# With Context:
Document: {document}
Summarize the document in three sentences.
Summary:
# Without Context:
Summarize the document in three sentences.
Summary:
# Summarization - TofuEval
# With Context:
Document: {document}
Summarize the provided document focusing on "{topic}". The summary should be less than 50 words in length.
Summary:
# Without Context:
Summarize the provided document focusing on "{topic}". The summary should be less than 50 words in length.
Summary:
<table><tr><td rowspan="2">Decoding</td><td colspan="3">CNN-DM</td><td colspan="3">XSum</td><td colspan="2">TofuEval (AlignScore)</td></tr><tr><td>ROUGE-L</td><td>BERT-P</td><td>AlignScore</td><td>ROUGE-L</td><td>BERT-P</td><td>AlignScore</td><td>Overall</td><td>Main / Marginal</td></tr><tr><td colspan="9">Llama2-13B</td></tr><tr><td>Greedy</td><td>23.70</td><td>94.25</td><td>87.28</td><td>13.51</td><td>93.30</td><td>85.23</td><td>66.11</td><td>72.51 / 46.23</td></tr><tr><td>CAD</td><td>24.33</td><td>94.44</td><td>88.99</td><td>14.86</td><td>93.36</td><td>82.41</td><td>80.39</td><td>84.03 / 69.07</td></tr><tr><td>COIECD</td><td>20.21</td><td>88.63</td><td>75.72</td><td>13.95</td><td>89.80</td><td>70.41</td><td>62.88</td><td>68.45 / 45.55</td></tr><tr><td>ADACAD</td><td>23.93</td><td>94.63</td><td>91.15</td><td>14.18</td><td>94.04</td><td>84.33</td><td>80.39</td><td>83.94 / 69.36</td></tr><tr><td colspan="9">Llama3-8B</td></tr><tr><td>Greedy</td><td>25.16</td><td>94.92</td><td>90.33</td><td>13.16</td><td>93.43</td><td>83.65</td><td>68.17</td><td>73.51 / 51.57</td></tr><tr><td>CAD</td><td>24.91</td><td>94.70</td><td>91.44</td><td>13.80</td><td>93.37</td><td>86.88</td><td>83.40</td><td>86.77 / 72.94</td></tr><tr><td>COIECD</td><td>23.60</td><td>92.01</td><td>83.92</td><td>13.65</td><td>91.40</td><td>69.47</td><td>70.07</td><td>73.65 / 58.94</td></tr><tr><td>ADACAD</td><td>25.42</td><td>95.09</td><td>94.35</td><td>13.83</td><td>94.02</td><td>86.78</td><td>80.62</td><td>83.24 / 72.46</td></tr><tr><td colspan="9">Llama3-70B</td></tr><tr><td>Greedy</td><td>24.93</td><td>95.41</td><td>91.44</td><td>14.36</td><td>94.05</td><td>85.28</td><td>76.66</td><td>81.64 / 61.19</td></tr><tr><td>CAD</td><td>24.76</td><td>94.45</td><td>91.01</td><td>14.59</td><td>93.65</td><td>84.34</td><td>83.93</td><td>87.26 / 73.58</td></tr><tr><td>COIECD</td><td>23.47</td><td>92.06</td><td>85.49</td><td>14.51</td><td>91.04</td><td>73.81</td><td>75.24</td><td>80.68 / 58.31</td></tr><tr><td>ADACAD</td><td>25.42</td><td>94.91</td><td>94.97</td><td>14.91</td><td>94.29</td><td>85.81</td><td>85.07</td><td>88.06 / 75.79</td></tr><tr><td colspan="9">Mistral-7B</td></tr><tr><td>Greedy</td><td>24.59</td><td>93.57</td><td>80.80</td><td>14.07</td><td>88.56</td><td>58.76</td><td>63.07</td><td>68.62 / 45.79</td></tr><tr><td>CAD</td><td>23.72</td><td>93.22</td><td>90.61</td><td>18.20</td><td>91.54</td><td>84.94</td><td>67.64</td><td>67.55 / 67.48</td></tr><tr><td>COIECD</td><td>23.50</td><td>92.06</td><td>83.97</td><td>17.85</td><td>89.79</td><td>69.26</td><td>65.95</td><td>70.63 / 51.39</td></tr><tr><td>ADACAD</td><td>24.76</td><td>94.21</td><td>93.05</td><td>18.51</td><td>92.19</td><td>86.79</td><td>74.00</td><td>77.59 / 62.84</td></tr><tr><td colspan="9">Llama3-70B-Instruct</td></tr><tr><td>Greedy</td><td>24.72</td><td>90.64</td><td>88.22</td><td>23.19</td><td>90.80</td><td>82.40</td><td>78.56</td><td>80.18 / 73.52</td></tr><tr><td>CAD</td><td>25.17</td><td>91.19</td><td>88.52</td><td>20.92</td><td>91.52</td><td>86.54</td><td>79.86</td><td>79.55 / 80.82</td></tr><tr><td>COIECD</td><td>23.85</td><td>89.84</td><td>83.88</td><td>22.41</td><td>90.61</td><td>81.42</td><td>77.54</td><td>78.69 / 73.97</td></tr><tr><td>AdaCAD</td><td>25.26</td><td>90.91</td><td>88.68</td><td>21.52</td><td>91.30</td><td>85.30</td><td>81.16</td><td>82.82 / 76.03</td></tr></table>
|
| 452 |
+
|
| 453 |
+
Table 8: Results on summarization datasets with different LMs. ADACAD generally outperforms the baselines across metrics and datasets.
|
| 454 |
+
|
| 455 |
+
<table><tr><td>Natural Question</td></tr><tr><td>c: The second season of the American television drama series Breaking Bad premiered on March 8, 2009 and concluded on May 31, 2009. It consisted of 13 episodes, each running approximately 47 minutes in length ...
|
| 456 |
+
x: How many episodes in season 2 Breaking Bad?</td></tr><tr><td>NQ-SWAP</td></tr><tr><td>c: The second season of the American television drama series Breaking Bad premiered on March 8, 2009 and concluded on May 31, 2009. It consisted of 27 episodes, each running approximately 47 minutes in length ...
|
| 457 |
+
x: How many episodes in season 2 Breaking Bad?</td></tr><tr><td>TriviaQA</td></tr><tr><td>c: ... Removal of dental biofilm is important as it may become acidic causing demineralization of the teeth (also known as caries) or harden into calculus (dental) (also known as tartar). Calculus can not be removed through ...
|
| 458 |
+
x: In dentistry, what is the name given to hardened dental plaque?</td></tr><tr><td>PopQA</td></tr><tr><td>c: The 2012 Uzbekistan First League was the 21st season of 2nd level football in Uzbekistan since 1992. It is split in an Eastern and Western zone, each featuring 12 teams ...
|
| 459 |
+
x: What sport does 2012 Uzbekistan First League play?</td></tr><tr><td>HotpotQA</td></tr><tr><td>c: <t> Superdrag </t> Superdrag was an American alternative rock band from Knoxville, Tennessee ...
|
| 460 |
+
<t> Collective Soul </t> Collective Soul is an American rock band originally from Stockbridge, Georgia ...
|
| 461 |
+
x: Are both Superdrag and Collective Soul rock bands?</td></tr><tr><td>TabMWP</td></tr><tr><td>c: alpaca | $1,605.00
|
| 462 |
+
kinkajou | $1,837.00
|
| 463 |
+
python | $8,343.00
|
| 464 |
+
parrot | $1,123.00
|
| 465 |
+
macaw | $1,629.00
|
| 466 |
+
x: Erik has $7,616.00. How much money will Erik have left if he buys a parrot and a kinkajou? (Unit: $)</td></tr><tr><td>CNN-DM</td></tr><tr><td>c: Article: (CNN)Two years ago, the storied Boston Marathon ended in terror and altered the lives of runners, spectators and those who tried to come to their rescue. Just last week, Dzhokhar Tsarnaev was convicted ...
|
| 467 |
+
x: Summarize the article in three sentences. Summary:</td></tr><tr><td>XSum</td></tr><tr><td>c: You may want to choose another fantasy destination after the British Foreign Office told tourists to be aware that some political demonstrations in the capital, Male, have led to violence. It did add, though, that most trips ...
|
| 468 |
+
x: Summarize the article in one sentence. Summary:</td></tr><tr><td>TofuEval</td></tr><tr><td>c: Document: DOBBS: General Motors today announced it will offer early retirement buyouts for 113,000 of its employees. Management calls it, “accelerated attrition”. And it is only the latest sign of the dramatic decline ...
|
| 469 |
+
x: Summarize the provided document focusing on “Buyouts for General Motors employees”. The summary should be less than 50 words in length. Summary:</td></tr></table>
|
| 470 |
+
|
| 471 |
+
Table 10: An illustration of input query $x$ and relevant context $c$ for different datasets.
|
adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0106e68243b7dd87b3bc2f6377fb88269087a61d3fbced22c85273ed9e9af4db
|
| 3 |
+
size 1052996
|
adacadadaptivelydecodingtobalanceconflictsbetweencontextualandparametricknowledge/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b2364b266d29159b04c35b681450ae623273f053ada8445e87ff1ce8927c635f
|
| 3 |
+
size 522751
|
adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/8b39b034-d377-424a-a0d3-f7fefaf33c8d_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6334ded9f18fa5889127707bba4c2018a0d6b403c3ba07ae0ca4774530555e3e
|
| 3 |
+
size 111479
|
adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/8b39b034-d377-424a-a0d3-f7fefaf33c8d_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ff9182f43692ba034c86b54036ca26ed47abb4b656ca0a0c372a4f1e373fae69
|
| 3 |
+
size 130802
|
adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/8b39b034-d377-424a-a0d3-f7fefaf33c8d_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ffdd1be2fbffe6feb3c072182e16f78c519a0dba5e1ac2df6541a8a4470dca5c
|
| 3 |
+
size 1079304
|
adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/full.md
ADDED
|
@@ -0,0 +1,382 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AdaMergeX: Cross-Lingual Transfer with Large Language Models via Adaptive Adapter Merging
|
| 2 |
+
|
| 3 |
+
Yiran Zhao $^{1,2*}$ Wenxuan Zhang $^{2,3\dagger}$ Huiming Wang $^{2,4*}$ Kenji Kawaguchi $^{1}$ Lidong Bing $^{5‡}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ National University of Singapore $^{2}$ DAMO Academy, Alibaba Group, Singapore
|
| 6 |
+
$^{3}$ Hupan Lab, 310023, Hangzhou, China $^{4}$ Singapore University of Technology and Design
|
| 7 |
+
$^{5}$ Shanda AI Research Institute
|
| 8 |
+
zhaoyiran@u.nus.edu kenji@comp.nus.edu.sg
|
| 9 |
+
{saike.zwx, huiming.wang}@alibaba-inc.com lidong.bing@shanda.com
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Large Language Models (LLMs) excel in high-resource languages but underperform in low-resource ones. As an effective alternative to the direct fine-tuning on target tasks in specific languages, cross-lingual transfer addresses the challenges of limited training data. It decouples "task ability" and "language ability" by fine-tuning on the target task in the source language and another selected task in the target language, respectively. However, they fail to fully separate the task ability from the source language or the language ability from the chosen task. In this paper, we acknowledge the mutual reliance between task ability and language ability and direct our attention toward the gap between the target language and the source language on tasks. As the gap removes the impact of tasks, we assume that it remains consistent across tasks. Based on this assumption, we propose a new cross-lingual transfer method called AdaMergeX that utilizes adaptive adapter merging. By introducing a reference task, we can determine that the divergence of adapters fine-tuned on the reference task in both languages follows the same distribution as the divergence of adapters fine-tuned on the target task in both languages. Hence, we can obtain target adapters by combining the other three adapters. Furthermore, we propose a structure-adaptive adapter merging method. Our empirical results demonstrate that our approach yields new and effective cross-lingual transfer, outperforming existing methods across all settings. $^1$
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Multilingual NLP models, including conventional models such as mBERT (Kenton and Toutanova,
|
| 18 |
+
|
| 19 |
+
2019), XLM (Conneau and Lample, 2019), XLM-R (Conneau et al., 2020), as well as recent multilingual large language models (LLMs) like ChatGPT (OpenAI, 2022), PaLM2 (Anil et al., 2023), Llama2 (Touvron et al., 2023), have gained significant attention given the growing need for multilingual requirements. To further enhance the model's multilingual capability, particularly in cases where training data of certain tasks for low-resource languages is scarce and fine-tuning becomes impractical (Ma et al., 2023), cross-lingual transfer is introduced to extend the task-solving ability in a source language to a wide range of target languages (Lin et al., 2019; Chen et al., 2022; Deb et al., 2023).
|
| 20 |
+
|
| 21 |
+
Essentially, cross-lingual transfer aims to transfer the ability to solve a certain task ("task ability") from a source language to a particular target language ("language ability"). Some cross-lingual transfer techniques do not directly improve the language ability in specific languages. Instead, they utilize the language ability in English for multilingual tasks by employing methods such as translation (Liang et al., 2023; Huang et al., 2023b), representation alignment (Nguyen et al., 2023; Salesky et al., 2023; Gao et al., 2023), or prompting methods specifically developed for LLMs (Li et al., 2023; Tanwar et al., 2023; Zhang et al., 2023b). Some works intertwine these two abilities and utilize translated parallel corpora for fine-tuning (Pan et al., 2021; Zhang et al., 2022; Zhu et al., 2023).
|
| 22 |
+
|
| 23 |
+
On the contrary, some studies directly focus on enhancing the language abilities in target languages, so they endeavor to decouple task ability and language ability, enhance them separately, and subsequently merge them (Pfeiffer et al., 2020; Ansell et al., 2022; Ponti et al., 2023). However, such an approach overlooks the intrinsic interdependence between task ability and language ability. Given that any specific task would be expressed in a particular language, these two abilities cannot be distinctly isolated from one another.
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
Figure 1: An overview of invariants of the language ability gap among different tasks in the adapter space, where by employing any three we can get the remaining one. In light of this observation, we propose AdaMergeX.
|
| 27 |
+
|
| 28 |
+
In this work, we argue that language ability and task ability are inherently interconnected. Instead of separating one from the other, we hold that task ability is affiliated with the source language while language ability refers to the capacity gap between the target language and the source language. In line with the famous equation "king-queen = man-woman" in the word embedding space (Mikolov et al., 2013), we assume that the divergences between LLMs fine-tuned in different languages on a particular task follow the same distribution across diverse tasks. In the case of parameter-efficient fine-tuning, the equation becomes $\text{read}^{fr} - \text{read}^{en} = \text{math}^{fr} - \text{math}^{en}$ in the adapter space, where $\text{read}$ and $\text{math}$ refer to two tasks, and $\text{fr}$ and $\text{en}$ indicate the two languages of the corresponding tasks. As shown in the left side of Figure 1, in the adapter space, the divergence between the target language and source language on the target task follows the same distribution as the divergence on the reference task.
|
| 29 |
+
|
| 30 |
+
Therefore, we propose to accomplish the cross-lingual transfer through Adaptive Adapter Merging (AdaMergeX) with such a relation as shown in the right side of Figure 1. Specifically, we introduce a reference task from which we obtain the divergence between the target language and source language. Such a reference task can be an easily accessible task for both high-resource and low-resource languages, such as causal language modeling. In addition, we fine-tune LLMs on the target task in the source language. Finally, by merging the language ability and task ability, we can obtain the adapters of the target task in the target language.
|
| 31 |
+
|
| 32 |
+
Furthermore, in contrast to previous studies that combine models or adapters through a linear combination (Ilharco et al., 2022; Zhang et al., 2023a; Ponti et al., 2023), we argue that the model merging method should align with the manner in which adapters are integrated with language models.
|
| 33 |
+
|
| 34 |
+
Therefore, we design a structure-adaptive adapter merging method, which can adaptively select merging methods for LoRA (Hu et al., 2021), $(\mathrm{IA})^3$ (Liu et al., 2022), Adapter (Houlsby et al., 2019), Prefix-Tuning (Li and Liang, 2021), etc.
|
| 35 |
+
|
| 36 |
+
We evaluate the proposed AdaMergeX method on a wide range of multilingual tasks spanning 12 languages, covering a broad resource spectrum from high-resource to low-resource languages. Our evaluation demonstrates that AdaMergeX consistently outperforms other state-of-the-art methods including model merging, prompting, and general adapter merging methods. Notably, compared to MAD-X (Pfeiffer et al., 2020) which separates the task and language ability with two adapters, AdaMergeX achieves $8.0\%$ and $15.9\%$ absolute improvement on XCOPA and XQuAD respectively with XLM-R. In the case of the state-of-the-art adapter merging method AriMerge (Zhang et al., 2023a), AdaMergeX achieves $31.1\%$ relative improvement on average in all languages and all tasks with Llama2. Moreover, the ablation analysis shows that AdaMergeX performs consistently well with different backbone models, source languages, and reference tasks.
|
| 37 |
+
|
| 38 |
+
# 2 Background
|
| 39 |
+
|
| 40 |
+
Given a pre-trained model, fine-tuning is often employed to improve the performance on specific tasks. Specifically, for a layer $h = W_0x$ , where $x \in \mathbb{R}^k$ is input, $h \in \mathbb{R}^d$ is output and $W_0 \in \mathbb{R}^{d \times k}$ is pre-trained parameters, fine-tuning updates parameters from $W_0$ to $W'$ and the layer becomes $h = W'x$ . However, full fine-tuning requires many training data points and computing resources, which inspires the design of adapters (Houlsby et al., 2019). With adapters, the layer is changed to $h = (W_0 \circ W_A)x$ , where $W_A$ denotes the parameters of adapters and $\circ$ denotes the combination operation of pre-trained parameters and adapter parameters. During such parameter-efficient fine-tuning, pre-trained parameters $W_0$ are fixed and only adapter parameters $W_A$ are updated.
|
| 41 |
+
|
| 42 |
+
With the number of parameters growing much bigger for LLMs, adapters have become more widely used in the current practice of fine-tuning LLMs (Hu et al., 2021; Li and Liang, 2021; Liu et al., 2022).
|
| 43 |
+
|
| 44 |
+
Various combination methods $\circ$ have been designed for different adapters. In this paper, we focus on two main widely used combination methods: addition and multiplication, corresponding to LoRA (Hu et al., 2021) and (IA) $^3$ (Liu et al., 2022), respectively. We also involve Adapter (Houlsby et al., 2019) and Prefix-Tuning (Li and Liang, 2021) to guarantee generalizability.
|
| 45 |
+
|
| 46 |
+
LoRA Specializing the combination method “○” to element-wise addition denoted as “⊕”, LoRA employs low-rank decomposition to reduce training complexity. The layer is thus changed to
|
| 47 |
+
|
| 48 |
+
$$
|
| 49 |
+
h = \left(W _ {0} \oplus W _ {A}\right) x = \left(W _ {0} \oplus B A\right) x, \tag {1}
|
| 50 |
+
$$
|
| 51 |
+
|
| 52 |
+
where $B \in \mathbb{R}^{d \times r}$ and $A \in \mathbb{R}^{r \times k}$ are low-rank decomposed matrices, and the rank $r \ll \min(d, k)$ . Specifically, the LoRA can be implemented in any layer of the Transformer (Vaswani et al., 2017) architecture, including the attention layer and the feed-forward layer.
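To make Equation (1) concrete, here is a minimal PyTorch sketch of a LoRA-augmented layer; the dimensions and tensor names are illustrative, not taken from any released implementation.

```python
import torch

d, k, r = 16, 32, 4      # output dim, input dim, LoRA rank (r << min(d, k))
W0 = torch.randn(d, k)   # frozen pre-trained weight
B = torch.zeros(d, r)    # trainable low-rank factor; starting B at zero means
A = torch.randn(r, k)    # the adapter initially leaves the layer unchanged

x = torch.randn(k)
h = (W0 + B @ A) @ x     # Equation (1): h = (W0 ⊕ BA) x
```

Only $B$ and $A$ would receive gradients during fine-tuning; $W_0$ stays frozen.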
|
| 53 |
+
|
| 54 |
+
$(\mathbf{IA})^3$ $(\mathrm{IA})^3$ specializes the combination method to element-wise multiplication “ $\odot$ ”:
|
| 55 |
+
|
| 56 |
+
$$
|
| 57 |
+
h = \left(W _ {0} \odot W _ {A}\right) x, \tag {2}
|
| 58 |
+
$$
|
| 59 |
+
|
| 60 |
+
where $W_{A}\in \mathbb{R}^{k}$ is element-wise multiplied to each row of $W_{0}$. Furthermore, (IA) $^{3}$ can only be applied to the key and value neurons in the attention layer and the dimension-reduction neuron in the feed-forward layer of the Transformer architecture.
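Analogously, a minimal sketch of Equation (2) under the same illustrative setup; the learned vector rescales each row of the frozen weight.

```python
import torch

d, k = 16, 32
W0 = torch.randn(d, k)   # frozen pre-trained weight
w_a = torch.ones(k)      # (IA)^3 vector: initialized to ones, then trained

x = torch.randn(k)
h = (W0 * w_a) @ x       # Equation (2): w_a is broadcast over the rows of W0
```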
|
| 61 |
+
|
| 62 |
+
Adapter & Prefix-Tuning By inserting layers and prefix tokens into the model, the combination methods of Adapter and Prefix-Tuning can be formulated as
|
| 63 |
+
|
| 64 |
+
$$
|
| 65 |
+
h = \left(W _ {0} \| W _ {A}\right) x, \tag {3}
|
| 66 |
+
$$
|
| 67 |
+
|
| 68 |
+
where $\parallel$ represents concatenation with the original pre-trained parameters.
|
| 69 |
+
|
| 70 |
+
# 3 AdaMergeX: Adaptive Adapter Merging for Cross-lingual Transfer
|
| 71 |
+
|
| 72 |
+
# 3.1 Cross-Lingual Transfer via Adapter Merging
|
| 73 |
+
|
| 74 |
+
Generally, the ability of a model in a particular task and language can be seen as a composite of two
|
| 75 |
+
|
| 76 |
+
abilities, namely, "task ability" and "language ability". The former denotes the model's competence in performing a certain task (e.g., text classification, sentence completion), whereas the latter signifies their general proficiency in the given language (e.g., English, Chinese, German). Built on the premise that language ability and task proficiency are inherently intertwined, it is advocated that rather than isolating one from the other, the inference should be drawn that task ability is associated with the source language, whereas language ability refers to the capacity difference between the target language and the source language. In line with the famous equation "king - queen = man - woman" in the word embedding space, we assume that the divergences between LLMs fine-tuned in different languages on a particular task follow the same distribution across diverse tasks.
|
| 77 |
+
|
| 78 |
+
Formally, let $A_{l_i t_j}$ denote the adapter of task $t_j$ in language $l_i$; then for any two languages $l_1, l_2$ and two NLP tasks $t_1, t_2$, we have
|
| 79 |
+
|
| 80 |
+
$$
|
| 81 |
+
A _ {l _ {1} t _ {1}} \| A _ {l _ {2} t _ {1}} \sim A _ {l _ {1} t _ {2}} \| A _ {l _ {2} t _ {2}}, \tag {4}
|
| 82 |
+
$$
|
| 83 |
+
|
| 84 |
+
where $\parallel$ denotes the divergence between two adapters. For example, let's consider $l_{1}$ and $l_{2}$ as English and German, respectively, and $t_{1}$ and $t_{2}$ as the text classification task and question answering task, respectively. Assuming we have training data for each task in both languages, we can fine-tune LLMs to obtain four adapters: text classification in English, text classification in German, question answering in English, and question answering in German. We assume that the divergence between adapters for the text classification task in English and German, as well as the divergence between adapters for the question answering task in English and German, follows the same distribution. This divergence represents the "language ability" that is independent of specific tasks.
|
| 85 |
+
|
| 86 |
+
In the context of cross-lingual transfer, we aim to solve the task $t_1$ for the target language $l_1$ , with the knowledge transferred from a source language $l_2$ , which is often a high-resource language such as English. By imposing the condition of cross-lingual transfer, where labeled data is available only for the target task in the source language and there is unlabeled data in both the source and target languages, we can introduce another "reference task" $t_2$ . This task can be easily constructed using unlabeled data, and language ability can be obtained by $A_{l_1t_2} \| A_{l_2t_2}$ . Moreover, to obtain the ability of
|
| 87 |
+
|
| 88 |
+
performing target task $t_1$ in the target language $l_1$ , we can further transform Equation (4) as:
|
| 89 |
+
|
| 90 |
+
$$
|
| 91 |
+
A_{l_1 t_1} = A_{l_2 t_1} \,\|^{R} \left(A_{l_1 t_2} \,\|\, A_{l_2 t_2}\right), \tag{5}
|
| 92 |
+
$$
|
| 93 |
+
|
| 94 |
+
where $\| ^R$ is the reverse function of $\|$ . Intuitively, $A_{l_2t_1}$ represents the "task ability" in the source language, while $A_{l_1t_2}\| A_{l_2t_2}$ represents the "language ability". Through merging these two terms, we can transfer the "task ability" of $t_1$ from $l_2$ to $l_1$ .
|
| 95 |
+
|
| 96 |
+
To transfer the knowledge from labeled data in the high-resource language (i.e., given $A_{l_2t_1}$ ), the next step is to specify the reference task $t_2$ . We observe that there are many easily obtained corpora of low-resource languages, such as Wikipedia, online blogs, etc. These corpora can be used to construct intuitive tasks such as causal language modeling, which can serve as the reference task $t_2$ . Simultaneously, we can also construct such tasks for the high-resource language $l_2$ . Therefore, adapters can be fine-tuned on such easily accessible reference tasks in different languages to obtain $A_{l_1t_2}$ and $A_{l_2t_2}$ . Cross-lingual transfer thus can be achieved by merging these three adapters.
|
| 97 |
+
|
| 98 |
+
# 3.2 Structure-Adaptive Adapter Merging
|
| 99 |
+
|
| 100 |
+
As introduced in Section 2, adapters have different structures, which inspires us to devise different adapter merging methods. We propose that the adapter merging approach must align with the way the adapter is combined with the original model.
|
| 101 |
+
|
| 102 |
+
LoRA In the fine-tuning process of LoRA, where the method involves element-wise addition to the original parameters, the merging method used to combine task ability and language ability should also employ element-wise addition. Additionally, since the divergence calculation approach $\parallel$ is intended to be the inverse function of the merging method, it should be carried out through element-wise subtraction in this scenario. Therefore, Equation (4) is equivalently transferred to
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
A _ {l _ {1} t _ {1}} \ominus A _ {l _ {2} t _ {1}} \sim A _ {l _ {1} t _ {2}} \ominus A _ {l _ {2} t _ {2}}, \tag {6}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
where $\ominus$ denotes element-wise subtraction, and Equation (5) is equivalently transferred to
|
| 109 |
+
|
| 110 |
+
$$
|
| 111 |
+
A _ {l _ {1} t _ {1}} = A _ {l _ {2} t _ {1}} \oplus t \cdot \left(A _ {l _ {1} t _ {2}} \ominus A _ {l _ {2} t _ {2}}\right), \tag {7}
|
| 112 |
+
$$
|
| 113 |
+
|
| 114 |
+
where $\oplus$ denotes element-wise addition and $t$ is the hyper-parameter that adapts the scale of two distributions in the same family of distributions.
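On a single weight tensor, Equation (7) reduces to one line; the helper below is a sketch and assumes the three adapters were fine-tuned from the same backbone so their tensors share shapes.

```python
import torch

def merge_lora(src_task: torch.Tensor, tgt_ref: torch.Tensor,
               src_ref: torch.Tensor, t: float = 1.0) -> torch.Tensor:
    """Equation (7): A_{l1,t1} = A_{l2,t1} + t * (A_{l1,t2} - A_{l2,t2})."""
    return src_task + t * (tgt_ref - src_ref)
```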
|
| 115 |
+
|
| 116 |
+
$(\mathbf{IA})^3$ Similarly, the fine-tuning method of $(\mathrm{IA})^3$ is element-wise multiplication to the original parameters, and the merging method should also be element-wise multiplication. Furthermore, we need to employ element-wise division to obtain the divergence between $A_{l_1t_2}$ and $A_{l_2t_2}$ . Therefore, Equation (4) is equivalently transferred to
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
A _ {l _ {1} t _ {1}} \oslash A _ {l _ {2} t _ {1}} \sim A _ {l _ {1} t _ {2}} \oslash A _ {l _ {2} t _ {2}}, \tag {8}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
where $\oslash$ denotes element-wise division, and Equation (5) is equivalently transferred to
|
| 123 |
+
|
| 124 |
+
$$
|
| 125 |
+
A_{l_1 t_1} = A_{l_2 t_1} \odot \left(t \cdot \left(A_{l_1 t_2} \oslash A_{l_2 t_2} - \mathbb{1}\right) + \mathbb{1}\right), \tag{9}
|
| 126 |
+
$$
|
| 127 |
+
|
| 128 |
+
where $\odot$ denotes element-wise multiplication and $t$ is the hyper-parameter determining the scale of two distributions in the same family of distributions.
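A corresponding sketch of Equation (9): the element-wise ratio is rescaled around the identity vector $\mathbb{1}$ before being multiplied into the task adapter.

```python
import torch

def merge_ia3(src_task: torch.Tensor, tgt_ref: torch.Tensor,
              src_ref: torch.Tensor, t: float = 1.0) -> torch.Tensor:
    """Equation (9): scale the multiplicative language shift away from 1."""
    ratio = tgt_ref / src_ref          # element-wise division (⊘)
    return src_task * (t * (ratio - 1.0) + 1.0)
```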
|
| 129 |
+
|
| 130 |
+
Prefix-Tuning In the case of other adapter structures such as Prefix-Tuning, which involves the insertion of prefix tokens into the model, the merging process necessitates transferring adapters within the same space, such as MLP. Formally, the adaptive merging method is
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
A _ {l _ {1} t _ {1}} = t \cdot \left(A _ {l _ {1} t _ {2}} * A _ {l _ {2} t _ {2}} ^ {- 1}\right) * A _ {l _ {2} t _ {1}}, \tag {10}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
where $*$ represents matrix multiplication and $A_{l_2t_2}^{-1}$ represents the Moore-Penrose pseudo-inverse of the matrix. For Prefix-Tuning, $A_{lt}$ represents the prefix tokens. In this paper, we mainly focus on LoRA and (IA) $^3$ when Llama2 is the backbone model due to the subpar performance of prefix-tuning on fine-tuning (He et al., 2021). In contrast, for smaller language models such as mT5 (Xue et al., 2021), we implement AdaMergeX with Prefix-Tuning. The experiment results are shown in Appendix A.1.
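A sketch of Equation (10), treating each prefix as a plain matrix and using `torch.linalg.pinv` for the Moore-Penrose pseudo-inverse; the function name and tensor layout are assumptions.

```python
import torch

def merge_prefix(src_task: torch.Tensor, tgt_ref: torch.Tensor,
                 src_ref: torch.Tensor, t: float = 1.0) -> torch.Tensor:
    """Equation (10): A_{l1,t1} = t * (A_{l1,t2} @ pinv(A_{l2,t2})) @ A_{l2,t1}."""
    return t * (tgt_ref @ torch.linalg.pinv(src_ref)) @ src_task
```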
|
| 137 |
+
|
| 138 |
+
# 3.3 AdaMergeX
|
| 139 |
+
|
| 140 |
+
Following notations in Section 3.1, to solve a target task $t_1$ in a target language $l_1$ , i.e., obtain the adapter $A_{l_1t_1}$ , we need to fine-tune another three adapters: adapters on the target task in the source language $(A_{l_2t_1})$ , adapters on the reference task in the target language $(A_{l_1t_2})$ , and adapters on the reference task in the source language $(A_{l_2t_2})$ . Note that $A_{l_1t_2}$ and $A_{l_2t_2}$ are easily obtainable, as we can choose any task in the target and source language. As mentioned earlier, the task can even be causal language modeling, which only requires unlabeled text corpora. Therefore, with only unlabeled data in both source and target language,
|
| 141 |
+
|
| 142 |
+
<table><tr><td>Task</td><td>Zero-Shot Prompt</td></tr><tr><td>MGSM</td><td>Let's think step by step. Question: {question}</td></tr><tr><td>XCOPA</td><td>Here is a premise and a question. Help me pick the more plausible option. Premise: {premise} Question: What is the {question}? (A) {choice1} (B) {choice2}</td></tr><tr><td>XNLI</td><td>You should judge whether the hypothesis is true (entailment), false (contradiction), or undetermined (neutral) given the premise. Premise: {premise} Hypothesis: {hypothesis}</td></tr><tr><td>XQuAD</td><td>{context} Question: {question}</td></tr><tr><td>XLSum</td><td>Summarize the context in one sentence. Title: {title} Context: {article}</td></tr></table>
|
| 143 |
+
|
| 144 |
+
Table 1: Zero-shot prompts for each dataset.
|
| 145 |
+
|
| 146 |
+
our proposed AdaMergeX effectively transfers the target task proficiency from the source language to the target language. Moreover, given that the reference task remains constant, fine-tuning LLMs in the source language on the target task is the sole requirement for each new target task. This efficiency characterizes AdaMergeX.
|
| 147 |
+
|
| 148 |
+
In the case of LoRA, which fine-tunes LLMs by tuning $\{B,A\}$ in tuned layers of LLMs as introduced in Equation (1), adapters are merged following Equation (7) by element-wise addition and subtraction on $\{B,A\}$ in the corresponding layers of $A_{l_2t_1}$, $A_{l_1t_2}$, and $A_{l_2t_2}$. On the other hand, in the case of (IA)$^3$, the fine-tuning parameters are $W_{A}$ in tuned layers as depicted in Equation (2). Thus the merging method follows Equation (9), which involves performing element-wise multiplication and division on the corresponding layers of $A_{l_2t_1}$, $A_{l_1t_2}$, and $A_{l_2t_2}$.
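In state-dict terms, the whole merge is a loop over the tuned tensors. A hedged sketch that inlines Equation (7) and assumes the three checkpoints expose identically named parameters (the actual naming depends on the fine-tuning library):

```python
def adamergex(src_task_sd: dict, tgt_ref_sd: dict, src_ref_sd: dict,
              t: float = 1.0) -> dict:
    """Apply Equation (7) to every LoRA tensor (the B and A factors per layer)."""
    return {name: src_task_sd[name] + t * (tgt_ref_sd[name] - src_ref_sd[name])
            for name in src_task_sd}
```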
|
| 149 |
+
|
| 150 |
+
# 4 Experiments
|
| 151 |
+
|
| 152 |
+
# 4.1 Experimental Setup
|
| 153 |
+
|
| 154 |
+
Datasets and Language To evaluate the effectiveness of our method, we conduct experiments on a wide variety of multilingual tasks in three main categories: reasoning tasks, natural language understanding (NLU) tasks, and natural language generation (NLG) tasks. For reasoning tasks, we test on the multilingual arithmetic reasoning dataset MGSM (Shi et al., 2022) and the multilingual commonsense reasoning dataset XCOPA (Ponti et al., 2020). For NLU tasks, we test on the multilingual natural language inference dataset XNLI (Conneau et al., 2018) and the question-answering dataset XQuAD (Artetxe et al., 2020). For NLG tasks, we test on the multilingual summarization dataset XL-Sum (Hasan et al., 2021). We choose 12 languages that appear more than once in the above datasets, including German (de), Russian (ru), French (fr),
|
| 155 |
+
|
| 156 |
+
Spanish (es), Chinese (zh), Vietnamese (vi), Turkish (tr), Arabic (ar), Greek (el), Thai (th), Hindi (hi), and Swahili (sw). Detailed settings of zero-shot prompts are shown in Table 1. We utilize intuitive prompting methods for all tasks except for XCOPA and XNLI, where we employ prompts from Huang et al. (2023b). Detailed examples of the prompting approach can be found in Appendix A.2. For MGSM, XCOPA and XQuAD, we adopt the whole test set, while for XNLI and XLSum we randomly sample 1000 and 500 data points from the whole test set, respectively.
|
| 157 |
+
|
| 158 |
+
Baselines We conduct comparisons between our proposed method, which utilizes model merging for achieving cross-lingual transfer, and seven competing techniques: (i) Vanilla zero-shot prompting ("Vanilla"), which directly assesses target languages using the pre-trained LLM. (ii) English Fine-Tuning ("Eng-FT"), which involves fine-tuning the model in English for target tasks and subsequently transferring it directly to target languages. (iii) Cross-Lingual-Thought Prompting ("XLT (Vanilla)") (Huang et al., 2023b), which achieves state-of-the-art results on cross-lingual transfer with LLMs through a carefully designed prompt template involving explicit translation from the target to the source language, reasoning in the source language, and translating back to the target language. (iv) "XLT (Eng-FT)", where the XLT approach is applied to the Eng-FT model. (v) Arithmetic Merging ("AriMerge") (Zhang et al., 2023a), the state-of-the-art adapter merging method based on arithmetic addition. (vi) MAD-X (Pfeiffer et al., 2020), which decomposes language and task via independent invertible adapters. (vii) LF-SFT (Ansell et al., 2022), which adopts sparse fine-tuning on language and task respectively and merges them directly via addition.
|
| 159 |
+
|
| 160 |
+
<table><tr><td rowspan="2">Adapters</td><td rowspan="2">Method</td><td colspan="2">Reasoning</td><td rowspan="2">NUXNLI</td><td rowspan="2">XQuAD</td><td rowspan="2">NLG XLSum</td><td rowspan="2">Avg.</td></tr><tr><td>MGSM</td><td>XCOPA</td></tr><tr><td rowspan="6">LoRA</td><td>Vanilla</td><td>2.7</td><td>52.3</td><td>14.8</td><td>0.0</td><td>20.9</td><td>18.1</td></tr><tr><td>Eng-FT</td><td>17.4</td><td>58.1</td><td>30.3</td><td>31.0</td><td>22.9</td><td>31.9</td></tr><tr><td>XLT(Vanilla)</td><td>2.8</td><td>52.6</td><td>23.7</td><td>19.3</td><td>1.3</td><td>19.9</td></tr><tr><td>XLT(Eng-FT)</td><td>18.1</td><td>58.2</td><td>27.7</td><td>26.4</td><td>19.1</td><td>29.9</td></tr><tr><td>AriMerge</td><td>6.0</td><td>57.9</td><td>13.6</td><td>30.1</td><td>19.5</td><td>25.4</td></tr><tr><td>AdaMergeX</td><td>19.2</td><td>59.0</td><td>33.6</td><td>31.6</td><td>23.3</td><td>33.3</td></tr><tr><td rowspan="6">\( (IA)^3 \)</td><td>Vanilla</td><td>2.7</td><td>52.3</td><td>14.8</td><td>0.0</td><td>20.9</td><td>18.1</td></tr><tr><td>Eng-FT</td><td>2.3</td><td>52.5</td><td>26.5</td><td>34.0</td><td>17.4</td><td>26.5</td></tr><tr><td>XLT(Vanilla)</td><td>2.8</td><td>52.6</td><td>23.7</td><td>19.3</td><td>1.3</td><td>19.9</td></tr><tr><td>XLT(Eng-FT)</td><td>2.8</td><td>52.6</td><td>25.5</td><td>21.3</td><td>1.4</td><td>20.7</td></tr><tr><td>AriMerge</td><td>0.7</td><td>51.5</td><td>28.2</td><td>32.4</td><td>15.5</td><td>25.7</td></tr><tr><td>AdaMergeX</td><td>3.9</td><td>53.1</td><td>28.6</td><td>35.5</td><td>21.4</td><td>28.5</td></tr></table>
|
| 161 |
+
|
| 162 |
+
Table 2: Main experimental results on 5 representative cross-lingual tasks. Details of the selected zero-shot prompt, the baselines, and hyperparameters are described in Section 4.1.
|
| 163 |
+
|
| 164 |
+
Evaluation Metrics For reasoning and NLU tasks, we use accuracy scores as our evaluation metric. For the summarization task, we evaluate the performance by ROUGE-L score (Lin, 2004).
|
| 165 |
+
|
| 166 |
+
Experiment Details The backbone model that we use to test AdaMergeX is Llama2-7b (Touvron et al., 2023) for LoRA and $(\mathrm{IA})^3$, and XLM-R for Prefix-Tuning. To fine-tune Llama2 using LoRA and $(\mathrm{IA})^3$, we configure the target modules to include all available layers. We follow the notation of Vaswani et al. (2017). In particular, we utilize the attention layer's $\{W^Q, W^K, W^V, W^O\}$ and the feed-forward layer's $\{W_1, W_2\}$ for LoRA. For $(\mathrm{IA})^3$, we focus on $W^K$ and $W^V$ in the attention layer, as well as $W_2$ in the feed-forward layer. For the merging target modules, inspired by Geva et al. (2021) who attribute task ability to the feed-forward layer, we merge $\{W^Q, W^V\}$ for LoRA as we focus on language ability instead. Detailed training parameters can be found in Appendix A.3. We employ conventional causal language modeling as the reference task, where the prediction of the subsequent token is based on preceding inputs. Specifically, we generate the training set from the corpora provided by Wikipedia Foundation (wikipedia-2023-11-01) $^2$, segmenting it into chunks of equal length 512 and randomly selecting 20k segments for each language. There is only one hyperparameter in our method, which is $t$ in Equations (7), (9), and (10). When tuning this hyperparameter, for each task we select the validation set from French and then extend it to encompass all other languages; for tasks that do not contain a French validation set, we adopt Vietnamese instead. For XLT
|
| 167 |
+
|
| 168 |
+
method (Huang et al., 2023b), we adopt the same zero-shot prompts as in the original paper.
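The reference-task corpus construction described above can be sketched as follows; the tokenizer interface and function name are placeholders, while the segment length (512) and sample size (20k) follow the setup in the text.

```python
import random

def build_clm_corpus(raw_text: str, tokenizer, seg_len: int = 512,
                     n_segments: int = 20_000, seed: int = 0) -> list:
    """Chunk a raw Wikipedia dump into equal-length segments for causal LM tuning."""
    ids = tokenizer.encode(raw_text)
    chunks = [ids[i:i + seg_len]
              for i in range(0, len(ids) - seg_len + 1, seg_len)]
    random.seed(seed)
    return random.sample(chunks, min(n_segments, len(chunks)))
```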
|
| 169 |
+
|
| 170 |
+
# 4.2 Main Results
|
| 171 |
+
|
| 172 |
+
Table 2 presents our main experimental results on 5 representative cross-lingual tasks with Llama2, where we report the average scores across all languages. Detailed results of each language are shown in Tables 7 and 8 in Appendix A.4 for LoRA and (IA) $^3$, respectively. Table 3 presents the results on XLM-R, where we compare with MAD-X and LF-SFT on XCOPA and XQuAD $^3$ .
|
| 173 |
+
|
| 174 |
+
AdaMergeX outperforms direct transfer and prompting methods When comparing to fine-tuning on the task in English and direct transfer to the target language, AdaMergeX outperforms it in all settings and achieves $1.4\%$ absolute improvement with LoRA and $1.5\%$ absolute improvement with $(\mathrm{IA})^3$. When comparing to the state-of-the-art method for cross-lingual transfer in LLMs via prompting, XLT with the vanilla Llama2 model ("XLT (Vanilla)") and the model fine-tuned on the target task in English ("XLT (Eng-FT)"), AdaMergeX outperforms them in all settings and achieves $3.4\%$ absolute improvement with LoRA and $7.3\%$ absolute improvement with $(\mathrm{IA})^3$. This achievement proves that the introduction of adapter merging to achieve cross-lingual transfer is effective, especially in the context of LLMs.
|
| 175 |
+
|
| 176 |
+
AdaMergeX outperforms decoupling task ability and language ability method As shown in Table 3, compared to MAD-X and LF-SFT, which struggle to fully separate task ability from language
|
| 177 |
+
|
| 178 |
+
<table><tr><td>Task</td><td>Method</td><td>tr</td><td>vi</td><td>th</td><td>sw</td><td>el</td><td>ru</td><td>Avg.</td></tr><tr><td rowspan="3">XCOPA</td><td>MAD-X</td><td>60.3</td><td>66.1</td><td>61.8</td><td>56.3</td><td>-</td><td>-</td><td>59.5</td></tr><tr><td>AriMerge</td><td>66.7</td><td>67.8</td><td>64.3</td><td>60.5</td><td>-</td><td>-</td><td>64.8</td></tr><tr><td>AdaMergeX</td><td>69.4</td><td>70.5</td><td>66.9</td><td>63.2</td><td>-</td><td>-</td><td>67.5</td></tr><tr><td rowspan="4">XQuAD</td><td>MAD-X</td><td>51.1</td><td>-</td><td>55.7</td><td>-</td><td>54.3</td><td>57.8</td><td>54.7</td></tr><tr><td>LF-SFT</td><td>58.6</td><td>-</td><td>75.2</td><td>-</td><td>65.5</td><td>64.6</td><td>66.0</td></tr><tr><td>AriMerge</td><td>61.1</td><td>-</td><td>75.6</td><td>-</td><td>67.4</td><td>68.2</td><td>68.1</td></tr><tr><td>AdaMergeX</td><td>63.8</td><td>-</td><td>77.9</td><td>-</td><td>70.2</td><td>70.4</td><td>70.6</td></tr></table>

Table 3: Experiment results on XCOPA and XQuAD with XLM-R, where AdaMergeX is implemented on LoRA.
|
| 179 |
+
|
| 180 |
+
ability, AdaMergeX demonstrates remarkable enhancements. In particular, AdaMergeX showcases an impressive absolute improvement of $8.0\%$ and $15.9\%$ on XCOPA and XQuAD, respectively, in comparison to MAD-X. Additionally, it achieves a significant $4.6\%$ absolute improvement on XQuAD when compared to LF-SFT. Therefore, our proposed new decoupling method is much more effective than others.
|
| 181 |
+
|
| 182 |
+
AdaMergeX outperforms general adapter merging methods Compared with the state-of-the-art adapter merging method, namely AriMerge, AdaMergeX outperforms it in all settings and achieves $6.9\%$ absolute improvement with LoRA and $2.3\%$ absolute improvement with $(\mathrm{IA})^{3}$. Therefore, AdaMergeX, which adaptively considers the structure of adapters, outperforms all previous general adapter merging methods that adopt arithmetic addition for all kinds of adapters.
|
| 183 |
+
|
| 184 |
+
AdaMergeX performs consistently well with LoRA and $(\mathbf{IA})^3$ LoRA achieves higher absolute performance than $(\mathrm{IA})^3$, which shows the effectiveness of LoRA on fine-tuning. However, the absolute improvements of AdaMergeX with LoRA and $(\mathrm{IA})^3$ are comparable. For example, for MGSM, LoRA and $(\mathrm{IA})^3$ get the same absolute improvement of $1.1\%$, and for XNLI, on which LoRA and $(\mathrm{IA})^3$ both achieve their highest absolute improvement, the gains are comparable. This proves that AdaMergeX performs consistently well on different adapters.
|
| 185 |
+
|
| 186 |
+
# 4.3 Detailed Analysis
|
| 187 |
+
|
| 188 |
+
In this section, we validate the generalizability of our proposed method across various aspects including the source language, reference task, backbone model, and target modules. Furthermore, we perform an ablation analysis to assess the essentiality of the adaptive merging method.
|
| 189 |
+
|
| 190 |
+
Source Language To prove the generalizability of AdaMergeX on the source language, we explore its performance with different source languages in Table 4. We test on five source languages including German, French, Spanish, Thai, and Vietnamese. We find that the performance is highly related to the source language, which depends on the language ability of the corresponding language. However, the improvements are consistent across languages. For example, the improvement was most significant with Vietnamese as the source language, with an absolute improvement of $3.4\%$ with LoRA and $3.8\%$ with (IA) $^{3}$ . Therefore, AdaMergeX consistently performs well with different source languages.
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
<table><tr><td rowspan="2" colspan="2">Method</td><td colspan="2">Reasoning</td><td colspan="2">NLU</td><td>NLG</td><td rowspan="2">Avg.</td></tr><tr><td>MGSM</td><td>XCOPA</td><td>XNLI</td><td>XQuAD</td><td>XLSum</td></tr><tr><td rowspan="10">LoRA</td><td>De-Tune</td><td>20.9</td><td>-</td><td>48.3</td><td>44.4</td><td>-</td><td>37.9</td></tr><tr><td>AdaMergeX</td><td>22.3</td><td>-</td><td>50.9</td><td>46.5</td><td>-</td><td>39.9</td></tr><tr><td>Fr-Tune</td><td>19.9</td><td>-</td><td>52.9</td><td>-</td><td>24.1</td><td>32.3</td></tr><tr><td>AdaMergeX</td><td>22.2</td><td>-</td><td>57.1</td><td>-</td><td>24.8</td><td>34.7</td></tr><tr><td>Es-Tune</td><td>19.2</td><td>-</td><td>33.9</td><td>45.4</td><td>22.1</td><td>30.2</td></tr><tr><td>AdaMergeX</td><td>18.7</td><td>-</td><td>35.1</td><td>49.1</td><td>23.7</td><td>31.7</td></tr><tr><td>Th-Tune</td><td>3.2</td><td>49.3</td><td>1.9</td><td>39.8</td><td>20.3</td><td>22.9</td></tr><tr><td>AdaMergeX</td><td>4.5</td><td>48.9</td><td>6.2</td><td>44.2</td><td>20.1</td><td>24.8</td></tr><tr><td>Vi-Tune</td><td>-</td><td>63.8</td><td>49.1</td><td>36.2</td><td>21.7</td><td>42.7</td></tr><tr><td>AdaMergeX</td><td>-</td><td>64.2</td><td>53.2</td><td>38.9</td><td>22.3</td><td>44.7</td></tr><tr><td rowspan="10">\( (IA)^3 \)</td><td>De-Tune</td><td>2.9</td><td>-</td><td>43.5</td><td>45.6</td><td>-</td><td>30.7</td></tr><tr><td>AdaMergeX</td><td>6.3</td><td>-</td><td>44.0</td><td>47.1</td><td>-</td><td>32.5</td></tr><tr><td>Fr-Tune</td><td>2.5</td><td>-</td><td>48.7</td><td>-</td><td>19.8</td><td>23.7</td></tr><tr><td>AdaMergeX</td><td>4.1</td><td>-</td><td>47.9</td><td>-</td><td>21.6</td><td>24.5</td></tr><tr><td>Es-Tune</td><td>3.5</td><td>-</td><td>49.2</td><td>45.9</td><td>18.2</td><td>29.2</td></tr><tr><td>AdaMergeX</td><td>5.3</td><td>-</td><td>50.9</td><td>44.6</td><td>20.1</td><td>30.2</td></tr><tr><td>Th-Tune</td><td>1.2</td><td>49.8</td><td>0.0</td><td>27.7</td><td>20.2</td><td>19.8</td></tr><tr><td>AdaMergeX</td><td>1.9</td><td>50.4</td><td>0.0</td><td>28.9</td><td>24.1</td><td>21.1</td></tr><tr><td>Vi-Tune</td><td>-</td><td>49.8</td><td>45.5</td><td>33.2</td><td>20.1</td><td>37.2</td></tr><tr><td>AdaMergeX</td><td>-</td><td>48.7</td><td>50.2</td><td>36.1</td><td>22.5</td><td>39.4</td></tr></table>
|
| 195 |
+
|
| 196 |
+
Table 4: Ablation study on source language.
|
| 197 |
+
|
| 198 |
+
Reference Task To prove the generalizability of AdaMergeX on the reference task, we explore its performance with different reference tasks in Table 5. We test on three different reference tasks, including XCOPA, XNLI, XQuAD, while the source
|
| 199 |
+
|
| 200 |
+
language is English. Each dataset was tested on its corresponding available languages among German, French, Spanish, Thai, and Vietnamese. Specifically, the improvement was most significant with XQuAD as the reference task, with an absolute improvement of $1.3\%$ with LoRA and $1.7\%$ with (IA) $^{3}$. This verifies that AdaMergeX is general to any reference task.
|
| 201 |
+
|
| 202 |
+
<table><tr><td></td><td>Ref. Task</td><td>Method</td><td>MGSM</td><td>XCOPA</td><td>XNLI</td><td>XQuAD</td><td>XLSum</td><td>Avg.</td></tr><tr><td rowspan="4">LoRA</td><td>-</td><td>Eng-Tune</td><td>14.4</td><td>59.9</td><td>44.6</td><td>42.3</td><td>16.1</td><td>35.1</td></tr><tr><td>XCOPA</td><td>AdaMergeX</td><td>15.2</td><td>60.2</td><td>45.1</td><td>43.8</td><td>18.2</td><td>36.5</td></tr><tr><td>XNLI</td><td>AdaMergeX</td><td>14.5</td><td>60.9</td><td>46.7</td><td>44.1</td><td>18.4</td><td>36.9</td></tr><tr><td>XQuAD</td><td>AdaMergeX</td><td>14.9</td><td>61.8</td><td>45.4</td><td>44.4</td><td>18.1</td><td>36.9</td></tr><tr><td rowspan="4">\( (IA)^3 \)</td><td>-</td><td>Eng-Tune</td><td>2.6</td><td>52.7</td><td>40.0</td><td>39.2</td><td>10.8</td><td>29.1</td></tr><tr><td>XCOPA</td><td>AdaMergeX</td><td>4.9</td><td>54.3</td><td>40.5</td><td>40.4</td><td>12.4</td><td>30.5</td></tr><tr><td>XNLI</td><td>AdaMergeX</td><td>3.6</td><td>54.6</td><td>41.2</td><td>39.9</td><td>13.1</td><td>30.5</td></tr><tr><td>XQuAD</td><td>AdaMergeX</td><td>4.1</td><td>53.9</td><td>42.1</td><td>41.0</td><td>12.9</td><td>30.8</td></tr></table>
|
| 203 |
+
|
| 204 |
+
Table 5: Ablation study on reference task.
|
| 205 |
+
|
| 206 |
+
Backbone Models Not limited to decoder-only models such as Llama2, we conduct further analysis on the encoder-decoder model T5-base (Raffel et al., 2020) to prove its universal effectiveness. AdaMergeX consistently achieves the best performance compared to fine-tuning on English and AriMerge, as shown in Table 9 of Appendix A.5. Furthermore, we also implement our method on the encoder-only model XLM-R and compare with MAD-X and LF-SFT, as shown in Table 3. This shows the flexibility of choosing the backbone model when implementing AdaMergeX.
|
| 207 |
+
|
| 208 |
+
Merging Method We conduct an ablation analysis on the merging method to ascertain the indispensability and the effectiveness of adaptive merging in AdaMergeX. Table 10 in Appendix A.6 shows the detailed results, where AdaMergeX (adaptive) represents AdaMergeX with adaptive merging methods, while AdaMergeX (cross) represents AdaMergeX with cross merging methods, i.e., LoRA with the merging method of $(\mathrm{IA})^3$ and vice versa. We find that when applying the merging method of $(\mathrm{IA})^3$ to LoRA, the performance drops substantially, and vice versa. As a result, the adaptive merging method is crucial for adapter merging.
|
| 209 |
+
|
| 210 |
+
# 5 Related Work
|
| 211 |
+
|
| 212 |
+
# 5.1 Cross-Lingual Transfer
|
| 213 |
+
|
| 214 |
+
The emergence of multilingual systems (Kenton and Toutanova, 2019; Conneau and Lample, 2019;
|
| 215 |
+
|
| 216 |
+
Conneau et al., 2020; OpenAI, 2022; Anil et al., 2023; Touvron et al., 2023) has sparked interest in cross-lingual transfer (Kim et al., 2017; Lin et al., 2019; Schuster et al., 2019; Pfeiffer et al., 2020). Fine-tuning on the target language and target task is an intuitive way to make models obtain the ability of this task, but it is too costly in the era of LLMs as we always lack enough training data (Ma et al., 2023). Alternatively, some researchers explore realigning representations among languages (Nguyen et al., 2023; Salesky et al., 2023; Gao et al., 2023). However, Gaschi et al. (2023) demonstrates that aligned representations do not significantly benefit cross-lingual transfer. To address this issue, some works adopt explicit translation to achieve cross-lingual transfer (Liang et al., 2023; Huang et al., 2023b). However, they rely on translation ability which is not guaranteed. In addition, Pfeiffer et al. (2020) and Ansell et al. (2022) decouple language ability and task ability, but they ignore the interconnection of these two abilities. Furthermore, in the era of in-context learning (Brown et al., 2020; Chowdhery et al., 2022; Touvron et al., 2023; OpenAI, 2023), Li et al. (2023) and Tanwar et al. (2023) utilize prompt tuning to achieve cross-lingual transfer. Nevertheless, the performance remains limited for low-resource languages, which is often not carefully considered in the pre-training of LLMs.
|
| 217 |
+
|
| 218 |
+
# 5.2 Model Merging
|
| 219 |
+
|
| 220 |
+
Model merging has been widely used in image identification (Wortsman et al., 2022; Matena and Raffel, 2022), knowledge editing (Mitchell et al., 2022; Meng et al., 2022) and task combination (Ilharco et al., 2022). In the era of PEFT, researchers have started exploring different approaches to merging adapters (Zhang et al., 2023a; Yadav et al., 2023; Huang et al., 2023a; Chronopoulou et al., 2023; Ponti et al., 2023). These studies, however, have primarily focused on task transfer and have solely utilized linear combinations of different adapters, which may not be applicable to all types of adapters. Moreover, the utilization of model merging for cross-lingual transfer is under-studied.
|
| 221 |
+
|
| 222 |
+
# 6 Conclusion
|
| 223 |
+
|
| 224 |
+
In this work, we propose a new cross-lingual transfer method AdaMergeX. We split target task ability in the target language into two parts: "task ability" and "language ability". In the context of PEFT, task ability can be obtained by tuning on
|
| 225 |
+
|
| 226 |
+
the target task in the source language. To achieve cross-lingual transfer, which aims to transfer task ability from the source language to the target language, we introduce a reference task from which we obtain language ability and further merge it into task ability by adapter merging. Different from all previous adapter merging methods, we propose a structure-adaptive adapter merging method that aligns the adapter merging method with the way adapters are combined with LLMs. Experiment results show that AdaMergeX performs well across all settings. Moreover, the ablation analysis proves that AdaMergeX is robust to backbone models, source languages, and reference tasks.
|
| 227 |
+
|
| 228 |
+
# Limitations
|
| 229 |
+
|
| 230 |
+
Our research primarily utilizes models with around 7 billion parameters, specifically Llama2-7b, due to limitations in computational resources. Exploring our methodologies on larger-scale models may offer further valuable perspectives. Furthermore, although the training set for the reference task is easily accessible, fine-tuning the parameters of the entire model necessitates a certain investment of time. However, this training time can be significantly reduced by integrating language-specific adapters or employing language-specific Mixture of Experts (MoE) techniques, which ultimately lowers the overall training cost.
|
| 231 |
+
|
| 232 |
+
# Acknowledgment
|
| 233 |
+
|
| 234 |
+
This work was substantially supported by DAMO Academy through DAMO Academy Research Intern Program. This research is partially supported by the National Research Foundation Singapore under the AI Singapore Programme (AISG Award No: AISG2-TC-2023-010-SGIL) and the Singapore Ministry of Education Academic Research Fund Tier 1 (Award No: T1 251RES2207).
|
| 235 |
+
|
| 236 |
+
# References
|
| 237 |
+
|
| 238 |
+
Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. 2023. Palm 2 technical report. arXiv preprint arXiv:2305.10403.
|
| 239 |
+
Alan Ansell, Edoardo Ponti, Anna Korhonen, and Ivan Vulic. 2022. Composable sparse fine-tuning for cross-lingual transfer. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1778–1796.
|
| 240 |
+
|
| 241 |
+
Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. 2020. On the cross-lingual transferability of monolingual representations. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4623–4637.
|
| 242 |
+
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901.
|
| 243 |
+
Guanhua Chen, Shuming Ma, Yun Chen, Dongdong Zhang, Jia Pan, Wenping Wang, and Furu Wei. 2022. Towards making the most of cross-lingual transfer for zero-shot neural machine translation. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 142-157.
|
| 244 |
+
Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311.
|
| 245 |
+
Alexandra Chronopoulou, Matthew E Peters, Alexander Fraser, and Jesse Dodge. 2023. Adaptersoup: Weight averaging to improve generalization of pretrained language models. In *Findings of the Association for Computational Linguistics: EACL* 2023, pages 2009-2018.
|
| 246 |
+
Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Édouard Grave, Myle Ott, Luke Zettle-moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8440-8451.
|
| 247 |
+
Alexis Conneau and Guillaume Lample. 2019. Cross-lingual language model pretraining. Advances in neural information processing systems, 32.
|
| 248 |
+
Alexis Conneau, Guillaume Lample, Rudy Rinott, Adina Williams, Samuel R Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. Xnli: Evaluating crosslingual sentence representations. arXiv preprint arXiv:1809.05053.
|
| 249 |
+
Ujan Deb, Ridayesh Parab, and Preethi Jyothi. 2023. Zero-shot cross-lingual transfer with learned projections using unlabeled target-language data. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 449-457.
|
| 250 |
+
Pengzhi Gao, Liwen Zhang, Zhongjun He, Hua Wu, and Haifeng Wang. 2023. Improving zero-shot multilingual neural machine translation by leveraging cross-lingual consistency regularization. arXiv preprint arXiv:2305.07310.
|
| 251 |
+
|
| 252 |
+
Félix Gaschi, Patricio Cerda, Parisa Rastin, and Yannick Toussaint. 2023. Exploring the relationship between alignment and cross-lingual transfer in multilingual transformers. arXiv preprint arXiv:2306.02790.
|
| 253 |
+
Mor Geva, Roei Schuster, Jonathan Berant, and Omer Levy. 2021. Transformer feed-forward layers are key-value memories. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 5484-5495, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 254 |
+
Tahmid Hasan, Abhik Bhattacharjee, Md Saiful Islam, Kazi Mubasshir, Yuan-Fang Li, Yong-Bin Kang, M Sohel Rahman, and Rifat Shahriyar. 2021. Xl-sum: Large-scale multilingual abstractive summarization for 44 languages. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 4693-4703.
|
| 255 |
+
Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. 2021. Towards a unified view of parameter-efficient transfer learning. In International Conference on Learning Representations.
|
| 256 |
+
Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR.
|
| 257 |
+
Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. 2021. Lora: Low-rank adaptation of large language models. In International Conference on Learning Representations.
|
| 258 |
+
Chengsong Huang, Qian Liu, Bill Yuchen Lin, Tianyu Pang, Chao Du, and Min Lin. 2023a. Lorahub: Efficient cross-task generalization via dynamic lora composition. arXiv preprint arXiv:2307.13269.
|
| 259 |
+
Haoyang Huang, Tianyi Tang, Dongdong Zhang, Wayne Xin Zhao, Ting Song, Yan Xia, and Furu Wei. 2023b. Not all languages are created equal in llms: Improving multilingual capability by cross-lingual-thought prompting. arXiv preprint arXiv:2305.07004.
|
| 260 |
+
Gabriel Ilharco, Marco Tulio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Hajishirzi, and Ali Farhadi. 2022. Editing models with task arithmetic. In The Eleventh International Conference on Learning Representations.
|
| 261 |
+
Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, volume 1, page 2.
|
| 262 |
+
Joo-Kyung Kim, Young-Bum Kim, Ruhi Sarikaya, and Eric Fosler-Lussier. 2017. Cross-lingual transfer
|
| 263 |
+
|
| 264 |
+
learning for pos tagging without cross-lingual resources. In Proceedings of the 2017 conference on empirical methods in natural language processing, pages 2832-2838.
|
| 265 |
+
Shuang Li, Xuming Hu, Aiwei Liu, Yawen Yang, Fukun Ma, Philip S Yu, and Lijie Wen. 2023. Enhancing cross-lingual natural language inference by soft prompting with multilingual verbalizer. arXiv preprint arXiv:2305.12761.
|
| 266 |
+
Xiang Lisa Li and Percy Liang. 2021. Prefix-tuning: Optimizing continuous prompts for generation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 4582-4597, Online. Association for Computational Linguistics.
|
| 267 |
+
Yaobo Liang, Quanzhi Zhu, Junhe Zhao, and Nan Duan. 2023. Machine-created universal language for cross-lingual transfer. arXiv preprint arXiv:2305.13071.
|
| 268 |
+
Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81.
|
| 269 |
+
Yu-Hsiang Lin, Chian-Yu Chen, Jean Lee, Zirui Li, Yuyan Zhang, Mengzhou Xia, Shruti Rijhwani, Junxian He, Zhisong Zhang, Xuezhe Ma, et al. 2019. Choosing transfer languages for cross-lingual learning. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, volume 57.
|
| 270 |
+
Haokun Liu, Derek Tam, Mohammed Muqeeth, Jay Mohta, Tenghao Huang, Mohit Bansal, and Colin A Raffel. 2022. Few-shot parameter-efficient fine-tuning is better and cheaper than in-context learning. Advances in Neural Information Processing Systems, 35:1950-1965.
|
| 271 |
+
Bolei Ma, Ercong Nie, Helmut Schmid, and Hinrich Schütze. 2023. Is prompt-based finetuning always better than vanilla finetuning? insights from cross-lingual language understanding. arXiv preprint arXiv:2307.07880.
|
| 272 |
+
Michael S Matena and Colin A Raffel. 2022. Merging models with fisher-weighted averaging. Advances in Neural Information Processing Systems, 35:17703-17716.
|
| 273 |
+
Kevin Meng, Arnab Sen Sharma, Alex J Andonian, Yonatan Belinkov, and David Bau. 2022. Mass-editing memory in a transformer. In *The Eleventh International Conference on Learning Representations*.
|
| 274 |
+
Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.
|
| 275 |
+
|
| 276 |
+
Eric Mitchell, Charles Lin, Antoine Bosselut, Christopher D Manning, and Chelsea Finn. 2022. Memory-based model editing at scale. In International Conference on Machine Learning, pages 15817-15831. PMLR.
|
| 277 |
+
Hoang H Nguyen, Chenwei Zhang, Tao Zhang, Eugene Rohrbaugh, and Philip S Yu. 2023. Enhancing cross-lingual transfer via phonemic transcription integration. arXiv preprint arXiv:2307.04361.
|
| 278 |
+
OpenAI. 2022. Chatgpt: Optimizing language models for dialogue. OpenAI Blog.
|
| 279 |
+
OpenAI. 2023. Gpt-4 technical report.
|
| 280 |
+
Xiao Pan, Mingxuan Wang, Liwei Wu, and Lei Li. 2021. Contrastive learning for many-to-many multilingual neural machine translation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 244-258.
|
| 281 |
+
Jonas Pfeiffer, Ivan Vulic, Iryna Gurevych, and Sebastian Ruder. 2020. Mad-x: An adapter-based framework for multi-task cross-lingual transfer. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7654-7673.
|
| 282 |
+
Edoardo Maria Ponti, Goran Glavaš, Olga Majewska, Qianchu Liu, Ivan Vulić, and Anna Korhonen. 2020. Xcopa: A multilingual dataset for causal commonsense reasoning. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2362-2376.
|
| 283 |
+
Edoardo Maria Ponti, Alessandro Sordoni, Yoshua Bengio, and Siva Reddy. 2023. Combining parameter-efficient modules for task-level generalisation. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, pages 687-702.
|
| 284 |
+
Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. The Journal of Machine Learning Research, 21(1):5485-5551.
|
| 285 |
+
Elizabeth Salesky, Neha Verma, Philipp Koehn, and Matt Post. 2023. Pixel representations for multilingual translation and data-efficient cross-lingual transfer. arXiv preprint arXiv:2305.14280.
|
| 286 |
+
Sebastian Schuster, Sonal Gupta, Rushin Shah, and Mike Lewis. 2019. Cross-lingual transfer learning for multilingual task oriented dialog. In Proceedings of NAACL-HLT, pages 3795-3805.
|
| 287 |
+
Freda Shi, Mirac Suzgun, Markus Freitag, Xuezhi Wang, Suraj Srivats, Soroush Vosoughi, Hyung Won Chung, Yi Tay, Sebastian Ruder, Denny Zhou, et al. 2022. Language models are multilingual chain-of-thought
|
| 288 |
+
|
| 289 |
+
reasoners. In The Eleventh International Conference on Learning Representations.
|
| 290 |
+
Eshaan Tanwar, Manish Borthakur, Subhabrata Dutta, and Tanmoy Chakraborty. 2023. Multilingual llms are better cross-lingual in-context learners with alignment. arXiv preprint arXiv:2305.05940.
|
| 291 |
+
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.
|
| 292 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, 30.
|
| 293 |
+
Mitchell Wortsman, Gabriel Ilharco, Samir Ya Gadre, Rebecca Roelofs, Raphael Gontijo-Lopes, Ari S Morcos, Hongseok Namkoong, Ali Farhadi, Yair Carmon, Simon Kornblith, et al. 2022. Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In International Conference on Machine Learning, pages 23965-23998. PMLR.
|
| 294 |
+
Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, and Colin Raffel. 2021. mt5: A massively multilingual pre-trained text-to-text transformer. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 483-498.
|
| 295 |
+
Prateek Yadav, Derek Tam, Leshem Choshen, Colin Raffel, and Mohit Bansal. 2023. Resolving interference when merging models. arXiv preprint arXiv:2306.01708.
|
| 296 |
+
Jinghan Zhang, Shiqi Chen, Junteng Liu, and Junxian He. 2023a. Composing parameter-efficient modules with arithmetic operations. arXiv preprint arXiv:2306.14870.
|
| 297 |
+
Rui Zhang, Yangfeng Ji, Yue Zhang, and Rebecca J Passonneau. 2022. Contrastive data and learning for natural language processing. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Tutorial Abstracts, pages 39-47.
|
| 298 |
+
Wenxuan Zhang, Sharifah Mahani Aljunied, Chang Gao, Yew Ken Chia, and Lidong Bing. 2023b. M3Exam: A multilingual, multimodal, multilevel benchmark for examining large language models. arXiv preprint arXiv:2306.05179.
|
| 299 |
+
Wenhao Zhu, Yunzhe Lv, Qingxiu Dong, Fei Yuan, Jingjing Xu, Shujian Huang, Lingpeng Kong, Jiajun Chen, and Lei Li. 2023. Extrapolating large language models to non-english by aligning languages. arXiv preprint arXiv:2308.04948.
|
| 300 |
+
|
| 301 |
+
# A Appendix

# A.1 AdaMergeX on Prefix-Tuning

The results demonstrate that AdaMergeX also excels with prefix-tuning, a distinct fine-tuning approach. Results on the XNLI task with mT5 (Xue et al., 2021) are shown in Table 6.

# A.2 Prompts

Detailed prompts for the tasks in each language are listed in Figure 2.

# A.3 Training Details

Comprehensive settings for the XQuAD experiments on XLM-R are provided below. We utilize XNLI as the reference task for both English and the target language, and employ SQuAD to train the task adapter for English. The hyperparameters are as follows.
# LoRA setting

```txt
lora_r = 8
lora_alpha = 16
target_modules = ["q_proj", "v_proj"]
lora_dropout = 0.1
```

# Training setting

```txt
NUM_EPOCHS = 5
PADDING_SIDE = "right"
EPOCHS = 3
LR = 2e-5
TRAIN_BS = 4
```
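
For reference, below is a minimal sketch of how these settings map onto the Hugging Face `peft` and `transformers` APIs; the backbone name, output path, and training-loop wiring are illustrative assumptions, not the exact experimental script.

```python
# Minimal sketch: wiring the LoRA and training hyperparameters above into
# Hugging Face peft/transformers. The backbone name is a placeholder.
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from peft import LoraConfig, get_peft_model

model_name = "meta-llama/Llama-2-7b-hf"  # assumed backbone, not prescribed above
tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="right")
model = AutoModelForCausalLM.from_pretrained(model_name)

lora_config = LoraConfig(
    r=8,                                  # lora_r
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.1,
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the LoRA matrices are trainable

training_args = TrainingArguments(
    output_dir="lora-xquad",            # placeholder path
    num_train_epochs=3,                 # EPOCHS
    learning_rate=2e-5,                 # LR
    per_device_train_batch_size=4,      # TRAIN_BS
)
```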
# A.4 Detailed Results

We present detailed results in Table 7 and Table 8.

# A.5 AdaMergeX on T5-base

Because T5-base supports only Spanish and French among the chosen languages, we test only these two. In the case of LoRA on XNLI, AdaMergeX obtains an absolute improvement of $4.2\%$ in Spanish and $2.8\%$ in French. For $(\mathrm{IA})^3$, the improvements are $1.1\%$ and $4.0\%$, respectively.
# A.6 Ablation on Adaptive Merging

We find that applying the merging method of $(\mathrm{IA})^3$ to LoRA reduces performance substantially: on XNLI the absolute reduction is $39.5\%$, and on XQuAD it is $45.9\%$. Applying the merging method of LoRA to $(\mathrm{IA})^3$ also decreases performance compared to adaptive merging, though by less: $2.4\%$ on XNLI and $0.7\%$ on XQuAD. The smaller reduction can be attributed to the fact that $(\mathrm{IA})^3$ fine-tuning is less effective than LoRA fine-tuning and therefore has a relatively minor impact on overall model performance. The two composition rules are sketched below.
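
To make the adaptive/cross distinction concrete, here is a minimal sketch of the two composition rules, under the assumption that LoRA weight deltas combine additively while $(\mathrm{IA})^3$ scaling vectors combine multiplicatively; the helper names and the coefficient `lam` are illustrative, not the exact implementation.

```python
import torch

def merge_lora(task_en, ref_tgt, ref_en, lam=1.0):
    # LoRA deltas are *added* to the base weights, so the language shift
    # is modelled as a difference of reference-task adapters.
    return {k: task_en[k] + lam * (ref_tgt[k] - ref_en[k]) for k in task_en}

def merge_ia3(task_en, ref_tgt, ref_en, lam=1.0):
    # (IA)^3 rescales activations *element-wise*, so the language shift is
    # modelled as a ratio of reference-task scaling vectors (lam=1.0 gives
    # the plain ratio; fractional lam assumes positive scalings).
    return {k: task_en[k] * (ref_tgt[k] / ref_en[k]) ** lam for k in task_en}

# Toy example with a single parameter tensor per adapter.
task_en = {"w": torch.tensor([1.0, 2.0])}
ref_en = {"w": torch.tensor([1.0, 1.0])}
ref_tgt = {"w": torch.tensor([2.0, 1.5])}
print(merge_lora(task_en, ref_tgt, ref_en))  # additive composition
print(merge_ia3(task_en, ref_tgt, ref_en))   # multiplicative composition
```

In this reading, the "cross" setting of Table 10 corresponds to applying each rule to the other adapter type, which is what degrades performance above.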
# A.7 Ablation on Merging Modules

We present the ablation on merging modules in Table 11 and Table 12.
<table><tr><td>Task</td><td>Method</td><td>es</td><td>fr</td><td>ru</td><td>tr</td><td>vi</td><td>th</td><td>sw</td><td>el</td><td>Avg.</td></tr><tr><td rowspan="3">XCOPA</td><td>Eng-FT</td><td>-</td><td>-</td><td>-</td><td>-</td><td>69.5</td><td>57.4</td><td>62.8</td><td>-</td><td>65.2</td></tr><tr><td>AriMerge</td><td>-</td><td>-</td><td>-</td><td>-</td><td>65.4</td><td>59.7</td><td>64.1</td><td>-</td><td>63.1</td></tr><tr><td>AdaMergeX</td><td>-</td><td>-</td><td>-</td><td>-</td><td>71.3</td><td>63.2</td><td>65.6</td><td>-</td><td>66.7</td></tr><tr><td rowspan="3">XNLI</td><td>Eng-FT</td><td>31.2</td><td>29.7</td><td>30.4</td><td>19.8</td><td>43.1</td><td>11.6</td><td>13.2</td><td>16.3</td><td>24.4</td></tr><tr><td>AriMerge</td><td>29.8</td><td>28.3</td><td>33.2</td><td>21.4</td><td>42.9</td><td>11.8</td><td>14.6</td><td>21.8</td><td>25.5</td></tr><tr><td>AdaMergeX</td><td>34.1</td><td>31.4</td><td>34.2</td><td>20.9</td><td>44.8</td><td>20.3</td><td>16.7</td><td>25.3</td><td>28.5</td></tr><tr><td rowspan="3">XLSum</td><td>Eng-FT</td><td>13.4</td><td>14.2</td><td>12.7</td><td>14.1</td><td>18.9</td><td>14.9</td><td>7.8</td><td>-</td><td>13.7</td></tr><tr><td>AriMerge</td><td>14.5</td><td>15.2</td><td>15.6</td><td>13.9</td><td>20.2</td><td>15.6</td><td>8.6</td><td>-</td><td>14.8</td></tr><tr><td>AdaMergeX</td><td>14.9</td><td>16.1</td><td>17.4</td><td>16.1</td><td>19.8</td><td>17.1</td><td>10.3</td><td>-</td><td>16.0</td></tr></table>
Table 6: Results of AdaMergeX on Prefix-tuning with mT5.
<table><tr><td>MGSM (French)</td></tr><tr><td>Let's think step by step.</td></tr><tr><td>Question: Les canes de Janet pondent 16 œufs par jour. Chaque matin, elle en mange trois au petit déjeuner et en utilise quatre autres pour préparer des muffins pour ses amis. Ce qui reste, elle le vend quotidiennement au marché fermier, au prix de 2 $ l'œuf de cane frais. Combien (en dollars) gagne-t-elle chaque jour au marché fermier? Answer:</td></tr><tr><td>XCOPA (Vietnamese)</td></tr><tr><td>Here is a premise and a question. Help me pick the more plausible option. Answer with (A) or (B).</td></tr><tr><td>Premise: Các mặt hàng đã được đóng gói trong bọc bong bóng. Question: What is the cause? (A) Nó dễ vỡ. (B) Nó nhỏ. Answer:</td></tr><tr><td>XNLI (French)</td></tr><tr><td>You should judge whether the hypothesis is true (entailment), false (contradiction), or undetermined (neutral) given the premise. The relationship can be chosen from entailment, contradiction, and neutral.</td></tr><tr><td>Premise: Cela fait 17 ans que je suis associé à l'IRT. Hypothesis: Je n'ai rien à voir avec l'IRT. Relationship:</td></tr><tr><td>XLSum (French)</td></tr><tr><td>Summarize the context in one sentence.</td></tr><tr><td>Title: Côte d'Ivoire: le groupe Magic System fête ses 20 ans</td></tr><tr><td>Context: Formé en 1997, le groupe a connu la consécration deux ans plus tard avec son tube Premier Gaou. Le groupe ivoirien fête ses 20 ans avec une tournée africaine et une autobiographie. Nous célébrons 20 ans d'amitiés, de collaboration, de moments de joies et de tristesses, raconte A'Salfo, le leader du groupe qui a su ouvrir les portes du marché africain et international au genre zouglou mais aussi aux autres genres ivoiriens, dont le coupé-décalé. A'Salfo, Manadjia, Tino et Goudé, les quatre boys d'Anoumabo, quartier déshérité d'Abidjan, aux ruelles boueuses et sablonneuses, ont joué partout, des stades africains aux salles mythiques comme l'Apollo à New York ou l'Olympia à Paris et jusqu'au Louvre, le 7 mai, pour le concert célébrant la victoire du président français Emmanuel Macron. Magic System a bénéficié de conseils avisés d'Alpha Blondy. Formé en 1997, le groupe a connu la consécration deux ans plus tard avec son tube Premier Gaou, fable sur les déboires sentimentaux d'un jeune homme naïf - le gaou est un homme crédule en nouchi, l'argot abidjanais. Le tube va propulser les quatre amis sur la scène mondiale. Magic System a multiplié les succès, enchaînant les albums, sans oublier l'amitié. Magic System est aussi un groupe qui a toujours voulu relever les défis : après Premier Gaou, nos détracteurs ont parlé de coup de chance ! On a donc relevé ce défi, explique Manadjia, le gros du groupe. Le groupe reconnaît avoir bénéficié de conseils avisés, dont ceux de la star ivoirienne du reggae, Alpha Blondy. Summary:</td></tr><tr><td>XQuAD (Vietnamese)</td></tr><tr><td>Ni ma din tíh mt ct ngang lién quan din khi lng mà ten-x ngut sùc d'tính toán. Hinh the nay bao gm thut ng áp sut gn lin vi cálc lc hot dng binh thng di vi khu vc nt ngang (dng chéo ma trn ca temx) cùng nh céc thut ng ct gln lin vi cálc lc tác dng song song vi din tich mt ct ngang (cáy y t ngoài dng chéo). May ten-x ng sut lién quan din céc lc gyä ra tt c céc bin dng (bin dng) bao gm c ng sut kéo và nén.:133-134:38-1-38-11</td></tr><tr><td>Question: Điều gì được sử dụng để tính diện tích mặt cắt trong thể tích của một vật thể? Answer:</td></tr></table>
Figure 2: One-shot prompting examples of tested datasets.
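
As an illustration of how such prompts can be assembled programmatically, the XNLI template from Figure 2 is reproduced below; the helper and its argument names are our own sketch, not a dataset loader.

```python
def build_xnli_prompt(premise: str, hypothesis: str) -> str:
    # Mirrors the XNLI template in Figure 2: an instruction, the
    # premise/hypothesis pair, and a cue for the predicted label.
    instruction = (
        "You should judge whether the hypothesis is true (entailment), "
        "false (contradiction), or undetermined (neutral) given the premise. "
        "The relationship can be chosen from entailment, contradiction, "
        "and neutral."
    )
    return (
        f"{instruction}\n\n"
        f"Premise: {premise} Hypothesis: {hypothesis} Relationship:"
    )

# Example from Figure 2 (French XNLI).
print(build_xnli_prompt(
    "Cela fait 17 ans que je suis associé à l'IRT.",
    "Je n'ai rien à voir avec l'IRT.",
))
```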
Table 7: Comprehensive experimental results for the baselines and AdaMergeX across all datasets in their available languages. The fine-tuning method is LoRA, with Llama2-7b as the backbone model.
<table><tr><td>Models</td><td>Method</td><td>de</td><td>ru</td><td>fr</td><td>es</td><td>zh</td><td>vi</td><td>tr</td><td>ar</td><td>el</td><td>th</td><td>hi</td><td>sw</td></tr><tr><td rowspan="6">MGSM</td><td>Vanilla</td><td>2.4</td><td>3.6</td><td>3.6</td><td>3.2</td><td>2.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.0</td><td>-</td><td>2.0</td></tr><tr><td>Eng-FT</td><td>22.4</td><td>24.8</td><td>20.4</td><td>22.4</td><td>22.8</td><td>-</td><td>-</td><td>-</td><td>-</td><td>6.8</td><td>-</td><td>2.4</td></tr><tr><td>XLT(Vanilla)</td><td>2.0</td><td>2.8</td><td>2.8</td><td>3.2</td><td>2.8</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.0</td><td>-</td><td>3.2</td></tr><tr><td>XLT(Eng-FT)</td><td>22.0</td><td>24.0</td><td>22.8</td><td>24.4</td><td>24.2</td><td>-</td><td>-</td><td>-</td><td>-</td><td>5.2</td><td>-</td><td>4.4</td></tr><tr><td>AriMerge</td><td>6.4</td><td>8.0</td><td>2.4</td><td>10.4</td><td>3.2</td><td>-</td><td>-</td><td>-</td><td>-</td><td>11.6</td><td>-</td><td>0.0</td></tr><tr><td>AdaMergeX</td><td>24.8</td><td>26.2</td><td>23.6</td><td>22.4</td><td>22.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>8.0</td><td>-</td><td>7.2</td></tr><tr><td rowspan="6">XCOPA</td><td>Vanilla</td><td>-</td><td>-</td><td>-</td><td>-</td><td>54.4</td><td>54.0</td><td>-</td><td>-</td><td>-</td><td>51.8</td><td>-</td><td>49.0</td></tr><tr><td>Eng-FT</td><td>-</td><td>-</td><td>-</td><td>-</td><td>61.8</td><td>67.2</td><td>-</td><td>-</td><td>-</td><td>52.6</td><td>-</td><td>50.6</td></tr><tr><td>XLT(Vanilla)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>56.8</td><td>52.4</td><td>-</td><td>-</td><td>-</td><td>51.0</td><td>-</td><td>50.0</td></tr><tr><td>XLT(Eng-FT)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>60.6</td><td>70.0</td><td>-</td><td>-</td><td>-</td><td>51.6</td><td>-</td><td>50.4</td></tr><tr><td>AriMerge</td><td>-</td><td>-</td><td>-</td><td>-</td><td>61.0</td><td>69.8</td><td>-</td><td>-</td><td>-</td><td>50.6</td><td>-</td><td>50.0</td></tr><tr><td>AdaMergeX</td><td>-</td><td>-</td><td>-</td><td>-</td><td>61.8</td><td>69.8</td><td>-</td><td>-</td><td>-</td><td>51.8</td><td>-</td><td>52.2</td></tr><tr><td rowspan="6">XNLI</td><td>Vanilla</td><td>27.4</td><td>26.6</td><td>24.0</td><td>20.2</td><td>0.3</td><td>21.5</td><td>14.3</td><td>0.1</td><td>0.3</td><td>0.3</td><td>0.0</td><td>43.0</td></tr><tr><td>Eng-FT</td><td>54.0</td><td>54.0</td><td>58.2</td><td>60.5</td><td>33.5</td><td>47.0</td><td>9.6</td><td>0.8</td><td>5.4</td><td>3.3</td><td>5.2</td><td>31.8</td></tr><tr><td>XLT(Vanilla)</td><td>44.7</td><td>44.4</td><td>39</td><td>36.9</td><td>5.3</td><td>36</td><td>20.6</td><td>0.4</td><td>0.2</td><td>13.9</td><td>0.2</td><td>42.6</td></tr><tr><td>XLT(Eng-FT)</td><td>54.1</td><td>44.3</td><td>44.6</td><td>58.6</td><td>34.0</td><td>43.0</td><td>15.9</td><td>0.0</td><td>1.2</td><td>2.0</td><td>0.9</td><td>33.9</td></tr><tr><td>AriMerge</td><td>28.7</td><td>16.5</td><td>12.8</td><td>21.2</td><td>1.0</td><td>32.1</td><td>16.2</td><td>0.3</td><td>1.8</td><td>0.0</td><td>10.2</td><td>22.8</td></tr><tr><td>AdaMergeX</td><td>57.8</td><td>56.7</td><td>63.1</td><td>62.8</td><td>32.9</td><td>49.2</td><td>10.3</td><td>1.0</td><td>9.1</td><td>13.3</td><td>14.9</td><td>35.9</td></tr><tr><td 
rowspan="6">XLSum</td><td>Vanilla</td><td>-</td><td>13.4</td><td>12.5</td><td>11.4</td><td>56.0</td><td>22.1</td><td>15.7</td><td>23.5</td><td>-</td><td>14.8</td><td>31.6</td><td>8.1</td></tr><tr><td>Eng-FT</td><td>-</td><td>21.7</td><td>16.1</td><td>11.3</td><td>58.4</td><td>21.2</td><td>16.4</td><td>25.8</td><td>-</td><td>15.6</td><td>32.9</td><td>9.9</td></tr><tr><td>XLT(Vanilla)</td><td>-</td><td>0.6</td><td>2.3</td><td>1.8</td><td>0.5</td><td>1.3</td><td>2.5</td><td>0.8</td><td>-</td><td>0.2</td><td>0.8</td><td>2.1</td></tr><tr><td>XLT(Eng-FT)</td><td>-</td><td>17.8</td><td>5.0</td><td>6.6</td><td>56.8</td><td>13.5</td><td>10.8</td><td>28.9</td><td>-</td><td>13.5</td><td>33.9</td><td>3.9</td></tr><tr><td>AriMerge</td><td>-</td><td>14.5</td><td>8.7</td><td>9.8</td><td>49.8</td><td>12.6</td><td>11.7</td><td>29.8</td><td>-</td><td>17.2</td><td>34.2</td><td>6.5</td></tr><tr><td>AdaMergeX</td><td>-</td><td>21.6</td><td>16.2</td><td>11.9</td><td>58.4</td><td>21.6</td><td>16.7</td><td>25.6</td><td>-</td><td>15.5</td><td>33.9</td><td>11.4</td></tr><tr><td rowspan="6">XQuAD</td><td>Vanilla</td><td>0.0</td><td>0.0</td><td>-</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>-</td></tr><tr><td>Eng-FT</td><td>49.0</td><td>34.1</td><td>-</td><td>48.2</td><td>53.5</td><td>40.9</td><td>17.3</td><td>10.2</td><td>13.9</td><td>31.0</td><td>11.8</td><td>-</td></tr><tr><td>XLT(Vanilla)</td><td>34.8</td><td>14.0</td><td>-</td><td>29.8</td><td>33.1</td><td>21.8</td><td>20.2</td><td>12.0</td><td>8.6</td><td>7.1</td><td>12.1</td><td>-</td></tr><tr><td>XLT(Eng-FT)</td><td>39.1</td><td>26.3</td><td>-</td><td>40.7</td><td>41.2</td><td>33.9</td><td>19.0</td><td>13.8</td><td>13.0</td><td>23.8</td><td>13.2</td><td>-</td></tr><tr><td>AriMerge</td><td>50.7</td><td>31.8</td><td>-</td><td>49.1</td><td>50.2</td><td>42.3</td><td>15.9</td><td>10.4</td><td>12.6</td><td>28.7</td><td>9.7</td><td>-</td></tr><tr><td>AdaMergeX</td><td>50.7</td><td>34.1</td><td>-</td><td>50.0</td><td>53.2</td><td>41.7</td><td>17.3</td><td>10.4</td><td>13.7</td><td>31.8</td><td>13.1</td><td>-</td></tr></table>
Table 8: Comprehensive experimental results for the baselines and AdaMergeX across all datasets in their available languages. The fine-tuning method is $(\mathrm{IA})^3$, with Llama2-7b as the backbone model.
<table><tr><td>Models</td><td>Method</td><td>de</td><td>ru</td><td>fr</td><td>es</td><td>zh</td><td>vi</td><td>tr</td><td>ar</td><td>el</td><td>th</td><td>hi</td><td>sw</td></tr><tr><td rowspan="6">MGSM</td><td>Vanilla</td><td>2.4</td><td>3.6</td><td>3.6</td><td>3.2</td><td>2.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.0</td><td>-</td><td>2.0</td></tr><tr><td>Eng-FT</td><td>2.0</td><td>2.0</td><td>3.6</td><td>2.4</td><td>1.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.4</td><td>-</td><td>2.0</td></tr><tr><td>XLT(Vanilla)</td><td>2.0</td><td>2.8</td><td>2.8</td><td>3.2</td><td>2.8</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.0</td><td>-</td><td>3.2</td></tr><tr><td>XLT(Eng-FT)</td><td>0.8</td><td>1.6</td><td>4.8</td><td>4.0</td><td>3.2</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.8</td><td>-</td><td>2.4</td></tr><tr><td>AriMerge</td><td>0.0</td><td>0.4</td><td>0.4</td><td>0.0</td><td>1.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.0</td><td>-</td><td>0.4</td></tr><tr><td>AdaMergeX</td><td>4.4</td><td>3.6</td><td>4.8</td><td>6.0</td><td>3.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.8</td><td>-</td><td>2.0</td></tr><tr><td rowspan="6">XCOPA</td><td>Vanilla</td><td>-</td><td>-</td><td>-</td><td>-</td><td>54.4</td><td>54.0</td><td>-</td><td>-</td><td>-</td><td>51.8</td><td>-</td><td>49.0</td></tr><tr><td>Eng-FT</td><td>-</td><td>-</td><td>-</td><td>-</td><td>54.8</td><td>54.2</td><td>-</td><td>-</td><td>-</td><td>51.2</td><td>-</td><td>49.8</td></tr><tr><td>XLT(Vanilla)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>56.8</td><td>52.4</td><td>-</td><td>-</td><td>-</td><td>51.0</td><td>-</td><td>50.0</td></tr><tr><td>XLT(Eng-FT)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>56.8</td><td>53.2</td><td>-</td><td>-</td><td>-</td><td>51.4</td><td>-</td><td>49.8</td></tr><tr><td>AriMerge</td><td>-</td><td>-</td><td>-</td><td>-</td><td>53.0</td><td>50.6</td><td>-</td><td>-</td><td>-</td><td>52.2</td><td>-</td><td>50.2</td></tr><tr><td>AdaMergeX</td><td>-</td><td>-</td><td>-</td><td>-</td><td>55.0</td><td>55.2</td><td>-</td><td>-</td><td>-</td><td>52.1</td><td>-</td><td>50.0</td></tr><tr><td rowspan="6">XNLI</td><td>Vanilla</td><td>27.4</td><td>26.6</td><td>24.0</td><td>20.2</td><td>0.3</td><td>21.5</td><td>14.3</td><td>0.1</td><td>0.3</td><td>0.3</td><td>0.0</td><td>43.0</td></tr><tr><td>Eng-FT</td><td>46.4</td><td>45.3</td><td>51.9</td><td>50.7</td><td>1.6</td><td>51.0</td><td>31.4</td><td>0.1</td><td>0.8</td><td>0.0</td><td>0.0</td><td>39.3</td></tr><tr><td>XLT(Vanilla)</td><td>44.7</td><td>44.4</td><td>39.0</td><td>36.9</td><td>5.3</td><td>36.0</td><td>20.6</td><td>0.4</td><td>0.2</td><td>13.9</td><td>0.2</td><td>42.6</td></tr><tr><td>XLT(Eng-FT)</td><td>34.3</td><td>36.8</td><td>36.3</td><td>34.2</td><td>25.4</td><td>34.4</td><td>32.1</td><td>5.2</td><td>3.8</td><td>20.7</td><td>8.0</td><td>34.4</td></tr><tr><td>AriMerge</td><td>42.4</td><td>47.2</td><td>52.9</td><td>49.3</td><td>6.4</td><td>54.5</td><td>49.1</td><td>0.2</td><td>0.5</td><td>0.1</td><td>0.0</td><td>35.5</td></tr><tr><td>AdaMergeX</td><td>45.3</td><td>46.5</td><td>53.0</td><td>54.3</td><td>1.5</td><td>58.8</td><td>41.7</td><td>2.2</td><td>0.9</td><td>0.1</td><td>0.1</td><td>38.4</td></tr><tr><td 
rowspan="6">XLSum</td><td>Vanilla</td><td>-</td><td>13.4</td><td>12.5</td><td>11.4</td><td>56.0</td><td>22.1</td><td>15.7</td><td>23.5</td><td>-</td><td>14.8</td><td>31.6</td><td>8.1</td></tr><tr><td>Eng-FT</td><td>-</td><td>4.2</td><td>9.0</td><td>6.8</td><td>56.6</td><td>14.7</td><td>13.6</td><td>16.6</td><td>-</td><td>12.5</td><td>32.3</td><td>7.6</td></tr><tr><td>XLT(Vanilla)</td><td>-</td><td>0.6</td><td>2.3</td><td>1.8</td><td>0.5</td><td>1.3</td><td>2.5</td><td>0.8</td><td>-</td><td>0.2</td><td>0.8</td><td>2.1</td></tr><tr><td>XLT(Eng-FT)</td><td>-</td><td>0.6</td><td>3.1</td><td>1.8</td><td>0.4</td><td>1.3</td><td>2.5</td><td>1.1</td><td>-</td><td>0.3</td><td>0.8</td><td>2.1</td></tr><tr><td>AriMerge</td><td>-</td><td>4.8</td><td>6.3</td><td>7.6</td><td>44.1</td><td>9.9</td><td>11.8</td><td>15.4</td><td>-</td><td>13.1</td><td>32.3</td><td>9.4</td></tr><tr><td>AdaMergeX</td><td>-</td><td>14.5</td><td>13.1</td><td>11.5</td><td>55.2</td><td>24.4</td><td>15.3</td><td>23.5</td><td>-</td><td>13.6</td><td>33.4</td><td>9.2</td></tr><tr><td rowspan="6">XQuAD</td><td>Vanilla</td><td>0.0</td><td>0.0</td><td>-</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>-</td></tr><tr><td>Eng-FT</td><td>47.3</td><td>32.8</td><td>-</td><td>47.6</td><td>53.7</td><td>35.1</td><td>28.9</td><td>22.8</td><td>21.9</td><td>26.9</td><td>23.2</td><td>-</td></tr><tr><td>XLT(Vanilla)</td><td>34.8</td><td>14.0</td><td>-</td><td>29.8</td><td>33.1</td><td>21.8</td><td>20.2</td><td>12.0</td><td>8.6</td><td>7.1</td><td>12.1</td><td>-</td></tr><tr><td>XLT(Eng-FT)</td><td>37.1</td><td>16.8</td><td>-</td><td>32.4</td><td>37.6</td><td>25.1</td><td>19.3</td><td>14.0</td><td>10.0</td><td>7.0</td><td>14.1</td><td>-</td></tr><tr><td>AriMerge</td><td>46.0</td><td>32.2</td><td>-</td><td>44.5</td><td>51.2</td><td>35.4</td><td>28.2</td><td>23.4</td><td>20.6</td><td>21.6</td><td>20.7</td><td>-</td></tr><tr><td>AdaMergeX</td><td>48.6</td><td>33.0</td><td>-</td><td>48.2</td><td>56.0</td><td>35.7</td><td>29.3</td><td>25.4</td><td>24.5</td><td>29.2</td><td>24.6</td><td>-</td></tr></table>
Table 9: Ablation study on backbone models. Results are evaluated on T5-base.
<table><tr><td>Adapters</td><td>Task</td><td>Method</td><td>es</td><td>fr</td><td>Avg.</td></tr><tr><td rowspan="6">LoRA</td><td rowspan="3">XNLI</td><td>Eng-FT</td><td>33.0</td><td>32.9</td><td>33.0</td></tr><tr><td>AriMerge</td><td>34.1</td><td>30.1</td><td>32.1</td></tr><tr><td>AdaMergeX</td><td>37.2</td><td>35.7</td><td>36.5</td></tr><tr><td rowspan="3">XLSum</td><td>Eng-FT</td><td>12.4</td><td>15.3</td><td>13.9</td></tr><tr><td>AriMerge</td><td>13.1</td><td>16.5</td><td>14.8</td></tr><tr><td>AdaMergeX</td><td>14.9</td><td>16.6</td><td>15.8</td></tr><tr><td rowspan="6">(IA)3</td><td rowspan="3">XNLI</td><td>Eng-FT</td><td>38.2</td><td>38.4</td><td>38.3</td></tr><tr><td>AriMerge</td><td>35.6</td><td>36.1</td><td>35.9</td></tr><tr><td>AdaMergeX</td><td>39.3</td><td>42.4</td><td>40.8</td></tr><tr><td rowspan="3">XLSum</td><td>Eng-FT</td><td>13.2</td><td>14.7</td><td>14.0</td></tr><tr><td>AriMerge</td><td>14.3</td><td>15.1</td><td>14.7</td></tr><tr><td>AdaMergeX</td><td>14.2</td><td>16.7</td><td>15.5</td></tr></table>
Table 10: Ablation study on adaptive merging method. AdaMergeX (adaptive) represents AdaMergeX with adaptive merging methods, while AdaMergeX (cross) represents AdaMergeX with cross merging methods, i.e., LoRA with merging method of $(\mathrm{IA})^3$ and vice versa. Increase $\uparrow$ and decrease $\downarrow$ are both compared to the baseline method Eng-Tune.
<table><tr><td>Adapters</td><td>Tasks</td><td>Method</td><td>es</td><td>vi</td><td>Avg.</td></tr><tr><td rowspan="6">LoRA</td><td rowspan="3">XNLI</td><td>Eng-Tune</td><td>60.5</td><td>47.0</td><td>53.8</td></tr><tr><td>AdaMergeX (adaptive)</td><td>62.8 ↑ 2.3</td><td>49.2 ↑ 2.2</td><td>56.0 ↑ 2.2</td></tr><tr><td>AdaMergeX (cross)</td><td>17.6 ↓ 42.9</td><td>15.4 ↓ 31.6</td><td>16.5 ↓ 37.3</td></tr><tr><td rowspan="3">XQUAD</td><td>Eng-Tune</td><td>48.2</td><td>40.9</td><td>44.6</td></tr><tr><td>AdaMergeX (adaptive)</td><td>50.0 ↑ 1.8</td><td>41.7 ↑ 0.8</td><td>45.9 ↑ 1.3</td></tr><tr><td>AdaMergeX (cross)</td><td>0.0 ↓ 48.2</td><td>0.0 ↓ 40.9</td><td>0.0 ↓ 44.6</td></tr><tr><td rowspan="6">(IA)3</td><td rowspan="3">XNLI</td><td>Eng-Tune</td><td>50.7</td><td>51.0</td><td>50.9</td></tr><tr><td>AdaMergeX (adaptive)</td><td>54.3 ↑ 3.6</td><td>58.8 ↑ 7.8</td><td>56.4 ↑ 5.5</td></tr><tr><td>AdaMergeX (cross)</td><td>50.9 ↑ 0.2</td><td>57.4 ↑ 6.4</td><td>54.2 ↑ 3.1</td></tr><tr><td rowspan="3">XQUAD</td><td>Eng-Tune</td><td>47.6</td><td>35.1</td><td>41.4</td></tr><tr><td>AdaMergeX (adaptive)</td><td>48.2 ↑ 0.6</td><td>35.7 ↑ 0.6</td><td>42.0 ↑ 0.6</td></tr><tr><td>AdaMergeX (cross)</td><td>47.5 ↓ 0.1</td><td>34.9 ↓ 0.2</td><td>41.3 ↓ 0.1</td></tr></table>
<table><tr><td>Models</td><td>Method</td><td>de</td><td>ru</td><td>fr</td><td>es</td><td>th</td><td>sw</td><td>Avg.</td></tr><tr><td rowspan="2">XNLI</td><td>Eng-Tune</td><td>63.3</td><td>56.4</td><td>56.6</td><td>58.6</td><td>4.1</td><td>41.5</td><td>46.8</td></tr><tr><td>AdaMergeX</td><td>63.8</td><td>57.2</td><td>58.2</td><td>58.9</td><td>3.7</td><td>41.8</td><td>47.3↑0.5</td></tr><tr><td rowspan="2">XQuAD</td><td>Eng-Tune</td><td>9.8</td><td>8.7</td><td>-</td><td>15.2</td><td>4.4</td><td>-</td><td>9.5</td></tr><tr><td>AdaMergeX</td><td>10.4</td><td>7.8</td><td>-</td><td>21.4</td><td>5.4</td><td>-</td><td>11.2↑1.7</td></tr></table>
Table 11: Llama2-7b on LoRA with fine-tuning target modules as $W^{Q}$ , $W^{V}$ and merging target modules as $W^{Q}$ , $W^{V}$ .
<table><tr><td>Models</td><td>Method</td><td>de</td><td>ru</td><td>fr</td><td>es</td><td>th</td><td>sw</td><td>Avg.</td></tr><tr><td rowspan="2">XNLI</td><td>Eng-Tune</td><td>54.0</td><td>54.0</td><td>58.2</td><td>60.5</td><td>3.3</td><td>31.8</td><td>43.6</td></tr><tr><td>AdaMergeX</td><td>53.7</td><td>55.6</td><td>60.5</td><td>62.7</td><td>4.9</td><td>33.6</td><td>45.2↑1.6</td></tr><tr><td rowspan="2">XQuAD</td><td>Eng-Tune</td><td>49.0</td><td>34.1</td><td>-</td><td>48.2</td><td>31.0</td><td>-</td><td>40.6</td></tr><tr><td>AdaMergeX</td><td>50.2</td><td>32.9</td><td>-</td><td>48.9</td><td>31.3</td><td>-</td><td>40.8↑0.2</td></tr></table>
Table 12: Llama2-7b on LoRA with fine-tuning target modules as $W^{Q}$ , $W^{K}$ , $W^{V}$ , $W^{O}$ , $W_{1}$ , $W_{2}$ and merging target modules as $W^{Q}$ , $W^{K}$ , $W^{V}$ , $W^{O}$ , $W_{1}$ , $W_{2}$ .
adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7f4ccafee09cb58f197cebc24ff9d399eac02afcc3ca95c9874006a2beb7c39
+size 994437
adamergexcrosslingualtransferwithlargelanguagemodelsviaadaptiveadaptermerging/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e279687e4382f8fb38d8e4d50bb10a09831d41f398208fdaa1058718fe66de89
+size 516141
adaptingsentencelevelautomaticmetricsfordocumentlevelsimplificationevaluation/62862604-1763-4a1b-a361-a1046b1ed4b2_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bb15c52f3684c0651740cdb122ee672b86c12fbca4559589e6858f31f974e9f
+size 114230
adaptingsentencelevelautomaticmetricsfordocumentlevelsimplificationevaluation/62862604-1763-4a1b-a361-a1046b1ed4b2_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c57a1830399b6f6772984e529b8ba96c201e5bf0d90217465a84bd08cb2506e4
+size 134575