Add Batch 2a7a78cf-9fb5-42b0-bc9a-22fced734deb
This view is limited to 50 files because it contains too many changes. See raw diff
- 112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/25427acc-0095-4e20-ae7a-6d156491d531_content_list.json +3 -0
- 112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/25427acc-0095-4e20-ae7a-6d156491d531_model.json +3 -0
- 112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/25427acc-0095-4e20-ae7a-6d156491d531_origin.pdf +3 -0
- 112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/full.md +500 -0
- 112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/images.zip +3 -0
- 112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/layout.json +3 -0
- abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/bafb35f6-8fe7-40f9-8743-f4d30539c3ef_content_list.json +3 -0
- abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/bafb35f6-8fe7-40f9-8743-f4d30539c3ef_model.json +3 -0
- abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/bafb35f6-8fe7-40f9-8743-f4d30539c3ef_origin.pdf +3 -0
- abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/full.md +378 -0
- abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/images.zip +3 -0
- abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/layout.json +3 -0
- ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/f96ed249-8546-48a4-b23c-b67cd31bf9fc_content_list.json +3 -0
- ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/f96ed249-8546-48a4-b23c-b67cd31bf9fc_model.json +3 -0
- ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/f96ed249-8546-48a4-b23c-b67cd31bf9fc_origin.pdf +3 -0
- ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/full.md +0 -0
- ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/images.zip +3 -0
- ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/layout.json +3 -0
- absevalanagentbasedframeworkforscriptevaluation/5ede6651-5e3a-4a8f-94d6-a388c5a623ea_content_list.json +3 -0
- absevalanagentbasedframeworkforscriptevaluation/5ede6651-5e3a-4a8f-94d6-a388c5a623ea_model.json +3 -0
- absevalanagentbasedframeworkforscriptevaluation/5ede6651-5e3a-4a8f-94d6-a388c5a623ea_origin.pdf +3 -0
- absevalanagentbasedframeworkforscriptevaluation/full.md +570 -0
- absevalanagentbasedframeworkforscriptevaluation/images.zip +3 -0
- absevalanagentbasedframeworkforscriptevaluation/layout.json +3 -0
- academicscancontributetodomainspecializedlanguagemodels/8f961a0e-5519-4e77-ad99-b5d1e1517080_content_list.json +3 -0
- academicscancontributetodomainspecializedlanguagemodels/8f961a0e-5519-4e77-ad99-b5d1e1517080_model.json +3 -0
- academicscancontributetodomainspecializedlanguagemodels/8f961a0e-5519-4e77-ad99-b5d1e1517080_origin.pdf +3 -0
- academicscancontributetodomainspecializedlanguagemodels/full.md +219 -0
- academicscancontributetodomainspecializedlanguagemodels/layout.json +3 -0
- accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/4149d901-7794-481a-b333-412366837bec_content_list.json +3 -0
- accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/4149d901-7794-481a-b333-412366837bec_model.json +3 -0
- accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/4149d901-7794-481a-b333-412366837bec_origin.pdf +3 -0
- accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/full.md +221 -0
- accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/images.zip +3 -0
- accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/layout.json +3 -0
- aceallmbasednegotiationcoachingsystem/009d36b4-85b3-47cc-8be1-d64ca1120806_content_list.json +3 -0
- aceallmbasednegotiationcoachingsystem/009d36b4-85b3-47cc-8be1-d64ca1120806_model.json +3 -0
- aceallmbasednegotiationcoachingsystem/009d36b4-85b3-47cc-8be1-d64ca1120806_origin.pdf +3 -0
- aceallmbasednegotiationcoachingsystem/full.md +791 -0
- aceallmbasednegotiationcoachingsystem/images.zip +3 -0
- aceallmbasednegotiationcoachingsystem/layout.json +3 -0
- acloserlookatmultidimensionalonlinepoliticalincivility/8e194152-6624-404c-bf7d-b3df6189c956_content_list.json +3 -0
- acloserlookatmultidimensionalonlinepoliticalincivility/8e194152-6624-404c-bf7d-b3df6189c956_model.json +3 -0
- acloserlookatmultidimensionalonlinepoliticalincivility/8e194152-6624-404c-bf7d-b3df6189c956_origin.pdf +3 -0
- acloserlookatmultidimensionalonlinepoliticalincivility/full.md +387 -0
- acloserlookatmultidimensionalonlinepoliticalincivility/images.zip +3 -0
- acloserlookatmultidimensionalonlinepoliticalincivility/layout.json +3 -0
- acomparisonoflanguagemodelingandtranslationasmultilingualpretrainingobjectives/979594f1-eb73-4d54-8635-879feb871523_content_list.json +3 -0
- acomparisonoflanguagemodelingandtranslationasmultilingualpretrainingobjectives/979594f1-eb73-4d54-8635-879feb871523_model.json +3 -0
- acomparisonoflanguagemodelingandtranslationasmultilingualpretrainingobjectives/979594f1-eb73-4d54-8635-879feb871523_origin.pdf +3 -0
112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/25427acc-0095-4e20-ae7a-6d156491d531_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ec07eb9c34579559c4a7aa5c73b6df3037eb75e71424769a06f9b63f2ccedd5
+size 113812
112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/25427acc-0095-4e20-ae7a-6d156491d531_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dde0e6f1b95e91d15493123203716fa938ee2ee11c57f0110bb1b0b19a242150
+size 143855
112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/25427acc-0095-4e20-ae7a-6d156491d531_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03712a106946fce9b0a915bff6e445b592654911d68f7884a7aa8ed3b5899765
+size 3018671
112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/full.md
ADDED
@@ -0,0 +1,500 @@
# $1 + 1 > 2$: Can Large Language Models Serve as Cross-Lingual Knowledge Aggregators?

Yue Huang$^{1*}$, Chenrui Fan$^{2*}$, Yuan Li$^{3}$, Siyuan Wu$^{4}$, Tianyi Zhou$^{2}$, Xiangliang Zhang$^{1\dagger}$, Lichao Sun$^{5}$

<sup>1</sup>University of Notre Dame <sup>2</sup>University of Maryland, College Park <sup>3</sup>University of Cambridge <sup>4</sup>Huazhong University of Science and Technology <sup>5</sup>Lehigh University
{yhuang37, xzhang33}@nd.edu, cfan42@umd.edu

# Abstract

Large Language Models (LLMs) have garnered significant attention due to their remarkable ability to process information across various languages. Despite these capabilities, they exhibit inconsistencies when handling identical queries posed in different languages, presenting challenges for further advancement. This paper introduces a method to enhance the multilingual performance of LLMs by aggregating knowledge from diverse languages. The approach incorporates a language-specific low-resource knowledge detector, a target language selection process, and mechanisms for answer replacement and integration. Our experiments demonstrate notable performance improvements, particularly in reducing the performance disparity across languages. An ablation study confirms that each component of our method contributes significantly to these enhancements. This research highlights the inherent potential of LLMs to harmonize their multilingual capabilities and offers valuable insights for further exploration.
# 1 Introduction

Large Language Models (LLMs) are increasingly recognized for their impressive capabilities in natural language processing (NLP). Employed across a variety of domains such as the medical sector (Liu et al., 2023c; Zhang et al., 2023a), data generation (Wu et al., 2024), scientific research (Guo et al., 2023; Li et al., 2024c), and LLM-based agents (Liu et al., 2023b; Guo et al., 2024; Huang et al., 2023b; Chen et al., 2024a), LLMs have demonstrated significant utility. Additionally, recent advancements have expanded multilingual LLM research (Qin et al., 2024; Li et al., 2024a; Xu et al., 2024b; Chen et al., 2024b), which focuses on enhancing their ability to process multiple languages, thereby increasing their accessibility and relevance across diverse linguistic demographics.

Figure 1: The top shows an example of distinct answers to the same question in different languages. The bottom shows GPT-4's performance on 300 queries from HalluEval (Li et al., 2023a) in nine different languages.

Despite these advancements, LLMs demonstrate inconsistencies when processing queries that have the same meaning but are posed in different languages (Li et al., 2024d), as evidenced by the results in Figure 1. This inconsistency not only diminishes the efficacy and fairness of LLMs but also signals underlying knowledge conflicts (Xu et al., 2024a) that prevent these models from achieving true intelligence (Liu et al., 2023b; Huang et al., 2023b). Furthermore, such inconsistency can erode trust in LLM applications, particularly when users from varied linguistic backgrounds cannot equally benefit from the technology (Li et al., 2023b).
To address the inconsistency problems in LLMs, we propose a novel method that leverages the intrinsic capabilities of LLMs by integrating knowledge across different languages. Our approach begins with the development of a low-resource knowledge detector, which assesses whether a user's query involves knowledge that is underrepresented in the query's language. When the query does not involve low-resource knowledge, it is answered directly by the LLM. In contrast, if low-resource knowledge is detected, the LLM is asked to select the target language best suited to handle this specific knowledge. Once the target language is selected, the query is translated into that language, and the LLM generates a response based on the translated query. This response either replaces the original answer or is integrated with it. Finally, the response is translated back into the original language of the query and delivered to the user.

We conducted comprehensive experiments using six popular LLMs and five bilingual datasets (specifically, English and Chinese) to evaluate the effectiveness of our proposed method. The experimental results demonstrate that our approach successfully integrates knowledge from different languages, leading to improvements in overall performance. Importantly, it significantly reduces the performance disparities across languages, thereby addressing the inconsistency issues inherent in LLMs and promoting fairness for downstream applications. Additionally, our ablation study confirms that both the low-resource knowledge detector and the language selection process are crucial to the improvements observed. Overall, our contributions are as follows:

- We pose an important challenge concerning the inconsistency of LLMs across languages in downstream tasks, and observe that low-resource knowledge in one language can be supplied from another language.
- Based on this observation, we propose a method that utilizes the LLM's internal capability to enhance its performance across languages through a low-resource knowledge detector, a language selection process, and answer replacement & integration.
- We conduct extensive experiments on six popular LLMs and five bilingual datasets. The results show that our proposed method effectively enhances the performance of LLMs by integrating knowledge from different languages and reduces the performance gap across languages.
# 2 Related Work

# 2.1 Multilingual LLMs

There has been a surge of research on Multilingual Large Language Models (MLLMs) (Qin et al., 2024; Li et al., 2024a; Xu et al., 2024b; Chen et al., 2024b; Etxaniz et al., 2023). For instance, InternLM, proposed by Team (2023), is a multilingual language model that has demonstrated excellent performance on multiple Chinese benchmarks. Similarly, PolyLM (Wei et al., 2023b) is an LLM trained using curriculum learning that surpasses other open-source models on multilingual tasks. Beyond these, popular multilingual LLMs also include the ChatGLM series developed by Du et al. (2022) and Zeng et al. (2022), and the Baichuan series (Yang et al., 2023). To improve model performance on multilingual tasks, Muennighoff et al. (2023) and Zhang et al. (2023b) focus on fine-tuning parameters with multilingual training data. Our work also connects broadly to cross-lingual methods at inference time. Liu et al. (2024) pointed out that translation into English enhances performance on some multilingual tasks, while native-language prompting more effectively addresses culturally and linguistically specific questions. In addition, Huang et al. (2023a) and Qin et al. (2023) introduced cross-lingual prompting to enhance the multilingual capabilities of large language models, focusing on improving logical reasoning and task performance across diverse languages. Pourkamali and Sharifi (2024) proposed Self-Supervised Prompting (SSP), a novel method for in-context learning in low-resource languages that improves performance by using stages of noisy labeling and selective exemplar use.

In terms of evaluation, Lai et al. (2023) assessed ChatGPT's performance across 37 different languages. CulturaX (Nguyen et al., 2023) is a multilingual dataset containing 6.3 trillion tokens across 167 languages, aimed at promoting the development of multilingual LLMs. Additionally, M3Exam (Zhang et al., 2023c) introduces a dataset derived from real and official human exam questions, designed for evaluating LLMs in a multilingual, multimodal, and multilevel context. BUFFET consolidates 15 varied tasks across 54 languages into a sequence-to-sequence format, offering a standardized set of few-shot examples and instructions (Asai et al., 2023).
# 2.2 Factuality in LLMs

One way to improve the factuality of LLMs is the utilization of knowledge graphs (KGs) (Sun et al., 2024b). For instance, Abu-Rasheed et al. (2024) use knowledge graphs as context sources for explainable learning recommendations. Yang et al. (2024b) suggest improving LLMs through knowledge graph-enhanced LLMs, which offers a way to boost the factual reasoning capabilities of LLMs. Sun et al. (2024a) utilize the LLM as an agent that interacts with and navigates through KGs, identifying relevant entities and relationships and conducting reasoning with the knowledge it gathers.

Figure 2: The knowledge domain of a multilingual LLM can be separated into multiple sections (the figure shows two). The language-specific knowledge (pure blue or pure orange) in one language can be utilized to improve performance in other languages.

Another method to enhance the factual knowledge of LLMs is prompt engineering. Previous studies propose various prompting methods such as Chain-of-Thought (CoT) (Wei et al., 2023a) and Tree-of-Thoughts (ToT) (Yao et al., 2023). Moreover, some studies use knowledge injection to enhance the domain capability of LLMs (Huang and Sun, 2024; Huang et al., 2024a).
# 2.3 Hallucination Mitigation

A significant challenge associated with LLMs is their tendency to generate seemingly plausible yet fabricated responses, a phenomenon known as hallucination, which is a major concern for the trustworthiness of LLMs (Huang et al., 2024b, 2023c). To address this issue and prevent misinformation (Huang et al., 2024a), recent research has introduced various hallucination mitigation strategies (Tonmoy et al., 2024). For example, Feng et al. (2024) leverage multi-LLM collaboration to decrease hallucinations in LLM outputs. Additionally, Guan et al. (2024) developed a framework called Knowledge Graph-based Retrofitting (KGR), which integrates LLMs with KGs to minimize factual hallucinations during reasoning. Similarly, Manakul et al. (2023) propose SelfCheckGPT, a sampling-based method that verifies the accuracy of responses from black-box models without the need for an external database.
# 3 Methodology

# 3.1 Motivation

Our proposed method draws inspiration from the distinct knowledge domains inherent to different languages. As illustrated in Figure 2, language-specific knowledge can serve as supplementary information for another language. Figure 3 demonstrates that when queries related to English domain knowledge are posed in Chinese, the performance (i.e., accuracy) of LLMs declines compared to the same queries posed in English. Furthermore, Figure 8 reveals that LLMs often provide correct answers in only one of the two languages for a given query, suggesting the potential to use the correct response to rectify inaccuracies in the other language. These observations underscore the potential of leveraging the strengths of each language to enhance LLM performance across languages. As shown in Figure 4, the proposed method includes three main modules: low-resource knowledge detection, target language selection, and answer replacement & integration.

Figure 3: The average performance of six LLMs on five datasets. We show the accuracy on Chinese-domain and English-domain knowledge with the query/answer in Chinese and English.
# 3.2 Construction of Low-Resource Dataset

We first construct a low-resource dataset to measure current LLMs' multilingual knowledge-transfer capacity. We also use this dataset to train our low-resource knowledge detector in Section 3.3. We initialize the dataset by combining various existing question-answering datasets, including TriviaQA (Joshi et al., 2017), CMMLU (Li et al., 2024b), HalluEval (Li et al., 2023a), TruthfulQA (Lin et al., 2022), MKQA (Longpre et al., 2021), XQuAD (Artetxe et al., 2019), LC-QuAD (Trivedi et al., 2017), and KgCLUE (Xu et al., 2020). Moreover, we also construct a dataset of LLM-synthesized data to cover more knowledge and topics in the training corpus (we call it MULTIGEN). The details of the constructed dataset are shown in Appendix C.

To label these data items, we use LLM-human collaboration to label the samples as Chinese-specific, English-specific, or common sense. Specifically, to confirm the correctness of the labeling, we query GPT-4 twice per sample with a temperature of 1.0 to surface the potential uncertainty of its output. We then conduct human inspection of the samples whose labels are inconsistent between the two labeling passes, to confirm the labeling and filter out samples that are too hard or too ambiguous for current LLMs. The statistics of the dataset can be found in Table 1.
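
To make the dual-pass labeling concrete, the sketch below shows one way to implement it; the labeling prompt and helper names are our own illustration (the paper's exact template is not reproduced), and an OpenAI-style client with `OPENAI_API_KEY` set is assumed.

```python
# Illustrative sketch of the dual-pass GPT-4 labeling with temperature 1.0;
# disagreements between the two passes are routed to human inspection.
from openai import OpenAI

client = OpenAI()
LABELS = {"chinese-specific", "english-specific", "common-sense"}
LABEL_PROMPT = ("Classify the knowledge needed to answer this question as one "
                "of: chinese-specific, english-specific, common-sense. "
                "Reply with the label only.\n\nQuestion: {q}")

def label_once(question: str) -> str:
    resp = client.chat.completions.create(
        model="gpt-4",
        temperature=1.0,  # deliberately high to surface labeling uncertainty
        messages=[{"role": "user", "content": LABEL_PROMPT.format(q=question)}],
    )
    return resp.choices[0].message.content.strip().lower()

def label_with_agreement(question: str):
    """Two independent passes; disagreements go to human inspection."""
    first, second = label_once(question), label_once(question)
    agreed = first == second and first in LABELS
    return (first if agreed else (first, second)), agreed
```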
# 3.3 Low-Resource Knowledge Detector

The multilingual misalignment stems from unbalanced training data: knowledge with low data resources is less likely to be captured by the language model during pretraining. For example, queries about the details of Chinese history are not well answered by the model when asked in English, as such content appears less frequently in the English pretraining corpus. This can be improved by fully utilizing the model's inherent capacity. To implement this, we first adopt a low-resource knowledge detector to identify these low-resource queries and then borrow knowledge from other languages to help.

We train a classifier for each source language to identify queries that are low-resource for that language. This classifier separates queries about common sense and language-specific knowledge (e.g., a Spanish query about Spanish culture) from low-resource queries (e.g., a Spanish query about Turkish geography). Queries of the former class are fed into the normal language-generation pipeline, while the latter are enhanced with knowledge from other languages through our other modules. Given a query $x$ in the original language $L_{o}$, the low-resource knowledge detector $F_{L_{o}}$ works as follows:

$$
F_{L_{o}}(x) = \begin{cases} 1, & x \text{ is a low-resource query of } L_{o} \\ 0, & \text{otherwise} \end{cases} \tag{1}
$$

We demonstrate in the experiments that such a classifier is effective enough to distinguish low-resource queries from others. The construction of the training dataset for $F_{L}$ can be found in Subsection 4.1.

The method is cost-effective, as it does not require translating all queries into multiple languages: low-resource queries are only a small fraction of user queries. The majority of user queries concern common sense or knowledge specific to that language and do not need to go through the following process.
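
As a concrete illustration of $F_{L_o}$, the following is a minimal sketch of the detector as a binary BERT classifier, using `bert-base-chinese` as in Table 6; the inference wrapper and hyperparameters are our assumptions, not the released implementation.

```python
# Minimal sketch of the detector F_{L_o} as a binary BERT classifier;
# bert-base-chinese matches the Chinese variant reported in Table 6, and the
# truncation length is our assumption.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "bert-base-chinese"  # detector for queries in Chinese (L_o = zh)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)
model.eval()

@torch.no_grad()
def is_low_resource(query: str) -> int:
    """Implements Eq. (1): 1 if `query` is low-resource for L_o, else 0."""
    inputs = tokenizer(query, return_tensors="pt", truncation=True, max_length=128)
    return int(model(**inputs).logits.argmax(dim=-1).item())

# Fine-tuning on the labeled dataset of Section 3.2 (low-resource vs. not)
# follows the standard sequence-classification recipe and is omitted here.
```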
# Algorithm 1 Proposed Method

Require: Query $x$ in original language $L_{o}$
Ensure: Final answer $a_{\mathrm{final}}$

1: Low-Resource Knowledge Detection:
2: Train classifier $F_{L_o}$ for language $L_o$
3: isLowResource $\leftarrow F_{L_o}(x)$
4: if isLowResource == 1 then
5: Target Language Selection:
6: Define prompt $P_{\mathrm{sel}}$ for selecting the target language
7: $L_{t}\gets \mathrm{LLM}(P_{\mathrm{sel}}(x))$
8: $x^{\prime}\gets \operatorname{Trans}(x,L_{t})$
9: Answer Generation:
10: $a_{t}\gets \mathrm{LLM}(x^{\prime})$
11: $a_{o}\gets \operatorname{Trans}(a_{t},L_{o})$
12: Answer Integration:
13: Define prompt $P_{\mathrm{int}}$ for integrating answers
14: $a_{\mathrm{final}} \gets \mathrm{LLM}(P_{\mathrm{int}}(a_t, a_o))$
15: else
16: $a_{\mathrm{final}} \gets \mathrm{LLM}(x)$
17: end if
18: return $a_{\mathrm{final}}$
# 3.4 Target Language Selection

After identifying a low-resource query from the user's input, we adopt a target language selection module to find the most suitable language for that question (e.g., translating an English question about Chinese history into Chinese). Answering the query in its most resourceful language improves output quality in terms of correctness and may offer more useful details to the user. We implement this process by prompting the LLM itself, as the selection is model-dependent: different LLMs may select different target languages due to their pretraining corpora. Given the prompt $P_{\mathrm{sel}}$ that helps select the target language and the low-resource query $x$, the procedure of target language selection is defined as follows:

$$
x^{\prime} \leftarrow \operatorname{Trans}\left(x, \operatorname{LLM}\left(P_{\mathrm{sel}}(x)\right)\right), \tag{2}
$$

where the translator $\operatorname{Trans}(Q, L_t)$ translates the input $Q$ into the target language $L_{t}$, and LLM is the large language model that selects the most suitable language for $x$ given prompt $P_{\mathrm{sel}}$.
# 3.5 Answer Replacement & Integration

Figure 4: The proposed method begins with detecting low-resource knowledge in the query using a detector. If low-resource knowledge is detected, the LLM then selects the language most likely to yield the best answer. Answer replacement and integration are employed to formulate the final response.

After translating the original query $x$ into the target-language query $x'$, we use it to prompt the model for the answer in the target language, $a_t$. We simply translate the answer back into the original language to get the final answer $a_{\mathrm{final}}$ for the user's understanding:

$$
a_{\mathrm{final}} \leftarrow \operatorname{Trans}\left(a_{t}, L_{o}\right), \tag{3}
$$

where $L_{o}$ is the original language of the user's query.

We also explore the integration of answers in the scenario of open-ended question answering (the prompt template is shown in Appendix F). We let the LLM combine and integrate the answer in the target language $a_{t}$ and the answer in the original language $a_{o} = \mathrm{LLM}(x)$:

$$
a_{\mathrm{final}} = \operatorname{LLM}\left(P_{\mathrm{int}}\left(a_{t}, a_{o}\right)\right), \tag{4}
$$

where $P_{\mathrm{int}}$ is the prompt that helps the LLM integrate $a_{t}$ and $a_{o}$, and $a_{\mathrm{final}}$ is the final answer.
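
Putting the three modules together, a minimal sketch of Algorithm 1 might look as follows; `llm`, `translate`, and `is_low_resource` are hypothetical stand-ins for a chat-completion call, a translation API, and the detector above, and the two prompts paraphrase rather than reproduce the templates in Appendix F.

```python
# A minimal sketch of Algorithm 1, under the stated assumptions:
# llm(prompt) -> str, translate(text, lang) -> str, is_low_resource(query) -> int.
P_SEL = ("In which language is the knowledge needed to answer this question "
         "most likely to be well covered? Reply with the language name only.\n"
         "Question: {q}")
P_INT = ("Combine the following two answers into a single consistent answer:\n"
         "Answer A: {a}\nAnswer B: {b}")

def answer_query(query: str, source_lang: str, llm, translate, is_low_resource,
                 integrate: bool = True) -> str:
    if not is_low_resource(query):
        return llm(query)                        # common-sense / native knowledge
    target_lang = llm(P_SEL.format(q=query))     # target language selection (Eq. 2)
    a_t = llm(translate(query, target_lang))     # answer in the target language
    a_back = translate(a_t, source_lang)         # translate back to L_o
    if integrate:                                # answer integration (Eq. 4)
        return llm(P_INT.format(a=a_back, b=llm(query)))
    return a_back                                # answer replacement (Eq. 3)
```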
# 4 Experiments

We chose English and Chinese for our experiments primarily due to their broad applicability and the availability of resources. First, most LLMs, particularly open-source ones like the ChatGLM series, perform strongly in English and Chinese. This trend highlights the advanced development and optimization of LLMs for these languages, making them ideal for rigorous testing. Second, major LLM benchmarks and datasets predominantly focus on these two languages; for instance, besides English benchmarks and datasets, benchmarks such as HalluQA and AlignBench are primarily designed around English and Chinese, providing a robust framework for evaluating our methods. Lastly, the linguistic features and data availability of English and Chinese ensure comprehensive evaluation and validation of our approaches and suggest that our findings could be extrapolated to other languages. This potential for cross-linguistic application supports the broader relevance and utility of our study, making the choice of English and Chinese both strategic and impactful.

<table><tr><td>Dataset</td><td>Chinese</td><td>Common</td><td>English</td><td>Total</td><td>Lang.</td></tr><tr><td>TriviaQA</td><td>21</td><td>754</td><td>1040</td><td>1815</td><td>En.</td></tr><tr><td>CMMLU</td><td>1200</td><td>2162</td><td>2751</td><td>6113</td><td>Ch.</td></tr><tr><td>HalluEval</td><td>28</td><td>923</td><td>1033</td><td>1984</td><td>En.</td></tr><tr><td>TruthfulQA</td><td>9</td><td>322</td><td>212</td><td>543</td><td>En.</td></tr><tr><td>MKQA</td><td>71</td><td>315</td><td>1114</td><td>1500</td><td>En.</td></tr><tr><td>XQuAD</td><td>72</td><td>610</td><td>503</td><td>1185</td><td>En.</td></tr><tr><td>LC-QuAD</td><td>2</td><td>640</td><td>345</td><td>987</td><td>En.</td></tr><tr><td>KgCLUE</td><td>1218</td><td>610</td><td>172</td><td>2000</td><td>Ch.</td></tr><tr><td>MULTIGEN</td><td>1095</td><td>1121</td><td>1083</td><td>3299</td><td>En.</td></tr><tr><td>Total</td><td>3716</td><td>7457</td><td>8253</td><td>19426</td><td>/</td></tr></table>

Table 1: Dataset statistics for the low-resource knowledge detector. "Lang." is the original language of the dataset.
# 4.1 Experiment Setup

**Training Datasets for Detectors.** Since we need to train a low-resource detector for each language, for each dataset in English (e.g., TriviaQA) or in Chinese (e.g., CMMLU, KgCLUE), we translate it into the other language (i.e., Chinese or English) through a translation API*.

**Detailed Setting.** To ensure the reproducibility of results, the temperature parameter for all LLMs is set to 0. For ChatGPT, GPT-4, and Qwen-turbo, we use the official APIs. For Yi-34b, we use the API from Replicate†. ChatGLM3 and Llama3-Chinese are deployed locally for inference on a V100 (40G).
**Test Datasets.** We selected five datasets for our study, comprising four pre-existing datasets and one developed in-house. The following criteria guided our selection:

- The datasets should not predominantly consist of common-sense questions (i.e., questions that are independent of linguistic background), as this minimizes the potential for LLMs to demonstrate improvement through linguistic knowledge alone.
- The datasets should maintain a balance in difficulty; they should be neither overly simplistic nor excessively challenging. Datasets that are too easy inflate performance metrics for LLMs, reducing the potential for meaningful improvement. Conversely, datasets that are too challenging degrade performance across all linguistic contexts, constraining the opportunity to enhance performance in the target language by leveraging knowledge from additional languages.

Figure 5: Statistics of the datasets in our experiments.

For all datasets in our study, we select QA-pair samples and do not use extra data in our evaluation. In total, we select five datasets for evaluating our method. These include four existing datasets: TriviaQA (Joshi et al., 2017), HaluEval (Li et al., 2023a), HalluQA (Cheng et al., 2023), and WebQA (Li et al., 2016). The statistics of the selected datasets are shown in Figure 5, with details in Appendix A. In addition to the four datasets mentioned above, we constructed a bilingual Chinese-English dataset tailored to the Chinese domain. Details of the construction process are provided in Appendix D.

**Models.** We carefully select six popular LLMs, including proprietary and open-source models that master both English and Chinese: ChatGPT (OpenAI, 2023a), GPT-4 (OpenAI, 2023b), ChatGLM3 (Zeng et al., 2022; Du et al., 2022), Yi-34b (AI et al., 2024), Qwen-turbo (Bai et al., 2023), and Llama3-Chinese (Ila, 2024).
# 4.2 Main Results

We evaluate the effectiveness of our proposed method on the five benchmark datasets and the six popular LLMs mentioned above. Each dataset is translated into a Chinese and an English version for assessment. We first query the models with the questions in each dataset to obtain generated answers, and then use GPT-4 as a judge model to compare each generated answer against the dataset's reference answer to decide whether the model produced a correct output. We calculate the generation accuracy and present the results in Table 2, marking a result in green where there is a significant improvement of more than $1\%$ and in red where accuracy decreases by more than $1\%$.
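
A minimal sketch of this judging step is shown below; the judge prompt is our paraphrase, not the paper's exact template, and an OpenAI-style client is assumed.

```python
# Sketch of the GPT-4-as-judge correctness check used to compute accuracy.
from openai import OpenAI

client = OpenAI()
JUDGE_PROMPT = ("Question: {q}\nReference answer: {ref}\nModel answer: {ans}\n"
                "Does the model answer agree with the reference answer? "
                "Reply yes or no.")

def is_correct(question: str, reference: str, model_answer: str) -> bool:
    resp = client.chat.completions.create(
        model="gpt-4",
        temperature=0,  # deterministic judging, matching the reported setting
        messages=[{"role": "user", "content": JUDGE_PROMPT.format(
            q=question, ref=reference, ans=model_answer)}],
    )
    return resp.choices[0].message.content.strip().lower().startswith("yes")

# accuracy = mean(is_correct(q, ref, a) over all test samples)
```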
As can be seen from the table, our method effectively improves model performance in many scenarios. Specifically, the performance of GPT-4 on the Chinese version of HalluEval improves significantly from $47.99\%$ to $64.36\%$. This means a large cross-lingual knowledge gap still exists even in advanced models such as GPT-4, and our method successfully leverages knowledge across languages to enhance performance. It is important to note that the improvements do not rely on other models or online resources; they arise from leveraging the model's inherent capacity.

It can also be observed from Table 2 that most improvements occur in the language that differs from that of the original dataset, which is also where the models suffer weaker performance. The comparison of the cross-lingual performance gap before and after applying our method is shown in Figure 6. The figure shows that our method significantly reduces the knowledge gap between languages for all LLMs we evaluate, thus improving the fairness of applications for users of different languages.

Figure 6: The average performance gap on datasets before and after applying our method.
# 4.3 Ablation Study

As our generation pipeline consists of several parts, we conduct an ablation study to validate their effectiveness and cost.

<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Lang.</td><td colspan="2">ChatGLM3</td><td colspan="2">ChatGPT</td><td colspan="2">GPT-4</td><td colspan="2">Yi-34b</td><td colspan="2">Qwen-turbo</td><td colspan="2">Llama3-Ch.</td></tr><tr><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td></tr><tr><td rowspan="2">HalluEval</td><td>(en)</td><td>18.03%</td><td>18.03%</td><td>57.98%</td><td>57.84%</td><td>67.13%</td><td>67.13%</td><td>42.86%</td><td>42.72%</td><td>29.31%</td><td>29.31%</td><td>40.67%</td><td>40.67%</td></tr><tr><td>(ch)</td><td>11.23%</td><td>17.34%</td><td>32.07%</td><td>51.40%</td><td>47.99%</td><td>64.36%</td><td>25.10%</td><td>39.67%</td><td>19.35%</td><td>26.09%</td><td>25.35%</td><td>37.19%</td></tr><tr><td rowspan="2">HalluQA</td><td>(en)</td><td>20.00%</td><td>25.95%</td><td>34.27%</td><td>30.90%</td><td>51.89%</td><td>54.05%</td><td>38.38%</td><td>47.03%</td><td>25.97%</td><td>37.57%</td><td>22.83%</td><td>19.57%</td></tr><tr><td>(ch)</td><td>22.16%</td><td>22.16%</td><td>21.91%</td><td>24.16%</td><td>49.73%</td><td>51.35%</td><td>45.95%</td><td>44.86%</td><td>43.65%</td><td>43.09%</td><td>15.22%</td><td>16.30%</td></tr><tr><td rowspan="2">Chinese Domain</td><td>(en)</td><td>9.52%</td><td>20.78%</td><td>41.85%</td><td>42.73%</td><td>56.71%</td><td>58.44%</td><td>33.33%</td><td>55.84%</td><td>27.19%</td><td>46.05%</td><td>30.73%</td><td>24.24%</td></tr><tr><td>(ch)</td><td>32.47%</td><td>32.47%</td><td>41.85%</td><td>41.85%</td><td>59.31%</td><td>59.74%</td><td>63.64%</td><td>63.20%</td><td>62.28%</td><td>61.84%</td><td>18.61%</td><td>18.61%</td></tr><tr><td rowspan="2">TriviaQA</td><td>(en)</td><td>36.32%</td><td>36.32%</td><td>90.53%</td><td>90.37%</td><td>94.09%</td><td>94.09%</td><td>79.33%</td><td>79.17%</td><td>59.59%</td><td>59.47%</td><td>77.27%</td><td>77.16%</td></tr><tr><td>(ch)</td><td>21.33%</td><td>31.95%</td><td>54.60%</td><td>82.67%</td><td>82.77%</td><td>91.90%</td><td>59.43%</td><td>75.56%</td><td>41.53%</td><td>52.99%</td><td>43.92%</td><td>65.17%</td></tr><tr><td rowspan="2">WebQA</td><td>(en)</td><td>28.51%</td><td>38.15%</td><td>59.08%</td><td>58.88%</td><td>67.70%</td><td>69.41%</td><td>57.07%</td><td>68.71%</td><td>49.48%</td><td>61.08%</td><td>50.00%</td><td>48.09%</td></tr><tr><td>(ch)</td><td>48.69%</td><td>48.49%</td><td>57.35%</td><td>57.86%</td><td>72.52%</td><td>72.42%</td><td>76.93%</td><td>76.13%</td><td>71.12%</td><td>71.33%</td><td>37.02%</td><td>38.43%</td></tr></table>

Table 2: Six LLMs' performance with our proposed method.

**The Impact of the Low-Resource Detector.** The low-resource detector serves as a filter that sifts the language-specific queries from the majority of queries, which involve only common sense, thus improving efficiency and reducing the expense of the pipeline. As can be observed in Figure 7, the low-resource query detector significantly reduces the average inference time per sample from more than 9 seconds to less than 6.5 seconds when the ratio of low-resource queries in the dataset is 0.05. As the ratio of low-resource queries increases, the detector passes more samples into the translation pipeline and increases the average inference time.

Another intriguing finding is that the low-resource detector also increases model performance. As shown in Table 3, the performance of the pipeline is unstable when we remove the low-resource detector, and the overall performance drops, as observed in Figure 7. This indicates that the detector and the LLM itself can be complementary. The full results of the models' performance without the low-resource detector can be found in Table 7.

<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Lang.</td><td colspan="2">Yi-34b</td><td colspan="2">Qwen-turbo</td><td colspan="2">Llama3-Ch.</td></tr><tr><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td></tr><tr><td rowspan="2">HalluEval</td><td>(en)</td><td>42.86%</td><td>41.75%</td><td>29.31%</td><td>29.59%</td><td>40.67%</td><td>40.67%</td></tr><tr><td>(ch)</td><td>25.10%</td><td>39.81%</td><td>19.35%</td><td>26.51%</td><td>25.35%</td><td>37.33%</td></tr><tr><td rowspan="2">HalluQA</td><td>(en)</td><td>38.38%</td><td>47.03%</td><td>25.97%</td><td>37.57%</td><td>22.83%</td><td>18.48%</td></tr><tr><td>(ch)</td><td>45.95%</td><td>45.95%</td><td>43.65%</td><td>39.78%</td><td>15.22%</td><td>20.65%</td></tr><tr><td rowspan="2">Chinese Domain</td><td>(en)</td><td>33.33%</td><td>57.58%</td><td>27.19%</td><td>48.25%</td><td>30.74%</td><td>24.24%</td></tr><tr><td>(ch)</td><td>63.64%</td><td>57.14%</td><td>62.28%</td><td>62.28%</td><td>18.61%</td><td>22.51%</td></tr></table>

Table 3: Selected LLMs' performance in the setting without a low-resource detector.

**The Impact of the Language Selection Module.** The language selection module can choose the proper language to answer the question with a model-specific choice. It is also flexible in the multilingual setting, as the resulting target language can be more than two in our tests. Nevertheless, we validate its effectiveness in the bilingual setting, comparing it with the strategy of using the opposite language whenever a query is detected as low-resource, and show the results in Table 4. The trade-off between its cost and error can also be found in Figure 7.

Figure 7: The relationship between time efficiency and error rate. The error rate is the percentage sum of all decreases across the five datasets (values in red in Table 2, Table 3 ($w/o$ Detection), and Table 4 ($w/o$ Selection)).

<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Lang.</td><td colspan="2">Yi-34b</td><td colspan="2">Qwen-turbo</td><td colspan="2">Llama3-Ch.</td></tr><tr><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td></tr><tr><td rowspan="2">HalluEval</td><td>(en)</td><td>42.86%</td><td>42.72%</td><td>29.31%</td><td>29.17%</td><td>40.67%</td><td>40.25%</td></tr><tr><td>(ch)</td><td>25.10%</td><td>42.58%</td><td>19.35%</td><td>28.61%</td><td>25.34%</td><td>39.97%</td></tr><tr><td rowspan="2">HalluQA</td><td>(en)</td><td>38.38%</td><td>46.49%</td><td>25.97%</td><td>38.67%</td><td>22.83%</td><td>21.74%</td></tr><tr><td>(ch)</td><td>45.95%</td><td>44.86%</td><td>43.65%</td><td>41.44%</td><td>15.21%</td><td>15.76%</td></tr><tr><td rowspan="2">Chinese Domain</td><td>(en)</td><td>33.33%</td><td>61.04%</td><td>27.19%</td><td>58.33%</td><td>30.74%</td><td>19.48%</td></tr><tr><td>(ch)</td><td>63.64%</td><td>60.17%</td><td>62.28%</td><td>58.33%</td><td>18.61%</td><td>18.61%</td></tr></table>

Table 4: Selected LLMs' performance in the setting without language selection.

As we can see from Figure 7, the language selection module adds only a small inference cost while significantly improving model performance. This is due to the existence of queries that are low-resource for both languages, for which switching to the opposite language may make the situation worse. In these situations, the language selection module may pick a third language to better answer the question. The full results without the language selection module can be found in Table 8.

**The Comparison between Answer Replacement and Integration.** We further investigated the effectiveness of the answer replacement and integration strategies. Given that QA setups with a golden answer may not always accommodate answer integration well (for example, when the answers in two different languages factually conflict), we opted for a subset of AlignBench (Liu et al., 2023a) as our evaluation dataset. AlignBench provides a comprehensive, multi-dimensional benchmark designed to assess the alignment of LLMs in Chinese, featuring a variety of open-ended questions. To create a bilingual dataset, we translated the Chinese questions into English. For each response evaluation, we employed an LLM-as-a-judge approach, utilizing the prompt template from AlignBench; the LLM judge assigned each response an overall score ranging from 1 to 10. As indicated in Table 5, both replacement and integration significantly enhanced the LLMs' performance on most datasets. Direct replacement led to more substantial improvements but also introduced a higher rate of errors, as evidenced by the performance dips in GPT-4 and Llama3-Ch. Interestingly, the integration method showed a more pronounced performance improvement in English responses, suggesting that LLMs may possess stronger capabilities for answer optimization in English than in Chinese (Yang et al., 2024a).

<table><tr><td rowspan="2">Type</td><td rowspan="2">Lang.</td><td colspan="2">ChatGPT</td><td colspan="2">GPT-4</td><td colspan="2">ChatGLM3</td><td colspan="2">Yi-34b</td><td colspan="2">Qwen-Turbo</td><td colspan="2">Llama3-Ch.</td></tr><tr><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td></tr><tr><td rowspan="2">Integrate</td><td>(ch)</td><td>4.98</td><td>5.26</td><td>6.90</td><td>6.85</td><td>4.15</td><td>4.09</td><td>5.82</td><td>5.91</td><td>5.88</td><td>5.87</td><td>3.71</td><td>3.58</td></tr><tr><td>(en)</td><td>5.92</td><td>6.02</td><td>7.32</td><td>7.54</td><td>4.02</td><td>4.07</td><td>5.86</td><td>6.13</td><td>5.59</td><td>5.78</td><td>4.60</td><td>4.60</td></tr><tr><td rowspan="2">Replace</td><td>(ch)</td><td>4.98</td><td>5.47</td><td>6.90</td><td>6.98</td><td>4.15</td><td>4.16</td><td>5.82</td><td>6.23</td><td>5.88</td><td>6.00</td><td>3.71</td><td>3.93</td></tr><tr><td>(en)</td><td>5.92</td><td>5.97</td><td>7.32</td><td>7.12</td><td>4.02</td><td>4.26</td><td>5.86</td><td>6.25</td><td>5.59</td><td>5.88</td><td>4.60</td><td>4.54</td></tr></table>

Table 5: Model performance on AlignBench (Liu et al., 2023a) in the settings of answer replacement and integration.

**The Impact of Different Detection Models.** As we build a separate low-resource detector for each language, the choice of tokenizer and classification model affects detector training and thereby influences the overall performance. We adopt language-specific BERT and multilingual BERT models to train our low-resource query detector and report the results in Table 6. As shown in the table, using a language-specific model and tokenizer slightly improves on using a multilingual model.

<table><tr><td>Model</td><td>Acc.</td><td>Recall</td><td>Precision</td><td>F1</td></tr><tr><td>bert-base-chinese (ch)</td><td>86.64</td><td>86.64</td><td>86.68</td><td>86.66</td></tr><tr><td>bert-uncased (en)</td><td>94.98</td><td>94.98</td><td>94.88</td><td>94.91</td></tr><tr><td>Multilingual BERT (ch)</td><td>86.47</td><td>86.47</td><td>86.58</td><td>86.51</td></tr><tr><td>Multilingual BERT (en)</td><td>94.73</td><td>94.73</td><td>94.64</td><td>94.67</td></tr></table>

Table 6: The impact of model selection on detector training.
# 5 Discussion on Other Approaches

Since the confidence of generated content is related to its entropy during the generation process, a natural idea is to compute the entropy in different languages and compare them to decide which language is best suited to answer the question. This approach is widely used for measuring LLMs' uncertainty and detecting hallucinations (Manakul et al., 2023). However, our trials demonstrate that this approach is infeasible, achieving merely random-guess-level performance when selecting the right language for a given query.

To explore how entropy-related statistics might be leveraged to select the target language, we train a model $f$ that takes the statistics as input and outputs the language selection $Y$. The statistics we use for a language $l$ include the entropy of the query $E_{Q_l}$, the entropy of the response $E_{R_l}$, the perplexity of the query $P_{Q_l}$, and the perplexity of the response $P_{R_l}$. We adopt an MLP as the classification model $f:(E_{Q_l},E_{R_l},P_{Q_l},P_{R_l})\to Y$ and train it on the low-resource query dataset we construct; we also trained SVM and random-forest classifiers on Llama2-7b's outputs. The accuracy is no more than $60\%$, which is merely random-guess-level performance given the entropy-related statistics as input. We attribute this to the hallucination issue of LLMs: a model may be overconfident even with a wrong answer (Groot and Valdenegro-Toro, 2024), indicating that current LLMs are not well calibrated (Zhang et al., 2024).
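
For concreteness, a sketch of this entropy-based baseline follows; the feature extraction and classifier setup are our reconstruction from the description above, and the model identifier, feature layout, and hyperparameters are assumptions (the Llama-2 checkpoint is gated on Hugging Face).

```python
# Sketch of the Section 5 baseline: extract (E_Q, E_R, P_Q, P_R) per language
# from an open model's token distributions, then train a small classifier to
# pick the target language. Feature extraction is shown for a single text.
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM
from sklearn.neural_network import MLPClassifier

tok = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
lm = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")

@torch.no_grad()
def entropy_and_ppl(text: str) -> tuple[float, float]:
    ids = tok(text, return_tensors="pt").input_ids
    logits = lm(ids).logits[:, :-1]                 # position t predicts token t+1
    probs = F.softmax(logits, dim=-1)
    ent = -(probs * probs.clamp_min(1e-12).log()).sum(-1).mean()  # mean token entropy
    nll = F.cross_entropy(logits.squeeze(0), ids[0, 1:])          # mean NLL
    return ent.item(), nll.exp().item()             # (entropy, perplexity)

# X: rows of per-language features (E_Q, E_R, P_Q, P_R); y: best language.
# The paper reports <=60% accuracy with MLP / SVM / random forests here.
clf = MLPClassifier(hidden_layer_sizes=(32,), max_iter=500)
# clf.fit(X_train, y_train)
```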
# 6 Conclusion

This paper presents a method to improve the multilingual capabilities of LLMs by leveraging knowledge from various languages; it includes a low-resource knowledge detector, a language selection process, and answer replacement & integration. Our experiments show significant performance enhancements, especially in reducing disparities across languages. Moreover, each module in our method contributes to the improvement. Overall, this study underscores the potential of LLMs to unify their multilingual capabilities and provides insights for future research.

# Limitations

Our method requires training a separate low-resource query detector for each language. This is inconvenient, as developers for a given language must construct a low-resource training set themselves, which involves collecting language-specific data. The dataset must also be updated over time as new language-specific data emerges.

# Ethics Statement

This study adheres to ethical standards in AI research and development. We acknowledge that while our methods aim to enhance the multilingual capabilities of LLMs, they must be implemented with careful consideration of potential biases. Efforts were made to ensure that the knowledge aggregation does not disproportionately favor any particular language or cultural perspective. We also emphasize transparency in our methodologies and findings to enable scrutiny and replication by the research community. The research was conducted without utilizing any personally identifiable information, thereby safeguarding privacy and upholding data protection standards. We commit to ongoing evaluation of our methods in diverse linguistic settings to address and mitigate any emergent biases or disparities. This research seeks not only to advance technology but also to promote inclusivity and fairness in AI applications across different linguistic and cultural groups. In this paper, we utilized AI tools to aid in writing and coding, ensuring that they did not directly contribute to the writing process and that their use adheres to academic standards. Additionally, we ensured that all datasets and benchmarks used in the study comply with their intended purposes and standards.
# References
|
| 260 |
+
|
| 261 |
+
2024. Llama3-chinese. https://github.com/LlamaFamily/Llama-Chinese.
|
| 262 |
+
Hasan Abu-Rasheed, Christian Weber, and Madjid Fathi. 2024. Knowledge graphs as context sources for llm-based explanations of learning recommendations.
|
| 263 |
+
01. AI, :, Alex Young, Bei Chen, Chao Li, Chengen Huang, Ge Zhang, Guanwei Zhang, Heng Li, Jiangcheng Zhu, Jianqun Chen, Jing Chang, Kaidong Yu, Peng Liu, Qiang Liu, Shawn Yue, Senbin Yang, Shiming Yang, Tao Yu, Wen Xie, Wenhao Huang, Xiaohui Hu, Xiaoyi Ren, Xinyao Niu, Pengcheng Nie, Yuchi Xu, Yudong Liu, Yue Wang, Yuxuan Cai, Zhenyu Gu, Zhiyuan Liu, and Zonghong Dai. 2024. Yi: Open foundation models by 01.ai.
|
| 264 |
+
Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. 2019. On the cross-lingual transferability of monolingual representations. CoRR, abs/1910.11856.
|
| 265 |
+
Akari Asai, Sneha Kudugunta, Xinyan Velocity Yu, Terra Blevins, Hila Gonen, Machel Reid, Yulia Tsvetkov, Sebastian Ruder, and Hannaneh Hajishirzi. 2023. Buffet: Benchmarking large language models for few-shot cross-lingual transfer. arXiv preprint arXiv:2305.14857.
|
| 266 |
+
Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan Zhou, and Tianhang Zhu. 2023. Qwen technical report. arXiv preprint arXiv:2309.16609.

Dongping Chen, Yue Huang, Siyuan Wu, Jingyu Tang, Liuyi Chen, Yilin Bai, Zhigang He, Chenlong Wang, Huichi Zhou, Yiqiang Li, et al. 2024a. Gui-world: A dataset for gui-oriented multimodal llm-based agents. arXiv preprint arXiv:2406.10819.

Du Chen, Yi Huang, Xiaopu Li, Yongqiang Li, Yongqiang Liu, Haihui Pan, Leichao Xu, Dacheng Zhang, Zhipeng Zhang, and Kun Han. 2024b. Orion-14b: Open-source multilingual large language models.

Qinyuan Cheng, Tianxiang Sun, Wenwei Zhang, Siyin Wang, Xiangyang Liu, Mozhi Zhang, Junliang He, Mianqiu Huang, Zhangyue Yin, Kai Chen, and Xipeng Qiu. 2023. Evaluating hallucinations in Chinese large language models.

Zhengxiao Du, Yujie Qian, Xiao Liu, Ming Ding, Jiezhong Qiu, Zhilin Yang, and Jie Tang. 2022. Glm: General language model pretraining with autoregressive blank infilling.

Julen Etxaniz, Gorka Azkune, Aitor Soroa, Oier Lopez de Lacalle, and Mikel Artetxe. 2023. Do multilingual language models think better in english?

Shangbin Feng, Weijia Shi, Yike Wang, Wenxuan Ding, Vidhisha Balachandran, and Yulia Tsvetkov. 2024. Don't hallucinate, abstain: Identifying llm knowledge gaps via multi-llm collaboration.

Chujie Gao, Qihui Zhang, Dongping Chen, Yue Huang, Siyuan Wu, Zhengyan Fu, Yao Wan, Xiangliang Zhang, and Lichao Sun. 2024. The best of both worlds: Toward an honest and helpful large language model. arXiv preprint arXiv:2406.00380.

Tobias Groot and Matias Valdenegro-Toro. 2024. Overconfidence is key: Verbalized uncertainty evaluation in large language and vision-language models.

Xinyan Guan, Yanjiang Liu, Hongyu Lin, Yaojie Lu, Ben He, Xianpei Han, and Le Sun. 2024. Mitigating large language model hallucinations via autonomous knowledge graph-based retrofitting. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 18126–18134.

Taicheng Guo, Xiuying Chen, Yaqi Wang, Ruidi Chang, Shichao Pei, Nitesh V. Chawla, Olaf Wiest, and Xiangliang Zhang. 2024. Large language model based multi-agents: A survey of progress and challenges.

Taicheng Guo, Kehan Guo, Bozhao Nan, Zhenwen Liang, Zhichun Guo, Nitesh Chawla, Olaf Wiest, and Xiangliang Zhang. 2023. What can large language models do in chemistry? a comprehensive benchmark on eight tasks. In Advances in Neural Information Processing Systems, volume 36, pages 59662–59688. Curran Associates, Inc.

Haoyang Huang, Tianyi Tang, Dongdong Zhang, Xin Zhao, Ting Song, Yan Xia, and Furu Wei. 2023a. Not all languages are created equal in LLMs: Improving multilingual capability by cross-lingual-thought prompting. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12365–12394, Singapore. Association for Computational Linguistics.

Yue Huang, Jiawen Shi, Yuan Li, Chenrui Fan, Siyuan Wu, Qihui Zhang, Yixin Liu, Pan Zhou, Yao Wan, Neil Zhenqiang Gong, et al. 2023b. Metatool benchmark for large language models: Deciding whether to use tools and which to use. arXiv preprint arXiv:2310.03128.

Yue Huang, Kai Shu, Philip S. Yu, and Lichao Sun. 2024a. From creation to clarification: Chatgpt's journey through the fake news quagmire. In Companion Proceedings of the ACM Web Conference 2024, WWW '24, pages 513–516, New York, NY, USA. Association for Computing Machinery.

Yue Huang and Lichao Sun. 2024. Fakegpt: Fake news generation, explanation and detection of large language models.

Yue Huang, Lichao Sun, Haoran Wang, Siyuan Wu, Qihui Zhang, Yuan Li, Chujie Gao, Yixin Huang, Wenhan Lyu, Yixuan Zhang, et al. 2024b. Position: Trustllm: Trustworthiness in large language models. In International Conference on Machine Learning, pages 20166–20270. PMLR.

Yue Huang, Qihui Zhang, Lichao Sun, et al. 2023c. Trustgpt: A benchmark for trustworthy and responsible large language models. arXiv preprint arXiv:2306.11507.

Mandar Joshi, Eunsol Choi, Daniel S. Weld, and Luke Zettlemoyer. 2017. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension.

Viet Dac Lai, Nghia Trung Ngo, Amir Pouran Ben Veyseh, Hieu Man, Franck Dernoncourt, Trung Bui, and Thien Huu Nguyen. 2023. Chatgpt beyond english: Towards a comprehensive evaluation of large language models in multilingual learning.

Chong Li, Wen Yang, Jiajun Zhang, Jinliang Lu, Shaonan Wang, and Chengqing Zong. 2024a. X-instruction: Aligning language model in low-resource languages with self-curated cross-lingual instructions.

Haonan Li, Yixuan Zhang, Fajri Koto, Yifei Yang, Hai Zhao, Yeyun Gong, Nan Duan, and Timothy Baldwin. 2024b. Cmmlu: Measuring massive multitask language understanding in chinese.

Junyi Li, Xiaoxue Cheng, Wayne Xin Zhao, Jian-Yun Nie, and Ji-Rong Wen. 2023a. Halueval: A large-scale hallucination evaluation benchmark for large language models.

Peng Li, Wei Li, Zhengyan He, Xuguang Wang, Ying Cao, Jie Zhou, and Wei Xu. 2016. Dataset and neural recurrent sequence labeling model for open-domain factoid question answering.

Yingji Li, Mengnan Du, Rui Song, Xin Wang, and Ying Wang. 2023b. A survey on fairness in large language models. arXiv preprint arXiv:2308.10149.

Yuan Li, Yue Huang, Yuli Lin, Siyuan Wu, Yao Wan, and Lichao Sun. 2024c. I think, therefore i am: Awareness in large language models. arXiv preprint arXiv:2401.17882.

Yuan Li, Yue Huang, Hongyi Wang, Xiangliang Zhang, James Zou, and Lichao Sun. 2024d. Quantifying ai psychology: A psychometrics benchmark for large language models. arXiv preprint arXiv:2406.17675.

Stephanie Lin, Jacob Hilton, and Owain Evans. 2022. Truthfulqa: Measuring how models mimic human falsehoods.

Chaoqun Liu, Wenxuan Zhang, Yiran Zhao, Anh Tuan Luu, and Lidong Bing. 2024. Is translation all you need? a study on solving multilingual tasks with large language models. arXiv preprint arXiv:2403.10258.

Xiao Liu, Xuanyu Lei, Shengyuan Wang, Yue Huang, Zhuoer Feng, Bosi Wen, Jiale Cheng, Pei Ke, Yifan Xu, Weng Lam Tam, et al. 2023a. Alignbench: Benchmarking chinese alignment of large language models. arXiv preprint arXiv:2311.18743.

Xiao Liu, Hao Yu, Hanchen Zhang, Yifan Xu, Xuanyu Lei, Hanyu Lai, Yu Gu, Hangliang Ding, Kaiwen Men, Kejuan Yang, Shudan Zhang, Xiang Deng, Aohan Zeng, Zhengxiao Du, Chenhui Zhang, Sheng Shen, Tianjun Zhang, Yu Su, Huan Sun, Minlie Huang, Yuxiao Dong, and Jie Tang. 2023b. Agentbench: Evaluating llms as agents.

Zhengliang Liu, Yue Huang, Xiaowei Yu, Lu Zhang, Zihao Wu, Chao Cao, Haixing Dai, Lin Zhao, Yiwei Li, Peng Shu, et al. 2023c. Deid-gpt: Zero-shot medical text de-identification by gpt-4. arXiv preprint arXiv:2303.11032.

Shayne Longpre, Yi Lu, and Joachim Daiber. 2021. Mkqa: A linguistically diverse benchmark for multilingual open domain question answering.

Potsawee Manakul, Adian Liusie, and Mark JF Gales. 2023. Selfcheckgpt: Zero-resource black-box hallucination detection for generative large language models. arXiv preprint arXiv:2303.08896.

Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng-Xin Yong, Hailey Schoelkopf, Xiangru Tang, Dragomir Radev, Alham Fikri Aji, Khalid Almubarak, Samuel Albanie, Zaid Alyafeai, Albert Webson, Edward Raff, and Colin Raffel. 2023. Crosslingual generalization through multitask finetuning.

Thuat Nguyen, Chien Van Nguyen, Viet Dao Lai, Hieu Man, Nghia Trung Ngo, Franck Dernoncourt, Ryan A. Rossi, and Thien Huu Nguyen. 2023. Culturax: A cleaned, enormous, and multilingual dataset for large language models in 167 languages.

OpenAI. 2023a. Chatgpt. https://openai.com/product/chatgpt.

OpenAI. 2023b. Gpt-4. https://openai.com/gpt-4.

Nooshin Pourkamali and Shler Ebrahim Sharifi. 2024. Machine translation with large language models: Prompt engineering for persian, english, and russian directions. arXiv preprint arXiv:2401.08429.

Libo Qin, Qiguang Chen, Fuxuan Wei, Shijue Huang, and Wanxiang Che. 2023. Cross-lingual prompting: Improving zero-shot chain-of-thought reasoning across languages. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2695–2709, Singapore. Association for Computational Linguistics.

Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and Philip S. Yu. 2024. Multilingual large language model: A survey of resources, taxonomy and frontiers.

Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel M. Ni, Heung-Yeung Shum, and Jian Guo. 2024a. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph.

Kai Sun, Yifan Ethan Xu, Hanwen Zha, Yue Liu, and Xin Luna Dong. 2024b. Head-to-tail: How knowledgeable are large language models (llms)? a.k.a. will llms replace knowledge graphs?

InternLM Team. 2023. Internlm: A multilingual language model with progressively enhanced capabilities.

SM Tonmoy, SM Zaman, Vinija Jain, Anku Rani, Vipula Rawte, Aman Chadha, and Amitava Das. 2024. A comprehensive survey of hallucination mitigation techniques in large language models. arXiv preprint arXiv:2401.01313.

Priyansh Trivedi, Gaurav Maheshwari, Mohnish Dubey, and Jens Lehmann. 2017. Lc-quad: A corpus for complex question answering over knowledge graphs. In International Semantic Web Conference, pages 210–218. Springer.

Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. 2023a. Chain-of-thought prompting elicits reasoning in large language models.

Xiangpeng Wei, Haoran Wei, Huan Lin, Tianhao Li, Pei Zhang, Xingzhang Ren, Mei Li, Yu Wan, Zhiwei Cao, Binbin Xie, Tianxiang Hu, Shangjie Li, Binyuan Hui, Bowen Yu, Dayiheng Liu, Baosong Yang, Fei Huang, and Jun Xie. 2023b. Polylm: An open source polyglot large language model.

Siyuan Wu, Yue Huang, Chujie Gao, Dongping Chen, Qihui Zhang, Yao Wan, Tianyi Zhou, Xiangliang Zhang, Jianfeng Gao, Chaowei Xiao, et al. 2024. Unigen: A unified framework for textual dataset generation using large language models. arXiv preprint arXiv:2406.18966.

Liang Xu, Hai Hu, Xuanwei Zhang, Lu Li, Chenjie Cao, Yudong Li, Yechen Xu, Kai Sun, Dian Yu, Cong Yu, Yin Tian, Qianqian Dong, Weitang Liu, Bo Shi, Yiming Cui, Junyi Li, Jun Zeng, Rongzhao Wang, Weijian Xie, Yanting Li, Yina Patterson, Zuoyu Tian, Yiwen Zhang, He Zhou, Shaoweihua Liu, Zhe Zhao, Qipeng Zhao, Cong Yue, Xinrui Zhang, Zhengliang Yang, Kyle Richardson, and Zhenzhong Lan. 2020. CLUE: A Chinese language understanding evaluation benchmark. In Proceedings of the 28th International Conference on Computational Linguistics, pages 4762–4772, Barcelona, Spain (Online). International Committee on Computational Linguistics.

Rongwu Xu, Zehan Qi, Cunxiang Wang, Hongru Wang, Yue Zhang, and Wei Xu. 2024a. Knowledge conflicts for llms: A survey.

Yuemei Xu, Ling Hu, Jiayi Zhao, Zihan Qiu, Yuqi Ye, and Hanwen Gu. 2024b. A survey on multilingual large language models: Corpora, alignment, and bias.

Aiyuan Yang, Bin Xiao, Bingning Wang, Borong Zhang, Ce Bian, Chao Yin, Chenxu Lv, Da Pan, Dian Wang, Dong Yan, et al. 2023. Baichuan 2: Open large-scale language models. arXiv preprint arXiv:2309.10305.

Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V. Le, Denny Zhou, and Xinyun Chen. 2024a. Large language models as optimizers.

Linyao Yang, Hongyang Chen, Zhao Li, Xiao Ding, and Xindong Wu. 2024b. Give us the facts: Enhancing large language models with knowledge graphs for fact-aware language modeling.

Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models.

Yue Yu, Yuchen Zhuang, Jieyu Zhang, Yu Meng, Alexander Ratner, Ranjay Krishna, Jiaming Shen, and Chao Zhang. 2023. Large language model as attributed training data generator: A tale of diversity and bias.

Aohan Zeng, Xiao Liu, Zhengxiao Du, Zihan Wang, Hanyu Lai, Ming Ding, Zhuoyi Yang, Yifan Xu, Wendi Zheng, Xiao Xia, et al. 2022. Glm-130b: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414.

Kai Zhang, Jun Yu, Zhiling Yan, Yixin Liu, Eashan Adhikarla, Sunyang Fu, Xun Chen, Chen Chen, Yuyin Zhou, Xiang Li, et al. 2023a. Biomedgpt: A unified and generalist biomedical generative pre-trained transformer for vision, language, and multimodal tasks. arXiv preprint arXiv:2305.17100.

Mozhi Zhang, Mianqiu Huang, Rundong Shi, Linsen Guo, Chong Peng, Peng Yan, Yaqian Zhou, and Xipeng Qiu. 2024. Calibrating the confidence of large language models by eliciting fidelity.

Shaolei Zhang, Qingkai Fang, Zhuocheng Zhang, Zhengrui Ma, Yan Zhou, Langlin Huang, Mengyu Bu, Shangtong Gui, Yunji Chen, Xilin Chen, and Yang Feng. 2023b. Bayling: Bridging cross-lingual alignment and instruction following through interactive translation for large language models.

Wenxuan Zhang, Sharifah Mahani Aljunied, Chang Gao, Yew Ken Chia, and Lidong Bing. 2023c. M3exam: A multilingual, multimodal, multilevel benchmark for examining large language models.
# A Dataset Details

- TriviaQA (Joshi et al., 2017) is a reading comprehension dataset that features more than 650,000 question-answer-evidence triples. It consists of question-answer pairs created by trivia aficionados, along with independently collected evidence documents—averaging six per question—that offer robust distant supervision for answering the questions.
- HaluEval (Li et al., 2023a) is a benchmark designed to assess how well LLMs recognize hallucinations, i.e., unverifiable or incorrect content, in their outputs. It includes a collection of generated texts and human-annotated samples that help evaluate the models' performance in detecting such errors.
- HalluQA (Cheng et al., 2023) is a dataset consisting of 450 carefully crafted adversarial questions that cover various domains, incorporating elements of Chinese historical culture, customs, and social phenomena. It aims to evaluate LLMs on their propensity to produce two types of errors: imitative falsehoods and factual inaccuracies.
- WebQA (Li et al., 2016) is a large-scale, human-annotated real-world QA dataset, developed to address the scarcity of extensive real-world QA datasets for neural QA systems.
# B Experiment Results

We show the full experiment results in Table 7, Table 8, and Figure 8.
# C Details of Constructed Dataset

For the generated dataset, inspired by previous studies (Huang et al., 2023b; Yu et al., 2023), we employed attribute-guided prompting to instruct LLMs to generate relevant questions on specific topics, as illustrated in Table 9. We chose GPT-4 as our generation model because of its exceptional ability to follow instructions. The prompt template is shown in Figure 9. For the generated items, we manually evaluated the correctness of their labels to ensure data quality.
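As a rough illustration, the following sketch shows how the attribute-guided generation loop might be wired together; `query_gpt4` is a hypothetical stand-in for a GPT-4 API call, and the template string abbreviates the full text of Figure 9.

```python
import json

# Condensed from Figure 9; "..." elides the full template text.
TEMPLATE = (
    "Next, I will provide you with a topic, and you will assist me in "
    "generating data based on this topic. ... "
    "The topic I provide is: {topic}"
)

def generate_items(topic: str, query_gpt4) -> dict:
    """Fill the template for one topic word and parse the JSON mapping
    of question -> category that the model is asked to return."""
    response = query_gpt4(TEMPLATE.format(topic=topic))
    return json.loads(response)

# topics = ["History", "Literature", "Science", ...]  # topic words from Table 9
# raw_dataset = {t: generate_items(t, query_gpt4) for t in topics}
```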
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Lang.</td><td colspan="2">ChatGLM3</td><td colspan="2">ChatGPT</td><td colspan="2">GPT-4</td><td colspan="2">Yi-34b</td><td colspan="2">Qwen-turbo</td><td colspan="2">Llama3-Ch.</td></tr><tr><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td></tr><tr><td rowspan="2">HaluEval</td><td>(en)</td><td>18.03%</td><td>18.03%</td><td>57.98%</td><td>57.84%</td><td>67.13%</td><td>66.99%</td><td>42.86%</td><td>41.75%</td><td>29.31%</td><td>29.59%</td><td>40.67%</td><td>40.67%</td></tr><tr><td>(ch)</td><td>11.23%</td><td>17.34%</td><td>32.07%</td><td>52.38%</td><td>47.99%</td><td>65.05%</td><td>25.10%</td><td>39.81%</td><td>19.35%</td><td>26.51%</td><td>25.35%</td><td>37.33%</td></tr><tr><td rowspan="2">HalluQA</td><td>(en)</td><td>20.00%</td><td>25.41%</td><td>34.27%</td><td>30.90%</td><td>51.89%</td><td>53.51%</td><td>38.38%</td><td>47.03%</td><td>25.97%</td><td>37.57%</td><td>22.83%</td><td>18.48%</td></tr><tr><td>(ch)</td><td>22.16%</td><td>22.70%</td><td>21.91%</td><td>25.28%</td><td>49.73%</td><td>51.89%</td><td>45.95%</td><td>45.95%</td><td>43.65%</td><td>39.78%</td><td>15.22%</td><td>20.65%</td></tr><tr><td rowspan="2">Chinese Domain</td><td>(en)</td><td>9.52%</td><td>21.21%</td><td>41.85%</td><td>42.73%</td><td>56.71%</td><td>57.58%</td><td>33.33%</td><td>57.58%</td><td>27.19%</td><td>48.25%</td><td>30.74%</td><td>24.24%</td></tr><tr><td>(ch)</td><td>32.47%</td><td>25.54%</td><td>41.85%</td><td>42.29%</td><td>59.31%</td><td>58.44%</td><td>63.64%</td><td>57.14%</td><td>62.28%</td><td>62.28%</td><td>18.61%</td><td>22.51%</td></tr><tr><td rowspan="2">TriviaQA</td><td>(en)</td><td>36.32%</td><td>36.38%</td><td>90.53%</td><td>90.31%</td><td>94.09%</td><td>93.93%</td><td>79.33%</td><td>78.90%</td><td>59.59%</td><td>59.47%</td><td>77.27%</td><td>77.05%</td></tr><tr><td>(ch)</td><td>21.33%</td><td>32.22%</td><td>54.60%</td><td>83.33%</td><td>82.77%</td><td>92.29%</td><td>59.43%</td><td>76.27%</td><td>41.53%</td><td>53.55%</td><td>43.92%</td><td>66.32%</td></tr><tr><td rowspan="2">WebQA</td><td>(en)</td><td>28.51%</td><td>38.96%</td><td>59.08%</td><td>58.98%</td><td>67.70%</td><td>69.61%</td><td>57.07%</td><td>69.71%</td><td>49.48%</td><td>62.11%</td><td>50.00%</td><td>47.08%</td></tr><tr><td>(ch)</td><td>48.69%</td><td>42.07%</td><td>57.35%</td><td>59.29%</td><td>72.52%</td><td>72.32%</td><td>76.93%</td><td>74.12%</td><td>71.12%</td><td>70.70%</td><td>37.02%</td><td>40.54%</td></tr></table>
Table 7: Six LLMs' performance on the setting without a low-resource detector.
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Lang.</td><td colspan="2">ChatGLM3</td><td colspan="2">ChatGPT</td><td colspan="2">GPT-4</td><td colspan="2">Yi-34b</td><td colspan="2">Qwen-turbo</td><td colspan="2">Llama3-Ch.</td></tr><tr><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td><td>Orig.</td><td>Impro.</td></tr><tr><td rowspan="2">HaluEval</td><td>(en)</td><td>18.03%</td><td>18.31%</td><td>57.98%</td><td>57.70%</td><td>67.13%</td><td>66.57%</td><td>42.86%</td><td>42.72%</td><td>29.31%</td><td>29.17%</td><td>40.67%</td><td>40.25%</td></tr><tr><td>(ch)</td><td>11.23%</td><td>18.03%</td><td>32.07%</td><td>56.02%</td><td>47.99%</td><td>66.16%</td><td>25.10%</td><td>42.58%</td><td>19.35%</td><td>28.61%</td><td>25.34%</td><td>39.97%</td></tr><tr><td rowspan="2">HalluQA</td><td>(en)</td><td>20.00%</td><td>25.95%</td><td>34.27%</td><td>32.02%</td><td>51.89%</td><td>53.51%</td><td>38.38%</td><td>46.49%</td><td>25.97%</td><td>38.67%</td><td>22.83%</td><td>21.74%</td></tr><tr><td>(ch)</td><td>22.16%</td><td>23.78%</td><td>21.91%</td><td>23.60%</td><td>49.73%</td><td>51.35%</td><td>45.95%</td><td>44.86%</td><td>43.65%</td><td>41.44%</td><td>15.21%</td><td>15.76%</td></tr><tr><td rowspan="2">Chinese Domain</td><td>(en)</td><td>9.52%</td><td>32.03%</td><td>41.85%</td><td>41.85%</td><td>56.71%</td><td>59.31%</td><td>33.33%</td><td>61.04%</td><td>27.19%</td><td>58.33%</td><td>30.74%</td><td>19.48%</td></tr><tr><td>(ch)</td><td>32.47%</td><td>30.74%</td><td>41.85%</td><td>41.41%</td><td>59.31%</td><td>58.44%</td><td>63.64%</td><td>60.17%</td><td>62.28%</td><td>58.33%</td><td>18.61%</td><td>18.61%</td></tr><tr><td rowspan="2">TriviaQA</td><td>(en)</td><td>36.32%</td><td>35.78%</td><td>90.53%</td><td>89.09%</td><td>94.09%</td><td>93.54%</td><td>79.33%</td><td>78.73%</td><td>59.59%</td><td>58.80%</td><td>77.27%</td><td>76.12%</td></tr><tr><td>(ch)</td><td>21.33%</td><td>35.94%</td><td>54.60%</td><td>89.15%</td><td>82.77%</td><td>93.22%</td><td>59.43%</td><td>78.18%</td><td>41.53%</td><td>58.41%</td><td>43.92%</td><td>74.92%</td></tr><tr><td rowspan="2">WebQA</td><td>(en)</td><td>28.51%</td><td>44.38%</td><td>59.08%</td><td>59.90%</td><td>67.70%</td><td>70.81%</td><td>57.07%</td><td>73.72%</td><td>49.48%</td><td>67.70%</td><td>50.00%</td><td>46.48%</td></tr><tr><td>(ch)</td><td>48.69%</td><td>46.99%</td><td>57.35%</td><td>58.88%</td><td>72.52%</td><td>71.61%</td><td>76.93%</td><td>74.22%</td><td>71.12%</td><td>69.25%</td><td>37.02%</td><td>41.15%</td></tr></table>
Table 8: Six LLMs' performance on the setting without language selection.

|
| 372 |
+
|
| 373 |
+

|
| 374 |
+
|
| 375 |
+

|
| 376 |
+
|
| 377 |
+
Figure 8: Performance percentage of LLMs across different datasets. 'All correct' indicates that the LLMs answered correctly in both the Chinese and English datasets. 'All wrong' signifies that the LLMs answered incorrectly in both datasets. 'Half' denotes that the LLMs answered correctly in only one of the datasets.
|
| 378 |
+

|
| 379 |
+
All Correct All Wrong Half
|
| 380 |
+
|
| 381 |
+

|
| 382 |
+
|
| 383 |
+

|
| 384 |
+
|
| 385 |
+
# D Collection of Chinese Domain Dataset

Our Chinese domain dataset consists of 227 items. It encompasses knowledge and information specific to Chinese content, including aspects of geography, history, culture, and more. We sourced the content from a broad range of Chinese social media platforms and search engines. After initial curation, we filtered out content that cannot be accurately translated into English or whose meaning would shift upon translation, such as phrases from ancient Chinese.
# E Answer Evaluation

We adopt LLM-as-a-Judge for answer evaluation in all experiments to reduce the bias that comes from keyword matching. We use GPT-4 for our evaluation due to its exceptional capability and wide application in previous studies (Liu et al., 2023a; Gao et al., 2024). For the five QA datasets, we use the prompt template shown in Figure 10.
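For illustration, here is a minimal sketch of how the Figure 10 template might be wired into an evaluation helper; `query_gpt4` is a hypothetical stand-in for the GPT-4 call, and the template string abbreviates the full figure text.

```python
JUDGE_TEMPLATE = (
    "As a helpful assistant, your task is now to help me assess the "
    "correctness of the provided answers. ... Return me only one word: "
    "'correct' or 'wrong'.\n"          # "..." elides the full Figure 10 text
    "Question: {question}\n"
    "Answer: {gold}\n"
    "Here is the answer you should evaluate: {response}"
)

def judge(question: str, gold: str, response: str, query_gpt4) -> bool:
    """Return True if the GPT-4 judge deems the response correct."""
    verdict = query_gpt4(JUDGE_TEMPLATE.format(
        question=question, gold=gold, response=response))
    return verdict.strip().lower().startswith("correct")
```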
# F Prompt Template

We show the prompt templates used in our study in Figures 9, 10, 11, 12, 13, and 14.
# G Screenshots of Human Evaluation

We show the screenshots of human evaluation in Figure 15 and Figure 16.
# Prompt Template

Next, I will provide you with a topic, and you will assist me in generating data based on this topic. I need you to generate three categories: questions with an English background, questions with a Chinese background, and questions with no specific language.

I will provide you with some examples:

Question: Piaget believes that communication has two functions, one is the egocentric function, and the other is?

Category: English knowledge

Question: With one byte, how many different codes can be generated at most?

Category: Knowledge with no specific language

Question: What are some famous dishes from Guangdong?

Category: Chinese knowledge

For each type of question, you need to generate ten, a total of thirty.

You need to return the data in JSON format, as follows:

```txt
{ "Question": "Category", "Question": "Category", "Question": "Category", "Question": "Category", ... }
```

Please generate the corresponding data in Chinese.

The topic I provide is: [TOPIC]

Figure 9: Prompt template for the generated dataset.
# Prompt Template

As a helpful assistant, your task is now to help me assess the correctness of the provided answers. I will present a question along with its correct answer. Subsequently, I will also provide you with the answer you need to evaluate. If the answer to be evaluated correctly expresses the same meaning as the correct answer or contains the correct answer, then it is right. Ignore case errors. Although there are some errors in certain explanations within the answer, as long as the core answer is correct, the response is considered correct. Return me only one word: 'correct' or 'wrong'.

Here is the question and its correct answer:

Question: [QUESTION]

Answer: [ANSWER]

Here is the answer you should evaluate: [RES]

Figure 10: Prompt template for LLM-as-a-Judge.
# Prompt Template

You are a very helpful assistant. I will provide you with a question and the answers in both Chinese and English. You need to integrate the Chinese and English answers to provide the final answer. During the integration process, you need to follow these rules:

1. You should primarily refer to the Chinese answer, appropriately integrating parts of the English answer.
2. If there is a factual conflict between the English and Chinese answers, you must refer to the Chinese answer.
3. The integrated answer should be of higher quality than the individual answers and better address the corresponding question.
4. The integrated answer must be entirely in English.

Question: [[Q]]
Chinese answer: [[CH_RES]]
English answer: [[EN_RES]]

Figure 11: Prompt template for integration (For the situation when the selected language is English).
# Prompt Template

你是一个非常有帮助的助手。我将给你提供一个问题,以及该问题的中英文的答案。你需要融合中英文答案,给出最终的答案。在融合答案的过程中,你需要遵循下面的规则:

1. 你需要着重参考英文的答案,适当融合部分中文的答案。
2. 如果英文的答案与中文的答案发生事实性冲突,你必须参考英文的答案。
3. 融合后的答案应该比融合前的答案具有更高的质量,更好地回答对应的问题。
4. 融合后的答案必须全都是中文。

问题:[[Q]]

中文答案:[[CH_RES]]

英文答案:[[EN_RES]]

Figure 12: Prompt template for integration (For the situation when the selected language is Chinese).
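As a small illustration of how these two integration templates might be applied, the sketch below selects the template by the chosen language and fills in the placeholders; `query_llm` is a hypothetical stand-in for the model call, and the template strings abbreviate the full texts of Figures 11 and 12.

```python
# "..." elides the full template texts; the [[...]] placeholders match the figures.
EN_TEMPLATE = "You are a very helpful assistant. ... Question: [[Q]]\nChinese answer: [[CH_RES]]\nEnglish answer: [[EN_RES]]"
CH_TEMPLATE = "你是一个非常有帮助的助手。... 问题:[[Q]]\n中文答案:[[CH_RES]]\n英文答案:[[EN_RES]]"

def integrate(question, ch_res, en_res, selected_language, query_llm):
    """Fill the integration template that matches the selected language."""
    template = EN_TEMPLATE if selected_language == "en" else CH_TEMPLATE
    prompt = (template.replace("[[Q]]", question)
                      .replace("[[CH_RES]]", ch_res)
                      .replace("[[EN_RES]]", en_res))
    return query_llm(prompt)
```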
# Prompt Template

As a helpful assistant, you need to categorize an English question, considering that the background of this question is not common in an English environment. Therefore, you need to choose the most suitable language for this question. You need to analyze the required language context for the question first, and then tell me at the end which language you think is most suitable to answer the question. The question is as follows:

Figure 13: Prompt template for language selection (For the query in English).
# Prompt Template

作为乐于助人的助理,您需要将一个中文问题进行分类,考虑到该问题背景在中文环境中并不常见,因此您需要返回最适合该问题的语言。你需要首先对问题所需要的语言环境进行分析,然后在最后告诉我你返回的最适合回答该问题的语言。问题如下:

Figure 14: Prompt template for language selection (For the query in Chinese).

|
| 493 |
+
Figure 15: Screenshot of human annotation (1).
|
| 494 |
+
|
| 495 |
+

|
| 496 |
+
Figure 16: Screenshot of human annotation (2).
|
| 497 |
+
|
| 498 |
+
Table 9: Topics used for data generation.
|
| 499 |
+
|
| 500 |
+
<table><tr><td colspan="4">Topic Word</td></tr><tr><td>History</td><td>Literature</td><td>Science</td><td>Art</td></tr><tr><td>Social Sciences</td><td>Technology</td><td>Philosophy</td><td>Geography</td></tr><tr><td>Culture</td><td>Health</td><td>Artificial Intelligence</td><td>Machine Learning</td></tr><tr><td>Big Data</td><td>Blockchain</td><td>Internet of Things</td><td>Environmental Protection</td></tr><tr><td>Sustainable Development</td><td>Energy</td><td>Finance</td><td>Education</td></tr><tr><td>Human Genetics</td><td>Artificial Life</td><td>Space Exploration</td><td>Food Science</td></tr><tr><td>Sports</td><td>Psychology</td><td>Political Science</td><td>Economics</td></tr><tr><td>Sociology</td><td>Law</td><td></td><td></td></tr></table>
|
112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b5371f4a54d86c446f4bbd6e4002f8d8f6feffda6d77a9b651335ee4ac1bb0fc
size 995634
112canlargelanguagemodelsserveascrosslingualknowledgeaggregators/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02ef28291dffdb79b6cb7ac5f28638b32afa69ac8c3e561375f2b3d45c914426
size 528074
abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/bafb35f6-8fe7-40f9-8743-f4d30539c3ef_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df742ac38de3bb0d9170a4d2385361cc5e1f73dd7fef9cf14e239dacbeb0320f
size 87365
abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/bafb35f6-8fe7-40f9-8743-f4d30539c3ef_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c454184da16cca314624534758d573350c2f40242caac79e15fc2b1893a5b902
size 107590
abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/bafb35f6-8fe7-40f9-8743-f4d30539c3ef_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb0bbe5e43d5f4f30a0f080a0a0bcd3a2bce0bd45434df976c931a0d1c087a8f
size 296752
abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/full.md
ADDED
@@ -0,0 +1,378 @@
# A Bayesian Approach to Harnessing the Power of LLMs in Authorship Attribution

Zhengmian Hu$^{1,2*}$, Tong Zheng$^{1*}$, Heng Huang$^{1}$

$^{1}$Department of Computer Science, University of Maryland, College Park, MD 20742; $^{2}$Adobe Research

huzhengmian@gmail.com, zhengtong12356@gmail.com, heng@umd.edu

# Abstract
Authorship attribution aims to identify the origin or author of a document. Traditional approaches have heavily relied on manual features and fail to capture long-range correlations, limiting their effectiveness. Recent advancements leverage text embeddings from pretrained language models, which require significant fine-tuning on labeled data, posing challenges in data dependency and limited interpretability. Large Language Models (LLMs), with their deep reasoning capabilities and ability to maintain long-range textual associations, offer a promising alternative. This study explores the potential of pre-trained LLMs in one-shot authorship attribution, specifically utilizing Bayesian approaches and probability outputs of LLMs. Our methodology calculates the probability that a text entails previous writings of an author, reflecting a more nuanced understanding of authorship. By utilizing only pre-trained models such as Llama-3-70B, our results on the IMDb and blog datasets show an impressive $85\%$ accuracy in one-shot authorship classification across ten authors. Our findings set new baselines for one-shot authorship analysis using LLMs and expand the application scope of these models in forensic linguistics. This work also includes extensive ablation studies to validate our approach.
# 1 Introduction
Authorship attribution, the process of identifying the origin or author of a document, has been a longstanding challenge in forensic linguistics. It has numerous applications, including detecting plagiarism (Alzahrani et al., 2011) and attribution of historical text (Silva et al., 2023). As the digital age progresses, the need for reliable methods to determine authorship has become increasingly important, especially in the context of combating misinformation spread through social media and conducting forensic analysis. The ability to attribute authorship can also lead to challenges around privacy and anonymity (Juola et al., 2008).
The field traces its roots back to the early 19th century (Mechti and Almansour, 2021), with early studies focusing on stylistic features and human expert analysis (Mosteller and Wallace, 1963). Traditional methods often relied on stylometry, which quantifies writing styles (Holmes, 1994), and rule-based computational linguistic methods (Stamatatos, 2009) to deduce authorship. Later, statistical algorithms incorporating extensive text preprocessing and feature engineering (Bozkurt et al., 2007; Seroussi et al., 2014) were introduced to improve accuracy. However, these methods often struggled to capture long-range dependencies in text and required careful setup of specific thresholds for various indicators, which can be challenging to select effectively. They also involved designing complex, high-quality features, which can be costly and time-consuming.
The advent of deep learning has transformed the landscape of authorship attribution by turning the problem into a multi-class classification challenge, allowing for the capture of more features and addressing more complex scenarios effectively (Ruder et al., 2016; Ge et al., 2016; Shrestha et al., 2017; Zhang et al., 2018). However, these neural network (NN) models often lack interpretability and struggle with generalization in cases of limited samples.
Despite advancements, the field still faces significant challenges. Obtaining large, balanced datasets that represent multiple authors fairly is difficult, and as the number of authors increases, the accuracy of machine learning models tends to decrease.
On the other hand, language models, central to modern NLP applications, define probability distributions over words or sequences of words and have traditionally been used to predict and generate plausible language. Yet, for a long time, these models, including high-bias models like bag-of-words and n-gram models, struggled to fit the true probability distributions of natural language. Deep learning's rapid development has enabled computing and data to scale up by orders of magnitude, facilitating the use of more complex models such as Random Forests (Breiman, 2001), character-level CNNs (Zafar et al., 2020), Recurrent Neural Networks (Bagnall, 2015), and Transformers (Vaswani et al., 2017).
The recent rapid evolution of Large Language Models (LLMs) has dramatically improved the ability to fit natural language distributions. Trained on massive corpora exceeding 1 trillion tokens, these models have become highly capable of handling a wide range of linguistic tasks, including understanding, generation, and meaningful dialogue (Liang et al., 2022; Bubeck et al., 2023; Zhang et al., 2023a, 2024). They can also explain complex concepts and capture subtle nuances of language. They have been extensively applied in various applications such as chatbots, writing assistants, information retrieval, and translation services. More impressively, LLMs have expanded their utility to novel tasks without additional training, simply through the use of prompts and in-context learning (Brown et al., 2020). This unique ability motivates researchers to adapt LLMs to an even broader range of tasks and topics including reasoning (Wei et al., 2022), theory of mind (Kosinski, 2023) and medical scenario (Singhal et al., 2023).
Interestingly, language models have also been explored for authorship attribution (Agun and Yilmazel, 2017; Le and Mikolov, 2014; McCallum, 1999). Recently, research has utilized LLMs for question answering (QA) tasks within the application of authorship verification and authorship attribution (Huang et al., 2024), though these have primarily been tested in small-scale settings. Other approaches have attempted to leverage model embeddings and fine-tuning for authorship attribution, such as using GAN-BERT (Silva et al., 2023) and BERTAA (Fabien et al., 2020). However, these techniques often face challenges with scalability and need retraining when updating candidate authors. Moreover, they require a relatively large dataset and multiple epochs of fine-tuning to converge. Given the challenges with current approaches, a natural question arises: How can we harness LLMs for more effective authorship attribution?
Two aspects of evidence provide insight into this question. First, recent studies on LLMs have shown that these models suffer from hallucination problems (Ji et al., 2023). More interestingly, the outputs of LLMs given prompts may disagree with their internal thinking (Liu et al., 2023). Therefore, it is advisable not to rely solely on direct sampling results from LLMs. Second, the training objective of LLMs is to maximize the likelihood of the next token given all previous tokens. This indicates that probability may be a potential indicator for attributing texts to authors.
Language models are essentially probabilistic models, but we find that the probabilistic nature of LLMs and their potential for authorship identification remain underexploited. Our study seeks to bridge this gap. Specifically, we explore the capability of LLMs to perform one-shot authorship attribution among multiple candidates.
We propose a novel approach based on a Bayesian framework that utilizes the probability outputs from LLMs. By deriving text-level log probabilities from token-level log probabilities, we establish a reliable measure of likelihood that a query text was written by a specific author given example texts from each candidate author. We also design suitable prompts to enhance the accuracy of these log probabilities. By calculating the posterior probability of authorship, we can infer the most likely author of a document (Figure 1). Due to the pivotal role of log probability in our algorithm, we coined our approach the "Logprob method."
Our new method has three main advantages:
- No Need for Fine-Tuning: Our approach aligns the classification task with the pretraining objective, both focusing on computing entailment probability. This avoids any objective mismatch introduced by fine-tuning. Moreover, our method leverages the inherent capabilities of pretrained LLMs and avoids the knowledge forgetting that often occurs during fine-tuning.
- Speed and Efficiency: This approach requires only a single forward pass through the model for each author, making it significantly faster and more cost-effective than the usual question-answering use of language models, which samples a sequence of tokens as the answer and requires one forward pass per generated token.
- No Need for Manual Feature Engineering: The pre-training on diverse data enables LLMs to automatically capture and utilize subtle nuances in language, thus eliminating the need for manually designing complex features, which can be costly and time-consuming.

Figure 1: Illustration of Bayesian authorship attribution using an LLM.
By applying this technique, we have achieved state-of-the-art results in one-shot learning on the IMDb and blog datasets, demonstrating an impressive $85\%$ accuracy across ten authors. This advancement establishes a new baseline for one-shot authorship analysis and illustrates the robust potential of LLMs in forensic linguistics.
# 2 Method
Our approach to authorship attribution is based on a Bayesian framework. Given a document whose authorship is unknown, our objective is to identify the most probable author from a set using the capabilities of Large Language Models (LLMs).
We consider a scenario where we have a set of authors $\mathcal{A} = \{a_1, \ldots, a_n\}$ and a set of all possible texts $\mathcal{E}$. Given an authorship attribution problem, where each author $a_i$ has written a set of texts $t_{i,1}, t_{i,2}, \ldots, t_{i,m_i} \in \mathcal{E}$, we denote the collection of known texts of an author $a_i$ as $\boldsymbol{t}(a_i) = (t_{i,1}, t_{i,2}, \ldots, t_{i,m_i})$. For an unknown text $u \in \mathcal{E}$, we aim to determine the most likely author from the set $\mathcal{A}$.

To estimate the author of text $u$, we use a Bayesian framework where the probability that $u$ was written by author $a_i$ is given by:
$$
P(a_i \mid u) = \frac{P(u \mid a_i)\,P(a_i)}{P(u)}. \tag{1}
$$
Here, $P(a_{i})$ is the prior probability of each author, assumed to be equal unless stated otherwise, making the problem focus primarily on estimating $P(u \mid a_i)$.
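To make the normalization concrete, here is a minimal sketch of turning per-author log-likelihoods into the posterior of Eq. (1) under a uniform prior; the two example values are the log probabilities shown later in Figure 2.

```python
import math

def posterior_over_authors(log_likelihoods):
    """Map per-author log-likelihoods log P(u | a_i) to posteriors P(a_i | u)
    under a uniform prior, using a numerically stable softmax."""
    m = max(log_likelihoods)
    weights = [math.exp(x - m) for x in log_likelihoods]  # proportional to P(u|a_i)P(a_i)
    z = sum(weights)                                      # plays the role of P(u)
    return [w / z for w in weights]

# The two log probabilities shown in Figure 2:
print(posterior_over_authors([-958.41, -964.51]))  # ~[0.998, 0.002]
```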
Assuming that each author $a_i$ has a unique writing style represented by a probability distribution $P(\cdot | a_i)$ , texts written by $a_i$ are samples from this distribution. To estimate $P(u | a_i)$ , we consider the independence assumption: texts by the same author are independently and identically distributed (i.i.d.). Thus, the unknown text $u$ is also presumed to be drawn from $P(\cdot | a_i)$ for some author $a_i$ and is independent of other texts from that author.
Notice that although texts are independent under the i.i.d. assumption when conditioned on a particular author, there exists a correlation between the unknown text $u$ and the set of known texts $t(a)$ in the absence of knowledge about the author. This correlation can be exploited to deduce the most likely author of $u$ using the known texts.
Specifically, we have
$$
\begin{aligned}
P(u \mid \boldsymbol{t}(a_i)) &= \sum_{a_j \in \mathcal{A}} P(u, a_j \mid \boldsymbol{t}(a_i)) \\
&= \sum_{a_j \in \mathcal{A}} P(u \mid a_j, \boldsymbol{t}(a_i))\, P(a_j \mid \boldsymbol{t}(a_i)) \\
&= \sum_{a_j \in \mathcal{A}} P(u \mid a_j)\, P(a_j \mid \boldsymbol{t}(a_i)), \tag{2}
\end{aligned}
$$
where the last equality uses the i.i.d. assumption, meaning that when conditioned on a specific author $a_j$, $u$ is independent of other texts.

We then introduce the "sufficient training set" assumption, where:
$$
P(a_j \mid \boldsymbol{t}(a_i)) = \begin{cases} 1 & a_i = a_j \\ 0 & a_i \neq a_j. \end{cases} \tag{3}
$$
This implies that the training set is sufficiently comprehensive to unambiguously differentiate authors, leading to:

$$
P(u \mid \boldsymbol{t}(a_i)) = P(u \mid a_j), \tag{4}
$$

where $a_j$ is the assumed true author of text $u$.
Author 1:
Tina Fey is a successful professional who has missed out on the baby wagon. All her friends have families and she has promotions. Desperate for a child she tries a sperm bank but it fails when she is told that she is infertile. In desperation she takes on a surrogate who turns her life upside down. Clearly Tina Fey is the smartest one in the room and she walks through this film seemingly on autopilot and above to everyone around her. What is she doing here? She is somewhere beyond this film and it shows. Its cute and amusing but Fey's demeanor promises something on a different plane then the rest of the movie. I think the best way to explain it, or over explain it would be Cary Grant in a Three Stooges movie. I think Fey can do great things if she wants or can find material that matches her abilities. A good little film.
Here is the text from the same author:
Barbet Schroeder's portrait of French attorney Jacques Vergès. You've seen him defending people like Klaus Barbie, Carlos the Jackal, Pol Pot as well as other dictators and terrorists. This is a complex story of a complex man and it essentially tells the tale of the man from World War 2 until today. ( And even at 140 minutes the film leaves a great deal out). Here is man of his time, who met and defended with many of the famous and infamous people of the last fifty years. He seems to be a man who generally believes in the right of the oppressed to stand up to their oppressors and to have some one to stand up for them. However this is not just the story of a man who fights for the oppressed but it is also the story of a man entangled in things that will cause many to question just how slick a guy is Vergès. Many of the terrorists and dictators he defends are in fact his friends , and he is not doing it for the love of cause but also for the love of the finer things . I liked the film a great deal . To be certain I was lost as to bits of the history and who some people were , but at the same time the film isn't about the history , so much as Verges moving through it . This is the story of the man , his causes and to some degree his women . What exactly are we to make of Verges ? I don't know , but I sure do think that he and his life make for a compelling tale . I loved that my idea of what Verges is changed . I loved that I was completely confused at the end as to what I thought , confused in a way that only a film that forces you to think can do . In the end I don't know what I think ...
Author 2:
In the run-up to the 1972 elections, Washington Post reporter Bob Woodward covers what seems to be a minor break-in at the Democratic Party National headquarters. He is surprised to find top lawyers already on the defence case, and the discovery of names and addresses of Republican fund organisers on the accused further arouses his suspicions. The editor of the Post is prepared to run with the story and assigns Woodward and Carl Bernstein to it. They find the trail leading higher and higher in the Republican Party, and eventually into the White House itself . . . whatever peoples opinions on the Watergate ' scandal', whether they believe it was a big cover up, or the media got a lot wrong, no one can deny just how powerful and interesting this film really is. Pakula directs this very slickly and brings the tension on the two main protagonists very slowly throughout the duration of the movie. Redford and Hoffman work really well together and are given great support from the rest of the cast. the narration works amazingly well and there is good use of mise en scene and connotations. for example there are a few scenes with the t. v screen in the foreground showing Nixon winning his presidential seat again, with ...
Here is the text from the same author:
Barbet Schroeder's portrait of French attorney Jacques Vergès. You've seen him defending people like Klaus Barbie, Carlos the Jackal, Pol Pot as well as other dictators and terrorists. This is a complex story of a complex man and it essentially tells the tale of the man from World War 2 until today. ( And even at 140 minutes the film leaves a great deal out). Here is man of his time, who met and defended with many of the famous and infamous people of the last fifty years. He seems to be a man who generally believes in the right of the oppressed to stand up to their oppressors and to have some one to stand up for them. However this is not just the story of a man who fights for the oppressed but it is also the story of a man entangled in things that will cause many to question just how slick a guy is Vergès. Many of the terrorists and dictators he defends are in fact his friends , and he is not doing it for the love of cause but also for the love of the finer things . I liked the film a great deal . To be certain I was lost as to bits of the history and who some people were , but at the same time the film isn't about the history , so much as Verges moving through it . This is the story of the man , his causes and to some degree his women . What exactly are we to make of Vergès ? I don't know , but I sure do think that he and his life make for a compelling tale . I loved that my idea of what Vergès is changed . I loved that I was completely confused at the end as to what I thought , confused in a way that only a film that forces you to think can do . In the end I don't know what I think ...
Logprob: -958.41 (Author 1) vs. Logprob: -964.51 (Author 2). Most likely author: Author 1, whose prompt yields the higher log probability.
Figure 2: Example of prompt construction and authorship attribution based on log probabilities. The logprob is computed on the orange part, which represents the text from the unknown author.
We use Large Language Models (LLMs) to estimate $P(u|t(a_i))$ , which represents the probability that a new text $u$ was written by the author of a given set of texts $t(a_i)$ .
The probabilistic nature of language models means that they typically calculate the probability of a token or a sequence of tokens given prior context. For a vocabulary set $\Sigma$, the input to a language model might be a sequence of tokens $x_{1},\ldots,x_{m}\in \Sigma$, and the model's output would be the probability distribution $P_{\mathrm{LLM}}(\cdot \mid x_1,\dots,x_m)$, typically stored on a logarithmic scale for numerical stability.
When using an autoregressive language model, we can measure not only the probability of the next token but also the probability of a subsequent sequence of tokens. For instance, if we have a prompt consisting of tokens $x_{1},\ldots,x_{m}\in \Sigma$ and we want to measure the probability of a sequence $y_{1},\ldots,y_{s}\in \Sigma$, we calculate:

$$
P_{\mathrm{LLM}}(y_1,\dots,y_s \mid x_1,\dots,x_m) = \prod_{i=1}^{s} P_{\mathrm{LLM}}(y_i \mid x_1,\dots,x_m,y_1,\dots,y_{i-1}). \tag{5}
$$
+
To estimate $P(u|t(a_i))$ for authorship attribution, we define:
|
| 135 |
+
|
| 136 |
+
$$
|
| 137 |
+
\begin{array}{l} P (u | \mathbf {t} (a _ {i})) \tag {6} \\ = P _ {\text {L L M}} (u | \text {p r o m p t} _ {-} \text {c o n s t r u c t i o n} (t (a _ {i}))). \\ \end{array}
|
| 138 |
+
$$
|
| 139 |
+
|
| 140 |
+
The prompt construction can vary, providing flexibility in how we use the model to estimate probabilities. Our method involves constructing a prompt that steers the LLM to predict the likelihood that the unknown text was written by the same author (Figure 2).
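A minimal, illustrative sketch of this scoring procedure appears below. It is not the authors' exact implementation: "gpt2" is a small stand-in for the LLaMA models served with vLLM in the paper, and the prompt wording follows the "Here is the text from the same author:" pattern of Figure 2.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.eval()

def sequence_logprob(prompt: str, continuation: str) -> float:
    """Sum log P_LLM(y_i | x, y_<i) over the continuation tokens, as in Eq. (5)."""
    prompt_len = tok(prompt, return_tensors="pt").input_ids.shape[1]
    ids = tok(prompt + continuation, return_tensors="pt").input_ids
    with torch.no_grad():
        log_probs = torch.log_softmax(model(ids).logits, dim=-1)
    # The token at position i is predicted by the logits at position i - 1.
    return sum(
        log_probs[0, i - 1, ids[0, i]].item()
        for i in range(prompt_len, ids.shape[1])
    )

def most_likely_author(unknown_text: str, examples: dict) -> str:
    """One forward pass per author: argmax over log P(u | prompt_construction(t(a)))."""
    scores = {
        author: sequence_logprob(
            example + "\n\nHere is the text from the same author:\n\n",
            unknown_text,
        )
        for author, example in examples.items()
    }
    return max(scores, key=scores.get)
```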
In summary, our approach is straightforward. By leveraging the capabilities of Large Language Models, we calculate the likelihood that an unknown text originates from a known author based on existing samples of their writing. This probability assessment allows us to identify the most likely author from a set without the need for fine-tuning or feature engineering.
# 3 Experimental Setups

# 3.1 Models & Baselines
Models We selected two widely used LLM families: 1) the LLaMA family, which includes LLaMA-2 (Touvron et al., 2023), LLaMA-3, and CodeLLaMA (Roziere et al., 2023), available in various parameter sizes and configurations, with some models specifically fine-tuned for dialogue use cases; 2) the GPT family (Brown et al., 2020), featuring GPT-3.5-Turbo and GPT-4-Turbo (Achiam et al., 2023), where we specifically used versions gpt-4-turbo-2024-04-09 and gpt-3.5-turbo-0125. The LLaMA family models were deployed with the vLLM framework (Kwon et al., 2023) for the Logprob method and on Azure for question answering. Apart from Table 1, all ablation studies of the Logprob method use the LLaMA-3-70B model.
Baselines We chose two types of baselines for comparison: 1) embedding-based methods such as BertAA (Fabien et al., 2020) and GAN-BERT (Silva et al., 2023), which require training or fine-tuning, and 2) LLM-based methods such as those described in (Huang et al., 2024), which utilize LLMs for authorship attribution tasks through a question-answering (QA) approach.
# 3.2 Evaluations
Datasets We evaluated our method on two widely used author attribution datasets: 1) the IMDB62 dataset, a truncated version of the IMDB dataset (Seroussi et al., 2014), and 2) the Blog dataset (Schler et al., 2006). The IMDB62 dataset comprises 62k movie reviews from 62 authors, with each author contributing 1,000 samples; it also provides extra information such as rating scores. The Blog dataset contains 681k blog comments, each with an assigned authorID. Besides the raw text and authorID, each entry includes extra information such as gender and age. Both datasets are accessible via HuggingFace.
Benchmark Construction Unlike the fixed author sets used in many previous studies, we constructed a random author set for each test to avoid the bias of a fixed author set. By default, unless specified otherwise, each experiment involved a 10-author one-shot setting, and we conducted 100 tests per experiment to reduce variance. Each test involved the following steps: 1) Ten candidate authors were randomly selected. 2) For each author, one (or n, for n-shot) article was randomly selected as the training set. 3) One author was randomly selected from the ten candidates as the test author. 4) One article not in the training set was randomly selected from the test author's articles as the test set (with size 1). 5) We ran the authorship attribution algorithm to classify the test article into 10 categories.
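For illustration, a minimal sketch of one such test follows; `corpus` is a hypothetical in-memory stand-in for IMDB62 or the Blog dataset, mapping each author to a list of their texts.

```python
import random

def build_test(corpus, num_authors=10, shots=1):
    """One benchmark test following steps 1-5 above.
    corpus: dict mapping author -> list of that author's texts."""
    authors = random.sample(list(corpus), num_authors)                 # step 1
    train = {a: random.sample(corpus[a], shots) for a in authors}      # step 2
    target = random.choice(authors)                                    # step 3
    held_out = [t for t in corpus[target] if t not in train[target]]
    query = random.choice(held_out)                                    # step 4
    return train, query, target  # step 5: classify `query` among `authors`
```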
Our evaluation pipeline avoids potential biases from fixed author sets and better measures the efficacy of LLMs in authorship attribution tasks. We also share our pipeline for fair evaluation of future related work.

Notably, the aforementioned pipeline is suitable for non-training-based methods like ours and QA approaches. However, for training-based methods such as embedding approaches, each train-test split is followed by retraining, demanding significant computational resources. Therefore, in this work, we directly cite scores from the original papers.
Evaluation Metrics We adopt three metrics: top-1, top-2, and top-5 accuracy. Specifically, top-$k$ accuracy is computed as follows:

$$
\mathrm{Top}\text{-}k\ \mathrm{Accuracy} = \frac{\mathrm{Num}_{\mathrm{correct}}^{k}}{\mathrm{Num}_{\mathrm{all}}}, \tag{7}
$$
where $\mathrm{Num}_{\mathrm{correct}}^{k}$ represents the number of tests where the actual author is among the top $k$ predictions, and $\mathrm{Num}_{\mathrm{all}}$ represents the total number of tests.
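A small sketch of Eq. (7); each result pairs the true author with the candidate ranking obtained by sorting per-author logprobs in descending order (the example data is hypothetical).

```python
def top_k_accuracy(results, k):
    """results: list of (true_author, ranked_candidates) tuples."""
    hits = sum(true in ranked[:k] for true, ranked in results)
    return hits / len(results)

# Example: the true author is in the top 2 for two of three tests.
tests = [("a1", ["a1", "a3"]), ("a2", ["a3", "a2"]), ("a4", ["a5", "a6"])]
print(top_k_accuracy(tests, k=2))  # 0.666...
```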
# 4 Experiments
First, we evaluate different methods for author attribution in Section 4.1, noting that our Logprob method significantly outperformed QA-based methods in accuracy and stability across datasets. Then, we study the impact of increasing candidate numbers on performance in Section 4.2, where our method maintained high accuracy despite a larger pool of candidates. Next, in Section 4.3, we analyze prompt sensitivity, concluding that while prompt use is crucial, variations in prompt design did not significantly affect performance. Further, in Section 4.4, we explore bias in author attribution, and in Section 4.5, we measure performance variations across different subgroups. Finally, in Section 4.6, we compare the efficiency of different author attribution methods.
# 4.1 Author Attribution Performance
Table 1 shows the main results for different methods on the IMDB62 and Blog datasets concerning authorship attribution capabilities. We make the following observations:
- LLMs with QA-based methods cannot perform author attribution tasks effectively. For example, GPT-4-Turbo achieves a top-1 accuracy of only $34\%$ on the IMDB62 dataset and $62\%$ on the Blog dataset. Notably, there are two interesting phenomena: 1) Neither GPT-4-Turbo nor GPT-3.5-Turbo is consistently more accurate across the two datasets, highlighting the inherent instability of the prompt-based approach. 2) Older LLMs with smaller context windows are unable to perform author attribution at all, because the prompt exceeds the context window. These phenomena indicate that QA methods are not a good option for enabling LLMs to conduct author attribution tasks effectively.
- Our Logprob method helps LLMs perform author attribution tasks more effectively. With LLaMA-3-70B, we achieved a top-1 accuracy of $85\%$, and both top-2 and top-5 accuracies were even higher. This suggests that LLMs equipped with our method can effectively narrow down large candidate sets. Additionally, two other points are worth noting: 1) LLMs with the Logprob method exhibit more stable performance across both tasks, something QA methods struggle with, and 2) LLMs with Logprob can conduct authorship attribution with lower requirements on context window length. For instance, LLaMA-2-70B-Chat with the Logprob method can handle authorship attribution, whereas the same model with a QA approach fails when the collective text of 10 authors exceeds the context window length. These findings highlight the superiority of our Logprob method.
- Training-free methods can achieve comparable or even superior performance to training-based methods. On the Blog dataset, LLaMA + Logprob achieved higher top-1 accuracy than GAN-BERT and BertAA. While performance on the IMDB62 dataset was lower than that of the embedding-based methods, it is important to note that Logprob achieves this as a one-shot method, whereas embedding-based approaches require far more training data to converge. This demonstrates that Logprob can effectively capture the nuances necessary for authorship attribution.


Figure 3: Accuracy vs. number of candidates.
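To make the Logprob mechanism concrete, here is a minimal sketch using Hugging Face transformers: for each candidate author, the query text is scored in a single forward pass conditioned on that author's example text plus a connecting prompt, and the candidate with the highest total log-probability wins. The model name and prompt wording are illustrative placeholders, the paper's full Bayesian formulation is simplified, and the sketch assumes the prefix tokenization is unchanged by concatenation (a common simplification).

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL = "meta-llama/Meta-Llama-3-8B"  # placeholder; any causal LM exposing logits works
tok = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float16)
model.eval()

def query_logprob(example_text: str, query_text: str) -> float:
    """Sum of log P(query tokens | example text + prompt), from one forward pass."""
    prefix = example_text + "\nHere is the text from the same author:\n"
    n_prefix = tok(prefix, return_tensors="pt").input_ids.shape[1]
    ids = tok(prefix + query_text, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(ids).logits
    logp = torch.log_softmax(logits[0, :-1], dim=-1)  # position i predicts token i+1
    targets = ids[0, 1:]
    start = n_prefix - 1                              # score only the query tokens
    return logp[start:].gather(1, targets[start:, None]).sum().item()

def attribute(query, train):
    """train: author -> list of example articles; returns the highest-scoring author."""
    scores = {a: query_logprob(" ".join(exs), query) for a, exs in train.items()}
    return max(scores, key=scores.get)
```

This is also why no generation is needed: a single forward pass per candidate yields all token log-probabilities at once.
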
# 4.2 Performance vs. Number of Candidates
One of the challenges in authorship attribution is the difficulty in correctly identifying the author as the number of candidates increases, which generally leads to decreased accuracy. Figure 3 shows the author attribution performance across different candidate counts on the IMDB62 dataset. We made the following observations:
- First, performance indeed decreases as the number of candidates increases.
- Second, across all settings, all metrics maintain relatively high scores. For example, in the setting with 50 candidates, our method achieved $76\%$ top-1 accuracy, $84\%$ top-2 accuracy, and $87\%$ top-5 accuracy.
- Third, top-2 and top-5 accuracies are more stable than top-1 accuracy. The model may not always place the correct author at the top, but it often includes the correct author within the top few predictions. This property is also crucial, as it allows a large pool of candidates to be narrowed down to a smaller subset of likely authors.

# 4.3 Analysis of Prompt Sensitivity
Our method relies on a suitable prompt, as shown in Figure 2. Here, we discuss the sensitivity of our accuracy to different prompt constructions, reported in Table 2. We made the following observations:

- Using prompts is essential for enhancing the accuracy of our method (#1 vs. #2). This phenomenon aligns with previous studies (Wei et al., 2022) demonstrating that prompting is beneficial for unlocking the full potential of LLMs.

<table><tr><td rowspan="2">Method</td><td rowspan="2">Model</td><td colspan="5">IMDB62 Dataset</td><td colspan="5">BLOG Dataset</td></tr><tr><td>#Candidate</td><td>n-Shot</td><td>Top 1 Acc.</td><td>Top 2 Acc.</td><td>Top 5 Acc.</td><td>#Candidate</td><td>n-Shot</td><td>Top 1 Acc.</td><td>Top 2 Acc.</td><td>Top 5 Acc.</td></tr><tr><td rowspan="12">LogProb</td><td>LLaMA-2-7B</td><td>10</td><td>1</td><td>80.0 ± 4.0</td><td>88.0 ± 3.3</td><td>97.0 ± 1.7</td><td>10</td><td>1</td><td>79.0 ± 4.1</td><td>84.0 ± 3.7</td><td>98.0 ± 1.4</td></tr><tr><td>LLaMA-2-7B-Chat</td><td>10</td><td>1</td><td>68.0 ± 4.7</td><td>80.0 ± 4.0</td><td>88.0 ± 3.3</td><td>10</td><td>1</td><td>69.0 ± 4.6</td><td>78.0 ± 4.1</td><td>89.0 ± 3.1</td></tr><tr><td>LLaMA-2-13B</td><td>10</td><td>1</td><td>84.0 ± 3.7</td><td>88.0 ± 3.3</td><td>100.0 ± 0.0</td><td>10</td><td>1</td><td>81.0 ± 3.9</td><td>86.0 ± 3.5</td><td>94.0 ± 2.4</td></tr><tr><td>LLaMA-2-70B</td><td>10</td><td>1</td><td>88.0 ± 3.3</td><td>94.0 ± 2.4</td><td>99.0 ± 1.0</td><td>10</td><td>1</td><td>88.0 ± 3.3</td><td>90.0 ± 3.0</td><td>95.0 ± 2.2</td></tr><tr><td>LLaMA-2-70B-Chat</td><td>10</td><td>1</td><td>79.0 ± 4.1</td><td>85.0 ± 3.6</td><td>95.0 ± 2.2</td><td>10</td><td>1</td><td>83.0 ± 3.8</td><td>85.0 ± 3.6</td><td>97.0 ± 1.7</td></tr><tr><td>Code-LLaMA-7B</td><td>10</td><td>1</td><td>71.0 ± 4.5</td><td>84.0 ± 3.7</td><td>96.0 ± 2.0</td><td>10</td><td>1</td><td>78.0 ± 4.1</td><td>84.0 ± 3.7</td><td>94.0 ± 2.4</td></tr><tr><td>Code-LLaMA-13B</td><td>10</td><td>1</td><td>70.0 ± 4.6</td><td>84.0 ± 3.7</td><td>98.0 ± 1.4</td><td>10</td><td>1</td><td>77.0 ± 4.2</td><td>85.0 ± 3.6</td><td>92.0 ± 2.7</td></tr><tr><td>Code-LLaMA-34B</td><td>10</td><td>1</td><td>75.0 ± 4.3</td><td>84.0 ± 3.7</td><td>98.0 ± 1.4</td><td>10</td><td>1</td><td>78.0 ± 4.1</td><td>83.0 ± 3.8</td><td>94.0 ± 2.4</td></tr><tr><td>LLaMA-3-8B</td><td>10</td><td>1</td><td>82.0 ± 3.8</td><td>89.0 ± 3.1</td><td>98.0 ± 1.4</td><td>10</td><td>1</td><td>84.0 ± 3.7</td><td>89.0 ± 3.1</td><td>95.0 ± 2.2</td></tr><tr><td>LLaMA-3-8B-Instruct</td><td>10</td><td>1</td><td>69.0 ± 4.6</td><td>77.0 ± 4.2</td><td>90.0 ± 3.0</td><td>10</td><td>1</td><td>68.0 ± 4.7</td><td>77.0 ± 4.2</td><td>90.0 ± 3.0</td></tr><tr><td>LLaMA-3-70B</td><td>10</td><td>1</td><td>85.0 ± 3.6</td><td>93.0 ± 2.6</td><td>98.0 ± 1.4</td><td>10</td><td>1</td><td>82.0 ± 3.8</td><td>88.0 ± 3.3</td><td>95.0 ± 2.2</td></tr><tr><td>LLaMA-3-70B-Instruct</td><td>10</td><td>1</td><td>79.0 ± 4.1</td><td>89.0 ± 3.1</td><td>99.0 ± 1.0</td><td>10</td><td>1</td><td>79.0 ± 4.1</td><td>87.0 ± 3.4</td><td>96.0 ± 2.0</td></tr><tr><td rowspan="4">QA</td><td>LLaMA-2-70B-Chat</td><td>10</td><td>1</td><td>Failed</td><td>-</td><td>-</td><td>10</td><td>1</td><td>Failed</td><td>-</td><td>-</td></tr><tr><td>LLaMA-3-70B-Instruct</td><td>10</td><td>1</td><td>31.0 ± 4.6</td><td>-</td><td>-</td><td>10</td><td>1</td><td>22.0 ± 4.1</td><td>-</td><td>-</td></tr><tr><td>GPT-3.5-Turbo</td><td>10</td><td>1</td><td>69.0 ± 4.6</td><td>-</td><td>-</td><td>10</td><td>1</td><td>47.0 ± 5.0</td><td>-</td><td>-</td></tr><tr><td>GPT-4-Turbo</td><td>10</td><td>1</td><td>34.0 ± 4.7</td><td>-</td><td>-</td><td>10</td><td>1</td><td>62.0 ± 4.9</td><td>-</td><td>-</td></tr><tr><td rowspan="2">Other Baseline</td><td>GAN-BERT</td><td>20</td><td>80</td><td>96.0</td><td>-</td><td>-</td><td>20</td><td>80</td><td>40.0</td><td>-</td><td>-</td></tr><tr><td>BertAA</td><td>62</td><td>80</td><td>93.0</td><td>-</td><td>-</td><td>10</td><td>80</td><td>65.0</td><td>-</td><td>-</td></tr></table>

Table 1: Author attribution results on the IMDB62 and Blog datasets. Prompt construction for the QA method is consistent with Huang et al. (2024).

Prompt 1: Here is the text from the same author:
Prompt 2: Analyze the writing styles of the input texts, disregarding the differences in topic and content. Here is the text from the same author:
Prompt 3: Focus on grammatical styles indicative of authorship. Here is the text from the same author:
Prompt 4: Analyze the writing styles of the input texts, disregarding the differences in topic and content. Reasoning based on linguistic features such as phrasal verbs, modal verbs, punctuation, rare words, affixes, quantities, humor, sarcasm, typographical errors, and misspellings. Here is the text from the same author:

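The constructions compared in Table 2 (below) differ only in the connective text inserted between the example and the query. Here is a sketch of how they could be assembled for a scoring function like `query_logprob` above; the prompt strings are copied from the box above (Prompt 4, which appends the linguistic-features sentence to Prompt 2, is omitted for brevity), and the helper name is hypothetical.

```python
PROMPTS = {
    1: "Here is the text from the same author:",
    2: "Analyze the writing styles of the input texts, disregarding the "
       "differences in topic and content. Here is the text from the same author:",
    3: "Focus on grammatical styles indicative of authorship. "
       "Here is the text from the same author:",
}

def build_input(example_text, query_text, prompt_id=None):
    """Construction #1 in Table 2 uses no prompt; #2-#5 insert one of the prompts."""
    middle = "\n" + PROMPTS[prompt_id] + "\n" if prompt_id else "\n"
    return example_text + middle + query_text
```
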
<table><tr><td>#</td><td>Prompting</td><td>Top 1 Accuracy</td><td>Top 2 Accuracy</td><td>Top 5 Accuracy</td></tr><tr><td>1</td><td><Example Text> + <Query Text></td><td>70.0 ± 4.6</td><td>81.0 ± 3.9</td><td>92.0 ± 2.7</td></tr><tr><td>2</td><td><Example Text> + <Prompt 1> + <Query Text></td><td>85.0 ± 3.6</td><td>92.0 ± 2.7</td><td>99.0 ± 1.0</td></tr><tr><td>3</td><td><Example Text> + <Prompt 2> + <Query Text></td><td>83.0 ± 3.8</td><td>87.0 ± 3.4</td><td>100.0 ± 0.0</td></tr><tr><td>4</td><td><Example Text> + <Prompt 3> + <Query Text></td><td>86.0 ± 3.5</td><td>90.0 ± 3.0</td><td>100.0 ± 0.0</td></tr><tr><td>5</td><td><Example Text> + <Prompt 4> + <Query Text></td><td>87.0 ± 3.4</td><td>90.0 ± 3.0</td><td>99.0 ± 1.0</td></tr></table>
Table 2: Author attribution performance vs. different prompting choices on the IMDB62 dataset.
- There is no statistically significant evidence that specific prompt designs affect performance (#2 vs. #3 vs. #4 vs. #5): the metrics are very close across the different prompt constructions.

Discussions Prompt sensitivity (Sclar et al., 2023) is a widely acknowledged property of the generation process of LLMs, and it has motivated a line of research on prompt engineering (Zhang et al., 2023b; Guo et al., 2024), as different prompts can lead to completely different performance. Our method, however, appears to be relatively insensitive to the choice of prompt, which makes it more robust, maintaining high performance and stability across various settings.

<table><tr><td>Gender</td><td>Top 1 Acc.</td><td>Top 2 Acc.</td><td>Top 5 Acc.</td></tr><tr><td>Both</td><td>84.0 ± 1.6</td><td>90.8 ± 1.3</td><td>95.8 ± 1.0</td></tr><tr><td>Male</td><td>81.4 ± 2.5</td><td>88.6 ± 2.1</td><td>95.4 ± 1.4</td></tr><tr><td>Female</td><td>86.3 ± 2.1</td><td>92.8 ± 1.6</td><td>96.2 ± 1.2</td></tr></table>

Table 3: Gender bias in author attribution performance.

<table><tr><td>Gender</td><td>Top 1 Acc.</td><td>Top 2 Acc.</td><td>Top 5 Acc.</td></tr><tr><td>Male</td><td>77.0 ± 4.2</td><td>82.0 ± 3.8</td><td>92.0 ± 2.7</td></tr><tr><td>Female</td><td>89.0 ± 3.1</td><td>91.0 ± 2.9</td><td>95.0 ± 2.2</td></tr></table>
Table 4: Author attribution performance in each gender subgroup.
# 4.4 Bias Analysis
An algorithm trained on an entire dataset may exhibit different accuracy levels across different subgroups during testing (Chouldechova and G'Sell, 2017; Pastor et al., 2021). This section discusses such bias issues and measures how the algorithm's accuracy varies for different subgroups.
<table><tr><td>Interval</td><td>Top 1 Acc.</td><td>Top 2 Acc.</td><td>Top 5 Acc.</td></tr><tr><td>[1 - 2]</td><td>82.0 ± 3.8</td><td>89.0 ± 3.1</td><td>96.0 ± 2.0</td></tr><tr><td>[3 - 4]</td><td>87.0 ± 3.4</td><td>94.0 ± 2.4</td><td>99.0 ± 1.0</td></tr><tr><td>[5 - 6]</td><td>90.0 ± 3.0</td><td>96.0 ± 2.0</td><td>100.0 ± 0.0</td></tr><tr><td>[7 - 8]</td><td>88.0 ± 3.3</td><td>92.0 ± 2.7</td><td>97.0 ± 1.7</td></tr><tr><td>[9 - 10]</td><td>89.0 ± 3.1</td><td>93.0 ± 2.6</td><td>96.0 ± 2.0</td></tr></table>
(a) performance in each rating subgroup.
<table><tr><td>Age</td><td>Top 1 Acc.</td><td>Top 2 Acc.</td><td>Top 5 Acc.</td></tr><tr><td>[13 - 17]</td><td>90.0 ± 3.0</td><td>94.0 ± 2.4</td><td>99.0 ± 1.0</td></tr><tr><td>[18 - 34]</td><td>84.0 ± 3.7</td><td>89.0 ± 3.1</td><td>95.0 ± 2.2</td></tr><tr><td>[35 - 44]</td><td>80.0 ± 4.0</td><td>87.0 ± 3.4</td><td>94.0 ± 2.4</td></tr><tr><td>[45 - 48]</td><td>81.0 ± 3.9</td><td>85.0 ± 3.6</td><td>95.0 ± 2.2</td></tr></table>
(b) performance in each age subgroup.
Table 5: Author attribution performance in each rating subgroup and age subgroup.
<table><tr><td>#</td><td>Foundation Models</td><td>Deployment Resource</td><td>Method</td><td>Inference Time (s)</td><td>Accuracy</td></tr><tr><td>1</td><td>LLaMA-3-70B</td><td>8 × A6000 (vLLM)</td><td>Logprob</td><td>462.1</td><td>85.0 ± 3.6</td></tr><tr><td>2</td><td>GPT-4-Turbo</td><td>OpenAI</td><td>QA</td><td>663.1</td><td>34.0 ± 4.7</td></tr><tr><td>3</td><td>LLaMA-3-70B-Instruct</td><td>Azure</td><td>QA</td><td>2065.6</td><td>31.0 ± 4.6</td></tr></table>

Table 6: Efficiency analysis of the prompt-based (QA) method and the logprob-based method on the IMDB62 dataset.

Influence of Gender We conducted 500 tests, consisting of 237 on blogs written by male authors and 263 on blogs written by female authors, and report the authorship attribution accuracy for each group separately in Table 3. The results indicate that attribution is more accurate for blogs written by female authors, suggesting that female-authored blogs may contain more distinct personal styles, making the author easier to infer.

# 4.5 Subgroup Analysis
When considering authorship attribution restricted to specific subgroups, the task can either become simpler or more difficult. Certain subgroups may express personal styles more distinctly, making authorship attribution easier, while others may be more homogeneous, making it more challenging. Here, we consider three subgroup factors: gender, age, and rating, to analyze the performance under each group.
Subgroup by Gender As shown in Table 4, we evaluated the performance of authorship attribution within different gender subgroups in the Blog dataset. We observed that authorship attribution performed better within the female subgroup, consistent with findings in Section 4.4, suggesting female-authored blogs possess more distinctive personal styles.
Subgroup by Rating Table 5 (a) shows the performance of authorship attribution across different rating ranges in the IMDb review dataset. Overall, rating does influence performance, with reviews in the $[5 - 6]$ rating range being easier to attribute. Despite these differences, our method consistently obtains good performance across all subgroups.

Subgroup by Age Table 5 (b) shows the performance of authorship attribution across different age ranges of bloggers in the Blog dataset. We observed that age significantly influences performance. The youngest age group [13 - 17] exhibited the highest top-1 accuracy at $90\%$, and accuracy decreased with increasing author age. This suggests that younger authors tend to have more distinct opinions and more identifiable writing styles. Despite these performance differences, our method maintained relatively high overall performance, with the lowest accuracy still surpassing that of GPT-4-Turbo with the QA method.

# 4.6 Efficiency Analysis
Table 6 shows the efficiency comparison of different methods on the IMDB62 dataset. Our Logprob method operates with a notably lower runtime than QA methods, primarily because it requires only a single forward pass through the LLM for each candidate author to estimate the log probabilities. In contrast, QA methods generally need many iterations of token generation to form a response, which increases computation time substantially. Meanwhile, our method achieves an accuracy of up to $85\%$, surpassing the GPT-4-Turbo-based QA method in both efficiency and accuracy.

In summary, our method proves to be effective and efficient in performing authorship attribution across various datasets and setups.
# 5 Conclusion
In this paper, we study the problem of authorship attribution and demonstrate the effectiveness of utilizing pre-trained Large Language Models (LLMs) for one-shot author attribution. Our Bayesian approach leverages the probabilistic nature of language models such as LLaMA-3 to infer authorship. Because our method requires no fine-tuning, it reduces both computational overhead and data requirements. Our experiments validate that it is more effective and efficient than existing techniques.

# 6 Limitations
The main limitations arise due to the dependence on LLMs.
Our method relies heavily on the capabilities of LLMs, and its performance is strongly affected by the size and training objectives of the underlying model. As shown in Table 1, models that are only pre-trained, rather than fine-tuned for dialogue or code tasks, perform better.

While larger models generally perform better, they also entail higher costs, posing scalability and accessibility challenges for broader applications.
Another limitation stems from the training data of LLMs. If the training data lacks diversity or fails to include certain writing styles, the model may not fully capture the intricacies of an author's style, potentially leading to misclassifications. This underscores the importance of using diverse and comprehensive training datasets.

Furthermore, any biases present in the training data can also be absorbed by the model. These biases will influence the performance of our authorship attribution method.
On the broader societal level, the potential for misuse of this technology is a significant concern. The challenge of regulating and overseeing the use of such powerful tools is still not fully addressed.
Lastly, while our approach avoids the need for extensive retraining or fine-tuning, which is an advantage in many cases, it also means that our method might not adapt as well to scenarios where abundant training data and computation are available, which would justify more complex and computationally intensive methods.

# Acknowledgments
ZH, TZ and HH were partially supported by NSF IIS 2347592, 2347604, 2348159, 2348169, DBI 2405416, CCF 2348306, CNS 2347617.

# References
Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. GPT-4 technical report. arXiv preprint arXiv:2303.08774.

Hayri Volkan Agun and Ozgur Yilmazel. 2017. Document embedding approach for efficient authorship attribution. In 2017 2nd International Conference on Knowledge Engineering and Applications (ICKEA), pages 194-198. IEEE.

Salha M Alzahrani, Naomie Salim, and Ajith Abraham. 2011. Understanding plagiarism linguistic patterns, textual features, and detection methods. IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews), 42(2):133-149.

Douglas Bagnall. 2015. Author identification using multi-headed recurrent neural networks. arXiv preprint arXiv:1506.04891.

Ilker Nadi Bozkurt, Ozgur Baghoglu, and Erkan Uyar. 2007. Authorship attribution. In 2007 22nd International Symposium on Computer and Information Sciences, pages 1-5. IEEE.

Leo Breiman. 2001. Random forests. Machine Learning, 45:5-32.

Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems, 33:1877-1901.

Sebastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, et al. 2023. Sparks of artificial general intelligence: Early experiments with GPT-4. arXiv preprint arXiv:2303.12712.

Alexandra Chouldechova and Max G'Sell. 2017. Fairer and more accurate, but for whom? 4th Workshop on Fairness, Accountability, and Transparency in Machine Learning.

Mael Fabien, Esau Villatoro-Tello, Petr Motlicek, and Shantipriya Parida. 2020. BertAA: BERT fine-tuning for authorship attribution. In Proceedings of the 17th International Conference on Natural Language Processing (ICON), pages 127-137, Indian Institute of Technology Patna, Patna, India. NLP Association of India (NLPAI).

Zhenhao Ge, Yufang Sun, and Mark Smith. 2016. Authorship attribution using a neural network language model. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 30.

Qingyan Guo, Rui Wang, Junliang Guo, Bei Li, Kaitao Song, Xu Tan, Guoqing Liu, Jiang Bian, and Yujiu Yang. 2024. Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. In The Twelfth International Conference on Learning Representations.

David I Holmes. 1994. Authorship attribution. Computers and the Humanities, 28:87-106.

Baixiang Huang, Canyu Chen, and Kai Shu. 2024. Can large language models identify authorship? arXiv preprint arXiv:2403.08213.

Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Ye Jin Bang, Andrea Madotto, and Pascale Fung. 2023. Survey of hallucination in natural language generation. ACM Computing Surveys, 55(12):1-38.

Patrick Juola et al. 2008. Authorship attribution. Foundations and Trends® in Information Retrieval, 1(3):233-334.

Michal Kosinski. 2023. Theory of mind might have spontaneously emerged in large language models. arXiv preprint arXiv:2302.02083.

Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with PagedAttention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles.

Quoc Le and Tomas Mikolov. 2014. Distributed representations of sentences and documents. In International Conference on Machine Learning, pages 1188-1196. PMLR.

Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, et al. 2022. Holistic evaluation of language models. arXiv preprint arXiv:2211.09110.

Kevin Liu, Stephen Casper, Dylan Hadfield-Menell, and Jacob Andreas. 2023. Cognitive dissonance: Why do language model outputs disagree with internal representations of truthfulness? In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 4791-4797, Singapore. Association for Computational Linguistics.

Andrew Kachites McCallum. 1999. Multi-label text classification with a mixture model trained by EM. In AAAI'99 Workshop on Text Learning.

Seif Mechti and Fahad Almansour. 2021. An orderly survey on author attribution methods: From stylistic features to machine learning models. Int. J. Adv. Res. Eng. Technol, 12:528-538.

Frederick Mosteller and David L Wallace. 1963. Inference in an authorship problem: A comparative study of discrimination methods applied to the authorship of the disputed Federalist Papers. Journal of the American Statistical Association, 58(302):275-309.

Eliana Pastor, Luca de Alfaro, and Elena Baralis. 2021. Identifying biased subgroups in ranking and classification. Measures and Best Practices for Responsible AI at KDD 2021.

Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, et al. 2023. Code Llama: Open foundation models for code. arXiv preprint arXiv:2308.12950.

Sebastian Ruder, Parsa Ghaffari, and John G Breslin. 2016. Character-level and multi-channel convolutional neural networks for large-scale authorship attribution. arXiv preprint arXiv:1609.06686.

Jonathan Schler, Moshe Koppel, Shlomo Argamon, and James W Pennebaker. 2006. Effects of age and gender on blogging. In AAAI Spring Symposium: Computational Approaches to Analyzing Weblogs, volume 6, pages 199-205.

Melanie Sclar, Yejin Choi, Yulia Tsvetkov, and Alane Suhr. 2023. Quantifying language models' sensitivity to spurious features in prompt design or: How I learned to start worrying about prompt formatting. arXiv preprint arXiv:2310.11324.

Yanir Seroussi, Ingrid Zukerman, and Fabian Bohnert. 2014. Authorship attribution with topic models. Computational Linguistics, 40(2):269-310.

Prasha Shrestha, Sebastian Sierra, Fabio A González, Manuel Montes, Paolo Rosso, and Thamar Solorio. 2017. Convolutional neural networks for authorship attribution of short texts. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 669-674.

Kanishka Silva, Burcu Can, Frédéric Blain, Raheem Sarwar, Laura Ugolini, and Ruslan Mitkov. 2023. Authorship attribution of late 19th century novels using GAN-BERT. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop), pages 310-320.

Karan Singhal, Shekoofeh Azizi, Tao Tu, S Sara Mahdavi, Jason Wei, Hyung Won Chung, Nathan Scales, Ajay Tanwani, Heather Cole-Lewis, Stephen Pfohl, et al. 2023. Large language models encode clinical knowledge. Nature, 620(7972):172-180.

Efstathios Stamatatos. 2009. A survey of modern authorship attribution methods. Journal of the American Society for Information Science and Technology, 60(3):538-556.

Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in Neural Information Processing Systems, 30.

Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837.

Sarim Zafar, Muhammad Usman Sarwar, Saeed Salem, and Muhammad Zubair Malik. 2020. Language and obfuscation oblivious source code authorship attribution. IEEE Access, 8:197581-197596.

Biao Zhang, Barry Haddow, and Alexandra Birch. 2023a. Prompting large language model for machine translation: A case study. In International Conference on Machine Learning, pages 41092-41110. PMLR.

Richong Zhang, Zhiyuan Hu, Hongyu Guo, and Yongyi Mao. 2018. Syntax encoding with application in authorship attribution. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2742-2753, Brussels, Belgium. Association for Computational Linguistics.

Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen McKeown, and Tatsunori B Hashimoto. 2024. Benchmarking large language models for news summarization. Transactions of the Association for Computational Linguistics, 12:39-57.

Zhuosheng Zhang, Aston Zhang, Mu Li, and Alex Smola. 2023b. Automatic chain of thought prompting in large language models. In The Eleventh International Conference on Learning Representations.

# A Ethical Considerations
Our method using LLMs for authorship attribution brings several ethical considerations that must be addressed to ensure responsible and fair use of the technology.
Privacy and Anonymity The capacity of LLMs to attribute authorship with high accuracy can lead to ethical challenges regarding privacy and anonymity. Individuals who wish to remain anonymous or protect their identity could be compromised if authorship attribution tools are misused. Therefore, it is crucial to establish strict guidelines and ethical standards on the use of such technologies to prevent breaches of privacy.
Potential for Abuse Despite multiple beneficial applications, the misuse potential of authorship attribution tools is significant. Risks include the use of this technology to suppress free speech or to endanger personal safety by identifying individuals in contexts where anonymity is crucial for safety. Addressing these risks requires robust governance to prevent misuse and to ensure that the technology is used ethically and responsibly.
Bias Issue The performance of authorship attribution methods can vary across different demographics, leading to potential biases. It is important to continually assess and correct these biases to ensure fairness in the application of this technology.
Misclassification Issue Given the high stakes involved, especially in forensic contexts, the accuracy of authorship attribution is important. Misclassifications can have serious consequences, including wrongful accusations or legal implications. It is essential for authorship attribution methods to be reliable and for their limitations to be transparently communicated to users.
# B Broader Impact
Our study of authorship attribution using LLMs contributes to advancements in various domains:
Forensic Linguistics Our research contributes to the field of forensic linguistics by providing tools that can solve crimes involving anonymous or disputed texts. This can be particularly useful for law enforcement and legal professionals who need to gather evidence and make more informed decisions.
Intellectual Property Protection Our method can serve as a powerful tool in identifying the authors of texts, which can help protect intellectual property rights and resolve disputes in copyright.
Historical Text Attribution In literary and historical studies, determining the authorship of texts can provide insights into their origins and contexts, enhancing our understanding and interpretation.
Enhanced Content Management Media and content companies can use this technology to manage content more effectively by accurately attributing authorship to various contributors.
Educational Applications In educational settings, our method can help prevent plagiarism and promote academic integrity. It can also serve as a teaching tool to help students understand and appreciate stylistic differences between authors.

While our method holds promise across multiple applications, it is crucial to deploy it with caution. Ensuring that the technology is used responsibly and ethically will be key to maximizing its benefits while minimizing potential harm.
abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b964b05542e7f1006e3b6b33667af89501668471f5f3bce4152bc1bb1c7ecdc
size 352806

abayesianapproachtoharnessingthepowerofllmsinauthorshipattribution/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:312aeaf271d0cd2343a8f5b51b0b19fbdfbcbffef26d8931fa6aa5cc6a34b75d
size 381867

ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/f96ed249-8546-48a4-b23c-b67cd31bf9fc_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d0c136225baa1e582aa9faf59b992bea70699628bb395f17d48655013c8f303e
size 160901

ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/f96ed249-8546-48a4-b23c-b67cd31bf9fc_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9cf7e7044e92bbb6960b6737229144319ef76be1b4f7ffdb2ebe41dd9320505
size 196385

ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/f96ed249-8546-48a4-b23c-b67cd31bf9fc_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:45ed3cbd9babc94eac398178231b9a22ee6bc26ebc48277d6f8377fbcb8b8cd3
size 3630008

ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/full.md
ADDED
The diff for this file is too large to render. See raw diff.

ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2b551b72d37296711e5c5b257222b1f4c38cb5ce9fd10c2734d38bcf52472fc4
size 1337794

ablepersonalizeddisabilitysupportwithpolitenessandempathyintegration/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3aeaed6556f4b7fc26a16528e78a12cc6355f85e39168aabb3a6cbd7bb127bc2
size 737867

absevalanagentbasedframeworkforscriptevaluation/5ede6651-5e3a-4a8f-94d6-a388c5a623ea_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7fbe0968629c21d5acffb56db0db2691ad5e1c933cef60ac2e2764b88d469e72
size 112916

absevalanagentbasedframeworkforscriptevaluation/5ede6651-5e3a-4a8f-94d6-a388c5a623ea_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:425ed142d513d5c42d89286bac3829121468b6200015a7a3af1b40b18fe7df21
size 140481

absevalanagentbasedframeworkforscriptevaluation/5ede6651-5e3a-4a8f-94d6-a388c5a623ea_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b1a4a636ce416996f646673c64baf821c60cd1abafc10dbcef99a95c557aa7d
size 14135371

absevalanagentbasedframeworkforscriptevaluation/full.md
ADDED
@@ -0,0 +1,570 @@
# ABSEval: An Agent-based Framework for Script Evaluation
Sirui Liang $^{1,2}$ , Baoli Zhang $^{1,2}$ , Jun Zhao $^{1,2}$ and Kang Liu $^{1,2,3*}$
<sup>1</sup>The Key Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation, Chinese Academy of Sciences

$^{2}$ School of Artificial Intelligence, University of Chinese Academy of Sciences
$^{3}$ Shanghai Artificial Intelligence Laboratory
liangsirui2024@ia.ac.cn {baoli.zhang,jzhao,kliu}@nlpr.ia.ac.cn
# Abstract
Recent research indicates that large language models (LLMs) possess a certain degree of script planning capability. However, there is still a lack of focused work on evaluating scripts generated by LLMs. Evaluating scripts is challenging due to their logical structure, sequential organization, adherence to commonsense constraints, and open-endedness. In this work, we introduce a novel script evaluation dataset, MCScript, consisting of more than 1,500 script evaluation tasks and steps, and develop an agent-based script evaluation framework, ABSEval, to collaboratively evaluate scripts generated by LLMs. Our experiments demonstrate that ABSEval provides superior accuracy and relevance, aligning closely with human evaluation. We evaluated the script planning capabilities of 15 mainstream LLMs and provide a detailed analysis. Furthermore, we observed phenomena such as that the key factor influencing an LLM's script planning ability is not its parameter size, and we suggest improvements for evaluating open-ended questions.

# 1 Introduction
A script is a structure that describes an appropriate sequence of events in a particular context (Schank and Abelson, 1975; Abelson, 2014). In daily routines, individuals often rely on meticulously outlined steps to realize their objectives. For instance, Figure 1 illustrates the process of opening a can with a spoon. Recent studies have applied LLMs to script-related tasks, demonstrating that these models encode script knowledge (Sancheti and Rudinger, 2021) and can effectively decompose high-level tasks (Huang et al., 2022). However, scripts generated by LLMs may contain errors, making it crucial to evaluate the quality of these LLM-generated scripts.



Figure 1: An example script generated to plan for "How to open a can with a spoon?" and evaluated using ABSEval.

A script is a predetermined, stereotyped sequence of actions that defines a well-known situation (Schank and Abelson, 1975); it is not only logically and sequentially organized but also adheres to commonsense. Script evaluation assesses whether a script meets these characteristics. Additionally, entirely different steps can achieve the same goal, highlighting the open-ended nature of script tasks. Traditional approaches to script evaluation, such as manual evaluation, require considerable time and expense (Callison-Burch, 2009). Automated evaluation methods like BERTScore (Zhang et al., 2019) and ROUGE (Lin, 2004) assess script correctness by calculating semantic similarity, which struggles to evaluate the sequential order of scripts. These methods also require a gold answer for comparison, which is difficult to obtain for scripts. Furthermore, they have been shown to exhibit a relatively weak correlation with human judgment (Novikova et al., 2017; Chan et al., 2021).

Recent breakthroughs achieved by LLMs have spurred a wave of research utilizing LLMs as evaluators (Liu et al., 2023; Chiang and Lee, 2023; Zhang et al., 2023). Even though a single LLM has demonstrated the ability to serve as an evaluator, recent research indicates that employing multiple LLMs can enhance evaluation performance (Li et al., 2023; Liang et al., 2023). Assigning distinct roles to LLMs leads to more effective identification of problems in text (Chan et al., 2023).

Because existing script datasets are not sufficiently close to the tasks encountered in real-life scenarios, this paper introduces the Multi-Constrained Script planning dataset, i.e., MCScript, which includes more than 1,500 real-life script planning tasks and steps. In addition, we propose the Agent-Based Script Evaluation framework (ABSEval), an evaluation system that integrates an Answer Synthesize Agent, a Critic Agent, an Execute Agent, and a Commonsense Agent to comprehensively evaluate scripts based on their different characteristics. We designed the Answer Synthesize Agent to act as a learner: it studies the scripts generated by the LLMs being evaluated and produces a more refined answer. Then, the Critic Agent compares the scripts under evaluation with the gold answer provided by the Answer Synthesize Agent, identifying mistakes such as missing, redundant, and duplicate steps. Moreover, the Execute Agent verifies whether the scripts meet the implicit constraints of tasks, achieve the desired goals, and maintain a logical sequence by executing each step of the scripts. Finally, the Commonsense Agent assesses whether each step of the script conforms to commonsense.

This paper evaluated 15 widely used LLMs and analyzed their script planning capabilities. From the evaluation results, we observed some interesting phenomena: for example, the key factor influencing an LLM's script planning ability is not its parameter size, and providing gold answers for appropriate metrics can improve the assessment of open-ended questions.

Our contributions are as follows: 1) We developed a high-quality script evaluation dataset MCScript, which simulates a real-world situation by adding multiple constraints and contains over 1,500 script tasks and answers. 2) We propose ABSEval, an agent-based evaluation framework that exhibits superior alignment with human evaluations compared to current script assessment methods. 3) Using ABSEval, we assessed the script planning capabilities of 15 LLMs, offering insights into the advancements in LLMs' script planning abilities.
# 2 Data Construction
Currently, multiple large-scale script datasets have been developed via crowdsourcing or automatic methods (Wanzare et al., 2016; Regneri et al., 2010; Lyu et al., 2021). However, these datasets concentrate on abstract tasks (e.g., "Create a decision tree."). We aim to create a set of evaluation data that is more closely aligned with specific real-life tasks (e.g., "Create a decision tree on a computer to help you choose a holiday destination."). We utilized WikiHow (Koupaee and Wang, 2018), a comprehensive database of how-to guides on a wide range of subjects, as the primary source for our data. From this resource, we selected abstract questions across ten different topics, as shown in Figure 2. As shown in Table 1, we adopt in-context learning (Brown et al., 2020) with GPT-4-turbo<sup>1</sup> to expand the initial set of abstract questions by adding one to three constraints to each, thereby enhancing their relevance and realism. After the expansion, a thorough review of the newly formulated questions was conducted to select high-quality evaluation questions. Table 7 in the appendix provides detailed examples of data in MCScript.

Prompt: Create possible specific goals according to the abstract goal, here is an example.
Abstract task: Create a decision tree
Constraint: on computer, to help you choose a holiday destination, with 3 options
Constraint task: Create a decision tree on computer to help you choose a holiday destination with 3 options.
Obtain abstract task: How to buy Disney World tickets

Add constraints: Online, For a family of four, During peak season.

Generate constraint question: Research and purchase Disney World tickets online for a family of four during peak season.
Table 1: An example of a prompt for generating a constrained script task. The abstract task and the specific task are highlighted in the example.

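A sketch of how this expansion step could be scripted against an OpenAI-style chat API, reusing the in-context example from Table 1; the client setup, model name, and helper name are illustrative assumptions, not the authors' exact code.

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

FEW_SHOT = (
    "Create possible specific goals according to the abstract goal, here is an example.\n"
    "Abstract task: Create a decision tree\n"
    "Constraint: on computer, to help you choose a holiday destination, with 3 options\n"
    "Constraint task: Create a decision tree on computer to help you choose "
    "a holiday destination with 3 options."
)

def add_constraints(abstract_task: str, constraints: list[str]) -> str:
    """Expand one WikiHow abstract task into a multi-constraint task."""
    prompt = (f"{FEW_SHOT}\n\nAbstract task: {abstract_task}\n"
              f"Constraint: {', '.join(constraints)}\nConstraint task:")
    resp = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return resp.choices[0].message.content.strip()

# e.g. add_constraints("How to buy Disney World tickets",
#                      ["Online", "For a family of four", "During peak season"])
```
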

Figure 2: Distribution of topic in MCScript.


Figure 3: The evaluation process of LLM using ABSEval. We first obtained abstract problems from WikiHow and used GPT-4-turbo to add constraints, followed by manual screening to select high-quality questions. Subsequently, we utilized the ABSEval framework to complete the evaluation process. Finally, we analyzed the models' script planning capabilities based on the evaluation results. (Panels: a. generate evaluation questions; b. evaluation framework; c. ranking and analysis.)

# 3 Evaluation Methodology
This section provides an in-depth explanation of the ABSEval evaluation framework. The discussion includes a breakdown of the evaluation metrics, the components in the evaluation framework, and a detailed explanation of the entire evaluation process. The overall workflow is illustrated in Figure 3.
# 3.1 Evaluation Metrics
As stated in Section 1, the logical structure, sequential nature, and commonsense constraints of scripts present challenges for their evaluation. Evaluating such scripts necessitates methodologies distinct from those applied to traditional text generation. To address these distinctive script features, we devised specialized evaluation criteria.

Our evaluation metrics focus on three key aspects. Firstly, we introduced three evaluation criteria to assess the completeness and correctness of the logical structure: (1) No Missing Steps: ensuring all critical steps are included. (2) No Redundant Steps: the script contains no unnecessary steps. (3) No Duplicate Steps: avoiding repetition of actions. Secondly, to evaluate the script's adherence to commonsense knowledge, we introduced (4) Executable: ensuring alignment with common sense knowledge. Finally, to check the sequential order of the script and whether it achieves its goal, we defined the criteria: (5) Satisfy Constraint: meeting implicit task constraints. (6) Complete Goal: achieving the intended objective. (7) Order Correct: maintaining a logical sequence of steps.
# 3.2 ABSEval Framework
Considering the limitations of script evaluation by a single LLM, our study embraces an agent-based paradigm for our evaluation framework. We demonstrated that collaborative effort affords a more human-aligned assessment than a single LLM in Section 4. By comparing different LLMs against human-annotated standards, we opted for Qwen-110B-Chat $^2$ to serve as the evaluation backbone within our ABSEval framework. Our study concentrates on the deployment of homogeneous sets of LLMs, meaning all agents are represented by the same LLM. The prompt for each agent is detailed in Appendix A.2.
Answer Synthesize Agent. Due to the diversity and open-ended nature of scripts, there is no standard answer for reference. It is challenging to directly identify errors within them. To address this, we employed a pooling strategy where the Answer Synthesize Agent learns from the scripts to be evaluated for the same task and synthesizes an enhanced gold answer. By comparing the scripts to this gold answer, it becomes easier to identify implicit errors.
Critic Agent. Once the Answer Synthesize Agent has crafted the gold answer, the Critic Agent checks the scripts under evaluation against this gold answer to identify errors. As we demonstrate in Section 4.2, these errors tend to be subtle and are better identified by comparison with gold answers generated by the Answer Synthesize Agent. Through the collaboration of the Answer Synthesize Agent and the Critic Agent, we can identify missing-step, redundant-step, and duplicate-step errors within the scripts.

Execute Agent. To confirm whether a script successfully attains its intended objective without logical or sequential errors, we delegate the role of executor to an LLM. We first guide the Execute Agent to execute the script under evaluation step by step with the prompt "I have provided you with the steps to complete the task: [SCRIPT]. Please follow these steps and answer my questions below...". The agent then assesses whether the final goal has been achieved, whether the implicit constraints of the task have been satisfied, and whether there are any errors in the sequence of steps.

Commonsense Agent. Scripts generated by LLMs occasionally include steps at odds with commonsense reasoning (e.g., washing a book with water to clean it). Hence, we incorporate a Commonsense Agent, whose task is to check the concordance of scripted steps with commonsense knowledge. We employ an LLM as the Commonsense Agent to identify any script steps that do not follow commonsense.

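Putting the four agents together, here is a minimal sketch of how the ABSEval loop could be orchestrated. The `ask` helper stands in for a call to the backbone LLM (Qwen-110B-Chat in the paper), and the prompt wording is a placeholder for the actual prompts in Appendix A.2.

```python
def abseval(task: str, scripts: dict[str, str], ask) -> dict[str, dict]:
    """scripts: model name -> generated script. `ask(prompt) -> str` queries the backbone LLM."""
    # Answer Synthesize Agent: learn from all candidate scripts, produce a gold answer.
    gold = ask(f"Task: {task}\nCandidate scripts:\n" +
               "\n---\n".join(scripts.values()) +
               "\nSynthesize one refined, correct script for this task.")
    results = {}
    for model, script in scripts.items():
        # Critic Agent: compare against the synthesized gold answer.
        critic = ask(f"Gold answer:\n{gold}\nScript:\n{script}\n"
                     "Report missing, redundant, and duplicate steps.")
        # Execute Agent: walk through the steps, then check goal, constraints, and order.
        execute = ask(f"I have provided you with the steps to complete the task: {script}\n"
                      "Please follow these steps and answer: is the goal completed, are the "
                      "implicit constraints satisfied, and is the step order correct?")
        # Commonsense Agent: flag steps that violate commonsense.
        commonsense = ask(f"Script:\n{script}\nIdentify any steps that violate commonsense.")
        results[model] = {"critic": critic, "execute": execute, "commonsense": commonsense}
    return results
```
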
# 4 Experiments
# 4.1 Evaluated Models
Our evaluation primarily focuses on open-source models with parameters ranging from 6 billion to 70 billion, including LLaMa2-7b-Chat (Touvron et al., 2023), LLaMa2-13b-Chat, LLaMa2-70b-Chat, LLaMa3-8b-Instruct, LLaMa3-70b-Instruct, Baichuan-13B-Chat (Yang et al., 2023), Baichuan2-13B-Chat, Qwen-7B-Chat (Bai et al., 2023), Qwen-14B-Chat, Qwen-72B-Chat, Mistral-7B-Instruct-v0.2 (Jiang et al., 2023), Mistral-7B-Instruct-v0.1, Mixtral-8x7B-Instruct-v0.1, Vicuna-7b-v1.5, and Vicuna-13b-v1.5. We added the prompt "Let's think step by step" to guide the models in generating script responses, a simple strategy for enhancing the reasoning performance of the models (Kojima et al., 2022).

# 4.2 Results
Can ABSEval better align with human evaluations? To show that the proposed ABSEval is closer to human evaluations than previous evaluation approaches, we randomly selected 200 scripts generated by LLMs for manual annotation. Subsequently, we tested three state-of-the-art LLMs, GPT-3.5-turbo (Ouyang et al., 2022), GPT-4-turbo (Achiam et al., 2023), and Qwen-110B-Chat, within the ABSEval framework. Additionally, for comparison, we queried a single LLM directly to evaluate the seven ABSEval metrics on the same scripts. A better evaluation would obtain results closer to the human annotations.

The Mean Squared Error (MSE) values for the seven metrics of ABSEval and Single-LLM against human evaluations were calculated. As shown in Table 2, Qwen-110B-Chat excelled in performance in both the ABSEval and Single-LLM frameworks. A single-LLM evaluation system, while incorporating advanced models, may fall short of providing a comprehensive analysis that matches human evaluators' results effectively. In contrast, the ABSEval evaluation system significantly enhances the alignment of LLM assessments with human judgment.
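A sketch of this alignment measure: per metric, the framework's binary judgments are compared to the human labels via mean squared error, then averaged. The paper does not spell out whether the MSE is pooled or averaged per metric; this sketch averages per metric, and the label layout is an assumption.

```python
def alignment_mse(framework_labels, human_labels):
    """Both args: dict mapping metric name -> list of 0/1 judgments over the
    200 annotated scripts. Returns the MSE against human labels, averaged
    over the seven ABSEval metrics (lower = better aligned)."""
    per_metric = []
    for metric, preds in framework_labels.items():
        gold = human_labels[metric]
        per_metric.append(sum((p - g) ** 2 for p, g in zip(preds, gold)) / len(gold))
    return sum(per_metric) / len(per_metric)
```
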
<table><tr><td>LLM</td><td>Mechanism</td><td>MSE</td></tr><tr><td>Qwen-110B-Chat</td><td>ABSEval</td><td>0.087</td></tr><tr><td>GPT-4-turbo</td><td>ABSEval</td><td>0.174</td></tr><tr><td>GPT-3.5-turbo</td><td>ABSEval</td><td>0.329</td></tr><tr><td>Qwen-110B-Chat</td><td>Single-LLM</td><td>0.257</td></tr><tr><td>GPT-4-turbo</td><td>Single-LLM</td><td>0.29</td></tr><tr><td>GPT-3.5-turbo</td><td>Single-LLM</td><td>0.361</td></tr></table>

Table 2: Similarity of evaluation results to human assessments for GPT-3.5-turbo, GPT-4-turbo, and Qwen-110B-Chat as the LLM in ABSEval and Single-LLM.

Should Gold answers be provided for evaluating the open-end questions? To answer this question, we investigate the potential advantages of including a gold answer when assessing open-ended questions like scripts for the automatic evaluation. Our analysis of the data presented in Figure 4 involved comparing the coherence between the evaluation of Qwen-110B-Chat and human evaluation across various metrics, both with and without a gold answer. The findings of our study indicate that incorporating a gold answer can assist the model in identifying missing steps more effectively. However, it was observed that the presence of a gold answer can also reduce the accuracy of the model's assessments concerning step sequencing correctness, goal achievement, and adherence to implicit constraints. Providing a reference answer

Figure 4: Comparing the consistency of evaluation results with human assessments when using an LLM directly for evaluation, with and without a gold answer provided.
Providing a reference answer can thus assist with some metrics while degrading performance on others. It is therefore crucial to design the evaluation method appropriately, as ABSEval does, supplying gold answers only for the evaluation metrics that benefit from them.
Can the Answer Synthesize Agent generate high-quality answers? We used the answers generated by the Answer Synthesize Agent and by Qwen-110B-Chat directly as the gold answers for the Critic Agent, and compared the consistency of the resulting evaluations with human-labeled data. Table 3 shows the performance differences: the Answer Synthesize Agent outperforms the direct answers from Qwen-110B-Chat on all three metrics of No Missing Steps, No Redundant Steps, and No Duplicate Steps.
<table><tr><td>Gold answer generation</td><td>NM</td><td>NR</td><td>ND</td></tr><tr><td>Answer Synthesize</td><td>0.895</td><td>0.965</td><td>1.0</td></tr><tr><td>Qwen-110B-Chat</td><td>0.75</td><td>0.855</td><td>1.0</td></tr></table>
Can ABSEval effectively identify errors in scripts? To answer this question, we introduced perturbations into completely correct scripts and evaluated them using the ABSEval framework. We used GPT-4-turbo to inject the perturbations (e.g., removing a key step from the script); the perturbation-construction prompts are detailed in Table 10. For each evaluation metric in ABSEval, we constructed 50 perturbed scripts and then evaluated them with ABSEval. We calculated the accuracy (Acc.) of ABSEval in identifying each type of injected error. As shown in Table 4, ABSEval effectively identified all error types, demonstrating the validity of the framework.
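A minimal sketch of this check follows, under the assumption that a detected error corresponds to the relevant metric coming back False; `abseval_judge` is a placeholder for running the full agent pipeline.

```python
# Hedged sketch: accuracy = fraction of deliberately corrupted scripts on
# which ABSEval flags the injected error for the targeted metric.

def abseval_judge(script) -> dict:
    """Placeholder: run the ABSEval agents, return metric name -> bool (True = pass)."""
    raise NotImplementedError

def detection_accuracy(perturbed_scripts, metric: str) -> float:
    """perturbed_scripts each contain one injected error targeting `metric`."""
    detected = sum(1 for s in perturbed_scripts if not abseval_judge(s)[metric])
    return detected / len(perturbed_scripts)
```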
Table 3: Comparison of the accuracy of different gold answer generation approaches. NM: No Missing Steps, NR: No Redundant Steps, ND: No Duplicate Steps.
<table><tr><td>Perturbations category</td><td>Acc.</td></tr><tr><td>Missing steps</td><td>0.84</td></tr><tr><td>Redundant steps</td><td>0.96</td></tr><tr><td>Duplicate steps</td><td>0.96</td></tr><tr><td>Satisfy Constraint</td><td>0.85</td></tr><tr><td>Complete</td><td>0.92</td></tr><tr><td>Step order</td><td>0.84</td></tr></table>
Table 4: Accuracy of ABSEval in detecting perturbation errors.
# 4.3 Evaluating Scripts from Different LLMs with ABSEval

The overall evaluation results of ABSEval are shown in Table 5.
# What are the most common errors in all LLMs during script planning?

We grouped the LLMs in Table 5 by parameter size and plotted a heat map of the overall performance at each parameter level in Figure 5. As the figure shows, the most frequent issues during script planning are missing steps and failing to achieve the intended goal, whereas redundant steps are comparatively rare. Accuracy across the metrics generally improves as parameter size increases; even so, LLMs with up to 70 billion parameters still struggle to perform well across all metrics.
# How do LLMs perform across different script planning topics?

The heat map in Figure 9 in the appendix shows that LLMs perform best on topics related to Education and Communications and worst on topics related to Health. Notably, the heat map reveals substantial performance variation across topics. We believe this variation is related to the knowledge stored within the LLMs.
<table><tr><td>Model Name</td><td>Rank</td><td>Size</td><td>NM</td><td>NR</td><td>ND</td><td>EX</td><td>SC</td><td>CG</td><td>OC</td></tr><tr><td>Baichuan-Chat</td><td>14th</td><td>13B</td><td>0.029</td><td>0.787</td><td>0.994</td><td>0.833</td><td>0.673</td><td>0.572</td><td>0.632</td></tr><tr><td>Baichuan2-Chat</td><td>13th</td><td>13B</td><td>0.139</td><td>0.777</td><td>0.992</td><td>0.813</td><td>0.677</td><td>0.580</td><td>0.604</td></tr><tr><td>Vicuna-v1.5</td><td>10th</td><td>7B</td><td>0.044</td><td>0.811</td><td>0.995</td><td>0.876</td><td>0.713</td><td>0.611</td><td>0.696</td></tr><tr><td>Vicuna-v1.5</td><td>9th</td><td>13B</td><td>0.074</td><td>0.858</td><td>0.999</td><td>0.888</td><td>0.708</td><td>0.624</td><td>0.720</td></tr><tr><td>LLaMa2-chat</td><td>11th</td><td>7B</td><td>0.250</td><td>0.728</td><td>0.999</td><td>0.836</td><td>0.661</td><td>0.566</td><td>0.709</td></tr><tr><td>LLaMa2-chat</td><td>7th</td><td>13B</td><td>0.211</td><td>0.807</td><td>0.999</td><td>0.871</td><td>0.715</td><td>0.622</td><td>0.722</td></tr><tr><td>LLaMa2-chat</td><td>2nd</td><td>70B</td><td>0.379</td><td>0.773</td><td>0.999</td><td>0.886</td><td>0.711</td><td>0.665</td><td>0.727</td></tr><tr><td>LLaMa3-instruct</td><td>5th</td><td>8B</td><td>0.103</td><td>0.880</td><td>1.000</td><td>0.889</td><td>0.758</td><td>0.681</td><td>0.725</td></tr><tr><td>LLaMa3-instruct</td><td>1st</td><td>70B</td><td>0.154</td><td>0.894</td><td>1.000</td><td>0.902</td><td>0.755</td><td>0.711</td><td>0.745</td></tr><tr><td>Mistral-Instruct-v0.1</td><td>15th</td><td>7B</td><td>0.048</td><td>0.703</td><td>0.998</td><td>0.816</td><td>0.671</td><td>0.565</td><td>0.610</td></tr><tr><td>Mistral-Instruct-v0.2</td><td>6th</td><td>7B</td><td>0.220</td><td>0.810</td><td>1.000</td><td>0.889</td><td>0.713</td><td>0.666</td><td>0.718</td></tr><tr><td>Mistral-8x7B-Instruct-v0.1</td><td>4th</td><td>8x7B</td><td>0.092</td><td>0.888</td><td>0.999</td><td>0.902</td><td>0.753</td><td>0.685</td><td>0.766</td></tr><tr><td>Qwen-Chat</td><td>12th</td><td>7B</td><td>0.089</td><td>0.831</td><td>0.996</td><td>0.862</td><td>0.678</td><td>0.564</td><td>0.668</td></tr><tr><td>Qwen-Chat</td><td>8th</td><td>14B</td><td>0.139</td><td>0.878</td><td>0.997</td><td>0.879</td><td>0.719</td><td>0.593</td><td>0.703</td></tr><tr><td>Qwen-Chat</td><td>3rd</td><td>72B</td><td>0.129</td><td>0.913</td><td>0.998</td><td>0.900</td><td>0.763</td><td>0.654</td><td>0.763</td></tr><tr><td>ALL</td><td>-</td><td>-</td><td>0.137</td><td>0.824</td><td>0.998</td><td>0.870</td><td>0.712</td><td>0.624</td><td>0.700</td></tr></table>
Table 5: The accuracy of all evaluated LLMs on each metric over the MCScript dataset. NM: No Missing Steps, NR: No Redundant Steps, ND: No Duplicate Steps, EX: Executable, SC: Satisfy Constraint, CG: Complete Goal, OC: Order Correct.

Figure 5: The heat map depicts the relationship between model size and the evaluation criteria.
# 5 Deep Thinking on ABSEval

We present the performance of all evaluated LLMs across the metrics in Figure 11 in the appendix. To enhance the clarity of our observations, we use a consistent color scheme for LLMs within the same series (e.g., LLaMa3 is shown in red), with varying shades denoting differences in parameter count. Our analysis yields several interesting observations.
# Distinct LLM series exhibit domain-specific strengths.

In our comparative analysis, no single LLM demonstrated superiority on every evaluation metric. For instance, both the LLaMa2 and LLaMa3 models excel at reducing missing steps, adhering to constraints, and achieving the intended goals, while Qwen displays a remarkable ability to avoid redundant actions, showing heightened efficiency in certain problem-solving scenarios. The Vicuna models' strength lies in their strong compliance with commonsense constraints. Overall, different models have advantages on different evaluation metrics. These findings underscore the potential for future enhancements in the domain-specific proficiencies of LLMs.
# Larger parameter size does not necessarily guarantee superior metric performance.

As shown in Figure 5, a larger number of model parameters generally leads to improved performance on script planning tasks: more parameters are associated with fewer missing steps, better goal accomplishment, and better sequence maintenance. However, this trend is not consistent across all criteria.
Figure 6: Comparison of different evaluation methods, including our ABSEval, Single-LLM evaluation, ROUGE, and BERTScore.
Notably, within the LLaMa2 series, a higher parameter count led to an increase in redundant steps, contrary to expectations. This decline with increased parameters may be linked to decreased instruction-following efficiency, resulting in responses that include content beyond the task requirements.
# Factors beyond parameter size impact LLMs' script planning capabilities.

While some metrics improve with larger parameter counts, models within the same series maintain a consistent rank order across metrics. For instance, the LLaMa2 and LLaMa3 series generally outperform the Qwen series on the 'No Missing Steps' metric: remarkably, the Qwen-72B-Chat model did not surpass the LLaMa2 and LLaMa3 models on this metric despite its significantly larger parameter count. Conversely, on the 'No Redundant Steps' metric, the Qwen and LLaMa3 series often outperformed the LLaMa2 series; even LLaMa2-70B-Chat failed to surpass Qwen-7B-Chat. We believe that diverse training conditions, such as pre-training data, architecture, and methodologies unique to each model series, play a crucial role in determining script planning proficiency. Thus, factors beyond mere parameter size significantly shape the script planning capabilities of LLMs.
# LLMs perform better on tasks with more steps.

We analyzed the relationship between LLM performance on four metrics (Correct Order, Executable, No Redundant Steps, and Satisfy Constraint) and the number of steps in the script. As illustrated in Figure 12 in the appendix, as the number of steps in a script task increases, LLMs maintain logical sequences and adhere to constraints more accurately, and redundant steps become less frequent. This trend may arise from LLMs' tendency to focus on crucial steps and avoid unnecessary redundancy when addressing complex problems. Overall, LLMs perform better on scripts with more steps, indicating a real capability for handling complex planning tasks.
# Limitations of current script evaluation methods

A sample of 1,000 questions from MCScript was randomly selected for a critical analysis of the limitations of different evaluation methods. The evaluation was conducted on the 15,000 scripts generated by the 15 different LLMs using ABSEval, Single-LLM, ROUGE, and BERTScore. The rankings produced by each method are compared in Figure 6.

Figure 7: The consistency of Single-Agent and ABSEval with manual evaluation on each metric.
In contrast to traditional methods such as BERTScore (Zhang et al., 2019) and ROUGE (Lin, 2004), our evaluation approach presents several advantages. The open-ended nature of scripts allows a variety of step sequences to achieve the same goal. BERTScore evaluates text by comparing the cosine similarity of embedding vectors in the generated and reference texts, while ROUGE assesses similarity based on the longest common subsequence between the two. Both depend heavily on the reference answer, leading to significant inaccuracies when evaluating scripts that differ greatly from the reference yet still meet the objective. These methods also struggle to assess the sequential flow and logical structure of script steps. Traditional evaluation methods therefore do not offer a fair and comprehensive evaluation of scripts, which yields LLM performance rankings that differ from those of our method.
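The reference dependence is easy to reproduce: in the sketch below (ours, using the `rouge-score` package), a candidate script that reaches the same goal through differently worded steps receives a low ROUGE-L score against the reference.

```python
# Illustration of reference dependence; the two scripts are invented examples.
from rouge_score import rouge_scorer  # pip install rouge-score

reference = "Visit the Disney website. Pick your travel dates. Pay for the tickets online."
candidate = "Decide when to go. Open the official site. Check out with your card."

scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
score = scorer.score(reference, candidate)["rougeL"]
print(f"ROUGE-L F1: {score.fmeasure:.3f}")  # low despite both achieving the goal
```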
As discussed in Section 4, ABSEval aligns more closely with human preferences than Single-LLM. Figure 7 compares ABSEval and Single-Agent in terms of consistency with human annotations across the evaluation metrics. Notably, Single-Agent performed poorly on No Missing Steps, No Redundant Steps, and Satisfy Constraint, which demonstrates that distributing detailed subtasks across agents can effectively improve evaluation performance.
# 6 Related Work

Scripts. A script is a structure describing a sequence of events in a particular scenario (Schank and Abelson, 1975). Current work focuses on extracting script knowledge from LLMs. For instance, Lyu et al. (2021) introduced a model that generates a series of steps designed to achieve a specified objective. Huang et al. (2022) showed that LLMs can effectively break down high-level tasks into mid-level plans even without additional training. Yuan et al. (2023) proposed enhancing LLMs by first over-generating and then filtering their output, thereby refining script generation when multiple constraints are in play. These advances largely emphasize the generative side; research on comprehensive and fair methods for evaluating the script planning abilities of LLMs remains notably scarce.

Open-ended Text Evaluation. Evaluating open-ended text poses significant challenges due to the labor-intensive nature of human-based methods. Traditional metrics like BLEU (Papineni et al., 2002) and ROUGE (Lin, 2004) often diverge from human judgments. The capabilities of LLMs offer a new direction for text assessment. For instance, G-EVAL (Liu et al., 2023) employs LLMs with chain-of-thought processes and a form-filling approach to evaluate NLG outputs. Advances with collaborative LLMs show promise in aligning more closely with human ratings: Mandi et al. (2023) introduced a multi-robot collaboration method covering both strategic communication and detailed path planning, and Chan et al. (2023) created an agent-based debate framework for text evaluation.
# 7 Conclusion

In this study, we introduced MCScript, a new script evaluation dataset comprising over 1,500 script tasks and their steps. We proposed ABSEval, a fairer, fine-grained, and human-aligned script evaluation method. Using ABSEval, we conducted a comprehensive analysis of the script planning abilities of 15 current LLMs and identified the shortcomings of existing script evaluation methods. Our discussion and analysis provide insights for evaluating open-ended tasks similar to scripts. Our objective is to establish a new framework within the LLM community for assessing and analyzing the script planning capabilities of LLMs.
# 8 Limitations

Our proposed ABSEval uses homogeneous LLMs: all roles are performed by the same LLM. Future work could explore heterogeneous LLMs, assigning roles according to the strengths of different models to further enhance the framework's potential. Additionally, our dataset still contains a small number of errors, because the data volume makes exhaustive manual checking overly time-consuming. Last but not least, all our evaluation metrics are binary (True or False); the evaluation granularity could be refined by assessing the degree of completion for each metric (e.g., how many steps are missing, how many constraints are unmet).
# 9 Acknowledgements

This work was supported by the National Key R&D Program of China (No. 2022ZD0160503) and the Beijing Natural Science Foundation (L243006). This work was also sponsored by the CCF-BaiChuan-Ebtech Foundation Model Fund.
# References
Robert P Abelson. 2014. Script processing in attitude formation and decision making. In Cognition and Social Behavior, pages 33-45. Psychology Press.

Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. GPT-4 technical report. arXiv preprint arXiv:2303.08774.

Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. 2023. Qwen technical report. arXiv preprint arXiv:2309.16609.

Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems, 33:1877-1901.

Chris Callison-Burch. 2009. Fast, cheap, and creative: Evaluating translation quality using Amazon's Mechanical Turk. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing, pages 286-295.

Chi-Min Chan, Weize Chen, Yusheng Su, Jianxuan Yu, Wei Xue, Shanghang Zhang, Jie Fu, and Zhiyuan Liu. 2023. ChatEval: Towards better LLM-based evaluators through multi-agent debate. arXiv preprint arXiv:2308.07201.

CS Richard Chan, Charuta Pethe, and Steven Skiena. 2021. Natural language processing versus rule-based text analysis: Comparing BERT score and readability indices to predict crowdfunding outcomes. Journal of Business Venturing Insights, 16:e00276.

Cheng-Han Chiang and Hung-yi Lee. 2023. Can large language models be an alternative to human evaluations? arXiv preprint arXiv:2305.01937.

Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. 2022. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. In International Conference on Machine Learning, pages 9118-9147. PMLR.

Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 2023. Mistral 7B. arXiv preprint arXiv:2310.06825.

Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022. Large language models are zero-shot reasoners. Advances in Neural Information Processing Systems, 35:22199-22213.

Mahnaz Koupaee and William Yang Wang. 2018. WikiHow: A large scale text summarization dataset. arXiv preprint arXiv:1810.09305.

Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem. 2023. CAMEL: Communicative agents for "mind" exploration of large scale language model society.

Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Zhaopeng Tu, and Shuming Shi. 2023. Encouraging divergent thinking in large language models through multi-agent debate. arXiv preprint arXiv:2305.19118.

Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81.

Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. 2023. G-Eval: NLG evaluation using GPT-4 with better human alignment. arXiv preprint arXiv:2303.16634.

Qing Lyu, Li Zhang, and Chris Callison-Burch. 2021. Goal-oriented script construction. arXiv preprint arXiv:2107.13189.

Zhao Mandi, Shreeya Jain, and Shuran Song. 2023. RoCo: Dialectic multi-robot collaboration with large language models. arXiv preprint arXiv:2307.04738.

Jekaterina Novikova, Ondrej Dusek, Amanda Cercas Curry, and Verena Rieser. 2017. Why we need new evaluation metrics for NLG. arXiv preprint arXiv:1707.06875.

Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35:27730-27744.

Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. BLEU: A method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318.

Michaela Regneri, Alexander Koller, and Manfred Pinkal. 2010. Learning script knowledge with web experiments. In Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, pages 979-988.

Abhilasha Sancheti and Rachel Rudinger. 2021. What do large language models learn about scripts? arXiv preprint arXiv:2112.13834.

Roger C Schank and Robert P Abelson. 1975. Scripts, plans, and knowledge. In IJCAI, volume 75, pages 151-157.

Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.

Lilian DA Wanzare, Alessandra Zarcone, Stefan Thater, and Manfred Pinkal. 2016. A crowdsourced database of event sequence descriptions for the acquisition of high-quality script knowledge.

Aiyuan Yang, Bin Xiao, Bingning Wang, Borong Zhang, Ce Bian, Chao Yin, Chenxu Lv, Da Pan, Dian Wang, Dong Yan, et al. 2023. Baichuan 2: Open large-scale language models. arXiv preprint arXiv:2309.10305.

Siyu Yuan, Jiangjie Chen, Ziquan Fu, Xuyang Ge, Soham Shah, Charles Robert Jankowski, Yanghua Xiao, and Deqing Yang. 2023. Distilling script knowledge from large language models for constrained language planning. arXiv preprint arXiv:2305.05252.

Baoli Zhang, Haining Xie, Pengfan Du, Junhao Chen, Pengfei Cao, Yubo Chen, Shengping Liu, Kang Liu, and Jun Zhao. 2023. ZhuJiu: A multi-dimensional, multi-faceted Chinese benchmark for large language models. arXiv preprint arXiv:2308.14353.

Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. BERTScore: Evaluating text generation with BERT. arXiv preprint arXiv:1904.09675.
# A Appendices
# A.1 Prompt Format

The detailed prompt for constructing MCScript is shown in Table 6, and the prompt for each agent in ABSEval is shown in Table 8.
# A.2 MCScript Details

# A.2.1 Topic selection criteria

We randomly selected the 20 most common topics from WikiHow, then manually reviewed them and chose the ten topics most suitable for evaluation. The selection criteria were relevance to everyday life scenarios and exclusion of sensitive topics such as culture, religion, and beliefs. For each topic, we selected the WikiHow questions with the highest view counts, as we believe these questions are of greatest interest to people in their everyday lives.
# A.2.2 Example data

We provide a specific example evaluated using the ABSEval framework in Table 9.
# A.2.3 Crowd-sourcing Details

In this work, the annotation task presented a script task together with the script generated by an LLM, and human annotators marked whether the script contained missing, redundant, or duplicate steps, completed the goal, satisfied the constraints, had the correct step order, and adhered to commonsense constraints. We hired one annotator who, for each question, made a yes-or-no judgment. Screenshots of the instructions and annotation page are shown in Figure 8.
# A.3 Experiment Details

Table 10 lists the specific prompts used to add perturbations. To demonstrate the validity of the ABSEval framework, we removed the Answer Synthesize Agent and queried Qwen-110B-Chat to generate the standard answers directly, then compared the consistency of the Critic Agent's evaluation results with human annotations under both settings. As shown in Table 3, generating the gold answer through the Answer Synthesize Agent significantly improves the accuracy of the Critic Agent's judgments.
# A.4 Model Performance in ABSEval

Topic heat map. Figure 9 presents a heat map of the performance of the evaluated LLMs across all topics.

Overall performance of metrics. Figure 10 shows the overall performance of all LLMs across the seven evaluation metrics in ABSEval.
# Step 1: Obtain abstract question

Source: WikiHow

Question: How to buy Disney World tickets

# Step 2: Add constraint and generate questions with constraints

Prompt:

Create possible specific goals according to the abstract Goal, here are some examples

Abstract Goal: Create a Decision Tree

{"Constraint": "on Computer", "Specific Goal": "Create a Decision Tree on a Computer"}

Here is my question:

Abstract Goal: {ABSTRACT QUESTION}

Please answer me in JSON format {"Constraint": "...", "Specific Goal": "..."}

One constraint: Online

Two constraints: Online, For a family of four

Three constraints: Online, For a family of four, During peak season

Question one: Learn how to buy Disney World tickets online

Question two: Research how to buy Disney World tickets online for a family of four

Question three: Research and purchase Disney World tickets online for a family of four during peak season.

Table 6: An example of generating a restricted script task.
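A minimal sketch of how Step 2 might be automated: fill the template above with an abstract WikiHow question and parse the JSON reply. The `query_llm` helper is a hypothetical placeholder for the generator model; the template text follows Table 6.

```python
# Hedged sketch of the constraint-generation step.
import json

def build_constraint_prompt(abstract_goal: str) -> str:
    return (
        "Create possible specific goals according to the abstract Goal, "
        "here are some examples\n"
        "Abstract Goal: Create a Decision Tree\n"
        '{"Constraint": "on Computer", '
        '"Specific Goal": "Create a Decision Tree on a Computer"}\n'
        "Here is my question:\n"
        f"Abstract Goal: {abstract_goal}\n"
        'Please answer me in JSON format {"Constraint": "...", "Specific Goal": "..."}'
    )

def query_llm(prompt: str) -> str:
    """Placeholder for the model that generates constrained questions."""
    raise NotImplementedError

def add_constraint(abstract_goal: str) -> dict:
    reply = query_llm(build_constraint_prompt(abstract_goal))
    return json.loads(reply)  # e.g. {"Constraint": "Online", "Specific Goal": "..."}
```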
Figure 8: Screenshots of the instructions and annotation page.
Detailed analysis of each metric. Figure 11 analyzes the performance of each participating LLM on each metric. Models from the same series are drawn in the same color, with darker shades representing larger parameter sizes.

The relationship between LLM performance and script length. Figure 12 illustrates the relationship between model performance and the length of script tasks across four different metrics.
Figure 9: The heat map of all LLMs across question topics. The score in each cell is the average score over all questions within the corresponding topic (columns correspond to topics, rows to the different LLMs). Each question's score is the number of correct metrics divided by the total number of metrics.
Figure 10: The accuracy of all LLMs on each ABSEval metric.
Figure 11: Performance of all LLMs on the ABSEval metrics.
Figure 12: Performance of all LLMs with respect to response length.
# Example 1

Abstract Task: How to buy Disney World tickets.

Constraint: online.

Specific Task: Learn how to buy Disney World tickets online.

Category: Arts and Entertainment.

Steps:

1. Determine your visit dates and duration.
2. Choose the ticket type: base, park hopper, or park hopper plus.
3. Visit the official Disney World website.
4. Select "Tickets and Passes" or a similar option.
5. Input your visit dates and the number of days for the ticket.
6. Choose any add-ons like Park Hopper or special experiences.
7. Enter the number of tickets needed for adults and children.
8. Provide personal and payment information.
9. Review your order for accuracy.
10. Confirm and complete the purchase.
11. Save or print your e-tickets.
12. Enjoy your visit to Disney World!

# Example 2

Abstract Task: How to maintain your health.

Constraint: using a mobile app.

Specific Task: Develop a daily mindfulness meditation routine using a mobile app to improve mental health.

Category: Health.

Steps:

1. Choose a mobile app: Select a mobile app that specializes in guided mindfulness meditations, such as Headspace, Calm, or Insight Timer.
2. Set a consistent time: Decide on a specific time to meditate each day. Morning meditations can help set the tone for the day, while evening meditations can aid in relaxation before sleep.
3. Start small: Begin with short, 5-minute meditations and gradually increase the length of your sessions as your comfort level grows.
4. Create a calm environment: Find a quiet, comfortable, and distraction-free environment to focus on your meditation.
5. Follow the guided meditation: Use the mobile app to follow guided mindfulness meditations. Pay attention to the instructions given and try to focus on your breath or other focal points as suggested.
6. Make it a daily habit: Make sure to meditate daily. Repetition is key in forming habits.
7. Track your progress: Utilize the tracking features in your chosen app to monitor your progress over time.
8. Practice patience: Changes in mental health will not be instantaneous. Understand that mindfulness is a skill that takes time and patience to develop.
9. Be consistent: Stick to your chosen time and duration of mindfulness meditation every day for best results.
10. Seek professional help when needed: While mindfulness meditation is a great tool for maintaining mental health, always seek professional help if you are struggling with mental health issues.

Table 7: An example of data in MCScript.
# Answer Synthesize Agent

Now I want you to play the role of a learner. I hope you can help me complete this planning task through your own knowledge and by learning from other examples.

The task is: [Tasks]

Here are some examples, but note that these examples may have flaws. I hope you can provide me with comprehensive guidance based on these examples. [EXAMPLES]

If you do not think these examples are useful, you can give your answer directly.

Please pay attention! Answer me in the following format and ensure that each step is concise: 1. ..., 2. ..., 3. .... Do not answer irrelevant content.
# Critic Agent

Please play the role of an evaluator. The question that needs your evaluation is [Tasks].

The standard answer is: [Gold Answer]

The answer I need you to evaluate is: [Model Answer]

I would like you to check whether there are any missing, redundant, or duplicate steps in these steps.

missing steps: The script is missing steps.

redundant steps: There are steps unrelated to achieving the goal.

duplicate steps: There are duplicate steps present.

Let's think step by step.

Please answer me in the JSON format:

{"missing_steps": "True",
"redundant_steps": "True",
"duplicate_steps": "True",
"explain": "This script is missing key step XXX. Step x is not related to the target and belongs to redundant steps, but there are no duplicate steps..."}
# Execute Agent

Now please play the role of an executor to complete this task: [QUESTION]. There are constraints on the task: [CONSTRAINT]. I have provided you with the steps to complete the task: [MODEL INFERENCE]. Please follow these steps and answer my questions below.

1. If the script meets the constraints [CONSTRAINT], meet CONSTRAINT is True; if it does not, meet CONSTRAINT is False.
2. If the script achieves the goal, complete_goal is True; if it does not, complete_goal is False.
3. If the sequence of steps is correct, step_order.correct is True; if it is wrong, step_order.correct is False.

Let's think step by step.

Please answer me in the JSON format:

{"meet CONSTRAINT": "False",
"complete_goal": "True",
"step_order.correct": "False",
"explain": "This script did not meet the constraints given in the question. In addition, there is an error in the sequence of steps, because ..."}
# Commonsense Agent

Please play the role of an evaluator.

Please evaluate this answer: [MODEL INFERENCE]

Please determine whether any part of this script does not conform to commonsense.

If each step of this script conforms to common sense, please answer me True; otherwise, answer me False. Please answer me only with True or False.

Table 8: The detailed prompt of each agent in ABSEval.
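Since the Critic and Execute Agents reply in JSON with string booleans and the Commonsense Agent replies with a bare True/False, a thin parsing layer can merge their outputs into one verdict per script. The sketch below is ours, not the authors' code; the JSON keys are taken verbatim from the prompts in Table 8.

```python
# Hedged sketch of consuming the agents' replies from Table 8.
import json

def parse_bool(value: str) -> bool:
    return value.strip().lower() == "true"

def merge_verdicts(critic_json: str, execute_json: str, commonsense_reply: str) -> dict:
    critic = json.loads(critic_json)
    execute = json.loads(execute_json)
    return {
        "missing_steps": parse_bool(critic["missing_steps"]),
        "redundant_steps": parse_bool(critic["redundant_steps"]),
        "duplicate_steps": parse_bool(critic["duplicate_steps"]),
        "meet_constraint": parse_bool(execute["meet CONSTRAINT"]),
        "complete_goal": parse_bool(execute["complete_goal"]),
        "step_order_correct": parse_bool(execute["step_order.correct"]),
        "commonsense": parse_bool(commonsense_reply),
    }
```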
Question: Develop an Online Calculator to Determine the Correct Amount of Baking Soda to Drink for Health Benefits Based on Individual Body Weight and Age

Constraints: For Health Benefits, Using Measured Amounts
Mistral-7B-Instruct-v0.2 Answer:

1. Dissolve 2-3 teaspoons of baking soda in a glass of lukewarm water (about 4 ml).
2. Stir the mixture until the baking soda is completely dissolved.
3. Drink the baking soda water slowly.
4. Consume the solution on an empty stomach for optimal effectiveness.
5. Follow up with a full glass of water to help neutralize stomach acid.

Note: Consult a healthcare professional before consuming baking soda for medicinal purposes, especially if you have underlying health conditions.
# Eval Result:

1. Missing Steps: True
2. Redundant Steps: False
3. Duplicate Steps: False
4. Executable: True
5. Satisfy Constraints: False
6. Complete Goal: False
7. Step Order: True

Explain: The provided steps are missing the development process of an online calculator, which was the core task. Instead, they describe how to consume baking soda water, unrelated to creating a digital tool for calculating the recommended dosage based on individual body weight and age. There are no redundant or duplicate steps within the given script, but the entire script is off-target and does not address the development of an online calculator. The script does not meet the constraints because it does not include a mechanism to calculate the correct amount of baking soda based on individual body weight and age, which was a key requirement of the task. The step order is correct in terms of the process of preparing and consuming baking soda water, but the script as described does not include the initial calculation step necessary to achieve the goal.
Table 9: An example of an evaluation result.
Perturbed category: Missing Steps
Prompt: Please remove the most crucial steps from the script, so that the script cannot complete the goal. The goal of this script is [Goal]. The script is [Script]. Please answer me with the script after deleting the key steps.

Perturbed category: Redundant Steps
Prompt: Please add a redundant step that is completely unrelated to the goal to the script. The goal of this script is [Goal]. The script is [Script]. Please provide the complete script with the added step.

Perturbed category: Complete Goal
Prompt: Modify this script so that it CANNOT achieve the goal [Goal]. The script is [Script]. Please provide me with the modified script.

Perturbed category: Satisfy Constraint
Prompt: Please modify this script so that it does not meet the restrictions [Constraint]. The script is [Script]. Please answer me in this format: 1. 2. 3. ...

Table 10: Prompts for adding perturbations to questions.
absevalanagentbasedframeworkforscriptevaluation/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2c069c50139a5cac447a42eaccdaa627c5fbfc52066447cdd0771a26734667bb
size 1278753

absevalanagentbasedframeworkforscriptevaluation/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6af4f3f21d91a9ca3ef14e0fe31c3d3f3c785f8b9eee8e41396284cee24faa14
size 509460

academicscancontributetodomainspecializedlanguagemodels/8f961a0e-5519-4e77-ad99-b5d1e1517080_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe835e23833d74e62bfe4b16014d16f4af110e083df1aab7026c3bb6e7adbb05
size 71158

academicscancontributetodomainspecializedlanguagemodels/8f961a0e-5519-4e77-ad99-b5d1e1517080_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0cc890df8896bd66a00ddb18b4005e57305d5161f1adba6610d667c643f2f2a4
size 93487

academicscancontributetodomainspecializedlanguagemodels/8f961a0e-5519-4e77-ad99-b5d1e1517080_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1c8596e57b7021a260c8a00a567dc57c2358e22d238ad99566eb9d6e576a7a1e
size 254060

academicscancontributetodomainspecializedlanguagemodels/full.md
ADDED
@@ -0,0 +1,219 @@
# Academics Can Contribute to Domain-Specialized Language Models

Mark Dredze<sup>1,2</sup>, Genta Indra Winata<sup>3*</sup>, Prabhanjan Kambadur<sup>1</sup>, Shijie Wu<sup>4*</sup>, Ozan Irsoy<sup>1</sup>, Steven Lu<sup>1</sup>, Vadim Dabravolski<sup>1</sup>, David S Rosenberg<sup>1</sup>, Sebastian Gehrmann<sup>1</sup>

<sup>1</sup>Bloomberg <sup>2</sup>Johns Hopkins University <sup>3</sup>Capital One <sup>4</sup>Anthropic

mdredze@bloomberg.net
# Abstract

Commercially available models dominate academic leaderboards. While impressive, this has concentrated research on creating and adapting general-purpose models to improve leaderboard standings for large language models. However, leaderboards aggregate many individual tasks, and general-purpose models often underperform in specialized domains, where domain-specific or adapted models yield superior results. This focus on large general-purpose models excludes many academics and draws attention away from areas where they can make important contributions. We advocate for a renewed focus on developing and evaluating domain- and task-specific models, and highlight the unique role of academics in this endeavor.
# 1 Introduction

Natural language processing (NLP) research has historically produced domain- and task-specific supervised models. The field has shifted course in the past few years, with a singular focus on general-purpose generative large language models (LLMs) that, rather than focusing on a single task or domain, do well across many tasks (Brown et al., 2020; Chowdhery et al., 2022; Workshop et al., 2022; Zhang et al., 2022; Touvron et al., 2023b). By training on massive amounts of data from many sources, these models can do well on extremely broad professional and linguistic examinations (Achiam et al., 2023; Anil et al., 2023), college-level knowledge questions (Hendrycks et al., 2021; Lai et al., 2023), and collections of reasoning tasks (Suzgun et al., 2023).
While the trend to develop a single, general-purpose generative model is a net positive change that has resulted in impressive results, it has also slowed down progress in other areas of NLP. First, we are less focused on problems that cannot be solved with a chat-like interface. Second, the best-performing LLMs are often commercial systems, which are sometimes opaque about training data, system architecture, and training details. Third, frequent model updates hinder reproducibility.
The resources required to train large general language models naturally constrain research to large organizations, and researchers (or academics) outside of these organizations have become dependent on closed commercial systems, or open systems with limited transparency regarding their training data. This is partly reflected in broader AI trends: Zhang et al. (2021) found that roughly 30% of papers at AI conferences (including *CL) have a Fortune 500 tech affiliation. Increased resources contribute to the success of transformer-based LLMs (Vaswani et al., 2017), with available hardware (Hooker, 2021) and benchmarks (Dehghani et al., 2021) both playing a deciding role in what models end up being developed. By optimizing the average score across hundreds of shallow tasks, we are smoothing out any signal that would be gained from deeply engaging with individual tasks. Developing domain-specific models can help identify model and training choices that yield improvements on tasks within those domains.

In this paper, we argue for renewed attention to domain-specific models with rigorous and domain-expert-informed evaluations. Because many academics are excluded from LLM development due to resource constraints, attention has been drawn away from research areas where academics can make the greatest contributions: deep dives on specific challenging problems. Thus, we propose several research questions to reorient the research community towards developing domain-specific models and applications, where academics are uniquely suited to lead.
# 2 LLMs: A Brief History

While modern LMs date back to Jelinek (1976), we summarize very recent history to describe the current environment. In the wake of the popularization of neural word embeddings by word2vec (Mikolov et al., 2013), contextualized representations of language as features for supervised systems were realized by ELMo (Peters et al., 2018), followed by BERT (Devlin et al., 2019; Liu et al., 2019). BERT and subsequent models became the base models for supervised systems utilizing task-specific fine-tuning and continued pre-training for new domains (Gururangan et al., 2020), e.g., clinical ELMo (Schumacher and Dredze, 2019) and clinicalBERT (Huang et al., 2019) for clinical tasks.
Parallel work utilized transformers for autoregressive LLMs, resulting in GPT (Radford et al., 2018), GPT-2 (Radford et al., 2019), BART (Lewis et al., 2020a; Liu et al., 2020), CTRL (Keskar et al., 2019), T5 (Raffel et al., 2020; Xue et al., 2021), and XGLM (Lin et al., 2021). These models had some few-shot capabilities, but they could each be adapted (fine-tuned) for a specific task of interest. Some models were available to academics, though training a new model was beyond reach for many.

GPT-3 (Brown et al., 2020) greatly increased model size and changed our understanding of LLMs. Impressive in-context (few-shot) learning pushed the idea that a single large model could solve a wide range of tasks. While the cost of resources meant training was restricted to a few groups, work focused on training bigger models (Chowdhery et al., 2022; Anil et al., 2023; Zhang et al., 2022; Touvron et al., 2023a; Rae et al., 2021).
While only a few could train large models, many studied how best to use them: prompt engineering (Liu et al., 2023), prompt tuning (Han et al., 2022; Wei et al., 2022), and evaluation (Liang et al., 2022), among many other topics. Commercial LLM APIs, and eventually open source models (Zhang et al., 2022; Workshop et al., 2022; Touvron et al., 2023a,b; Groeneveld et al., 2024), facilitated this work. Ignat et al. (2024) noted the massive research shift to LLMs reflected in Google Scholar citations. Subsequent work in instruction tuning (Ouyang et al., 2022) and fine-tuning (Wei et al., 2022; Chung et al., 2022; Longpre et al., 2023) has further centralized research around general-purpose models. Many consider fine-tuning for specific applications to be obsolete: why would you tune a model for a specific task when you can tune a single model to do well on all tasks?<sup>1</sup>

Despite this view, multiple domain-specific LLMs have demonstrated that domain-specific data leads to models that outperform much larger models (Wu et al., 2023; Taylor et al., 2022). MedPaLM has shown that adapting even giant LLMs to a specific domain leads to vastly increased performance (Singhal et al., 2022, 2023). Furthermore, the release of LLaMA (Touvron et al., 2023a) quickly led to Alpaca (Taori et al., 2023) and a wave of new fine-tuned versions of LLaMA for specific tasks. This trend strongly indicates that domain-specific models, especially at constrained sizes, are still highly relevant.
To be clear, our concern is not with closed models, which play an important role in the model ecosystem. Models range from full to limited to no access, with some closed models providing incredibly detailed information (Hoffmann et al., 2022; Rae et al., 2019; Wu et al., 2023) and others providing none (Achiam et al., 2023). Our lament over this focus on general models, either open or closed, is that it draws attention away from work on task- and domain-specific models and evaluations. Academics have become product testers, instead of focusing on tasks where they can play a unique role. Moreover, existing academic benchmarks increasingly serve a reduced purpose for commercial models; we are hill-climbing on benchmarks without a way to ensure existing LLMs have not been trained to excel on these benchmarks (Dodge et al., 2021). Furthermore, we rely on benchmarks in place of deep engagement with an application and its stakeholders.
# 3 The Need for Domain-Specific LLMs
In general, web data does not reflect the needs of all NLP systems. Historically, the community has developed systems for specialized domains such as finance, law, bio-medicine, and science. Accordingly, there have been efforts to build LLMs for these domains (Wu et al., 2023; Taylor et al., 2022; Singhal et al., 2022; Bolton et al., 2023; Luo et al., 2022; Lehman et al., 2023; García-Ferrero et al., 2024). We need a deep investment in how best to develop and evaluate these models in partnership with domain experts. How should we best integrate insights gained from the development of general-purpose models with these efforts? We propose several research directions.
**How can general-purpose models inform domain-specific models?** Building domain-specific models should benefit from the insights and investments that went into general-purpose models. There are several strategies: training domain-specific models from scratch (Taylor et al., 2022; Bolton et al., 2023), mixing general and domain-specific data (Wu et al., 2023), and fine-tuning existing models (Singhal et al., 2022, 2023). Focusing on domain-specific needs, applications, and knowledge, with guidance from domain experts, will yield better models for specific NLP tasks. Which approach yields the best trade-off between task performance and overall cost?
**What is the role of in-context learning and fine-tuning?** Both LIMA (Zhou et al., 2023) and MedPaLM (Singhal et al., 2022) use a small number of examples to tune a model. With expanding context sizes, we may soon rely entirely on in-context learning (Petroni et al., 2020). This blurs the line between changing model parameters and conditioning during inference. Beyond the inference-speed tradeoffs between the two, there may be value in tuning on tens of thousands (or more) of examples. Which domain-specific examples are most effective to include, and in what manner?
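To make the last question concrete, one simple strategy studied in the prompting literature is to pick the in-context examples most similar to the input. The sketch below is only an illustration of that idea under a toy token-overlap similarity; the data and function names are ours, not from any cited system.

```python
# Hypothetical example-selection sketch: choose the k labeled domain examples
# that overlap most with the query, then place them in the prompt.

def overlap(a: str, b: str) -> float:
    # Jaccard similarity over whitespace tokens (a deliberately crude proxy
    # for the dense retrievers used in practice).
    ta, tb = set(a.lower().split()), set(b.lower().split())
    return len(ta & tb) / max(1, len(ta | tb))

def select_examples(query: str, pool: list[tuple[str, str]], k: int = 2) -> list[tuple[str, str]]:
    return sorted(pool, key=lambda ex: -overlap(query, ex[0]))[:k]

pool = [
    ("Patient reports chest pain on exertion.", "cardiology"),
    ("The contract was breached by late delivery.", "law"),
    ("Shortness of breath and chest tightness.", "cardiology"),
]
shots = select_examples("Chest pain radiating to the left arm", pool)
prompt = "\n".join(f"Text: {t}\nLabel: {l}" for t, l in shots)
print(prompt + "\nText: Chest pain radiating to the left arm\nLabel:")
```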
**How can LLMs be integrated with domain-specific knowledge?** Specialized knowledge is key in many domains. RAG (Lewis et al., 2020b; Guu et al., 2020) and KILT-derived works (Petroni et al., 2021) focus on knowledge-intensive tasks by including retrieval steps. Work on attributed QA (Bohnet et al., 2022) takes a similar approach, as do search LLMs that require interaction with retrieved data (Nakano et al., 2021). Rich, updated knowledge sources will always exist beyond the model, especially in environments like medicine, finance, and many academic disciplines.
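As a minimal illustration of the retrieve-then-generate pattern these systems share, the sketch below pairs a toy lexical retriever with a prompt builder; real systems use dense retrievers and an LLM, and the corpus here is invented for the example.

```python
# Toy RAG-style loop: retrieve supporting documents, then build a prompt that
# conditions generation on them. Everything here is illustrative.

def retrieve(query: str, corpus: list[str], k: int = 2) -> list[str]:
    q = set(query.lower().split())
    return sorted(corpus, key=lambda d: -len(q & set(d.lower().split())))[:k]

def build_prompt(query: str, docs: list[str]) -> str:
    context = "\n".join(f"- {d}" for d in docs)
    return f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"

corpus = [
    "Metformin is a first-line treatment for type 2 diabetes.",
    "A 10-K filing summarizes a company's annual financial performance.",
    "Statutes of limitations vary by jurisdiction and offense.",
]
query = "What is a first-line treatment for type 2 diabetes?"
print(build_prompt(query, retrieve(query, corpus)))  # feed this to an LLM
```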
# 4 Evaluation of Domain-Specific Models
The evaluation of NLP systems is at a crossroads, and the downstream usage of LLMs and evaluation approaches have diverged. Benchmarks assume that their results translate to insights into similar tasks and usefulness for commercial applications. But benchmarks have become increasingly narrow in scope, oftentimes assessing one metric on a single, often flawed, dataset (Mitchell et al., 2019; Kiela et al., 2021; Ethayarajh and Jurafsky, 2020). The primary evaluation approach for LLMs has been to evaluate on a broad set of these narrow benchmarks (Liang et al., 2022, HELM; Srivastava et al., 2022, BIG-Bench). High average performance argues for a broad range of capabilities; however, one size may not fit all. Since specific uses of LLMs are typically much narrower, we identify three major issues, and associated research opportunities, with this approach.
**Depth-first Evaluation** Current approaches focus on a single model doing everything well on average instead of being useful in a single domain. However, it is widely acknowledged that the standard benchmarks for most tasks are insufficient (e.g., for summarization, Fabbri et al., 2021; Goyal et al., 2022). Task-specific evaluations have thus adopted additional protocols that measure how well models transfer to different domains, how robust they are, and whether they stand up to concept drift (Mille et al., 2021; Dhole et al., 2021). These details disappear when benchmarking on 100+ tasks. Yet a model's usefulness is not defined solely by doing okay on everything, but rather by how well it performs on the specific, narrow tasks that provide value. This value is only realized if the model does not suffer from catastrophic failures.
Exemplar studies that perform deep dives on LLMs for specific tasks exist in healthcare (Zack et al., 2024; Eriksen et al., 2023; Ayers et al., 2023; Han et al., 2024; Chen et al., 2024; Strong et al., 2023), law (Blair-Stanek et al., 2023b,a; Magesh et al., 2024), and physics (Kim et al., 2024), among other areas. We encourage more work on evaluation practices for specific tasks that can handle various model setups and yield informative insights (Zhang et al., 2023; Liang et al., 2022).
**Sound Metrics** For convenience, most benchmark tasks are formulated as multiple-choice question answering or classification. This is not how LLMs are often used. For the much more common generation tasks, researchers have been ringing alarms about broken evaluations (Gehrmann et al., 2023). It is dubious whether we gain insights into non-task-specific generation through NLU benchmarks. If we are performing a depth-first evaluation of a generation task, a remaining hurdle (and the reason researchers fall back to NLU tasks) is the lack of robust metrics. While there is much recent work on better metrics (Celikyilmaz et al., 2020; Gehrmann et al., 2023), a troubling trend is the use of LLMs as evaluators (e.g., Sellam et al., 2020; Chiang et al., 2023). This approach poses many risks, including the implicit assumption that the evaluating model has access to the ground-truth judgment. While there are some promising results, using an LLM out of the box as an evaluator should be avoided (e.g., Wang et al., 2023a,b). Moreover, it is unclear how to evaluate the evaluator when it is a non-deterministic API, how to scale the development of learned metrics, or how to quantify the strength of a metric.
**Products are not Baselines** If we really do want to evaluate 100+ tasks, there are many issues with the soundness of evaluation setups. At this scope, it is impossible to run careful ablation studies or to assess the effect of methodological changes in a causal manner. Moreover, different LLMs respond differently to prompts. The BLOOM evaluation averaged over multiple prompts and found significant variance (Workshop et al., 2022). This variance leads to a lack of reproducibility: LLaMA (Touvron et al., 2023a) claimed high MMLU (Hendrycks et al., 2021) performance but did not release the prompts that produced it. Similarly, the evaluation scheme makes a difference (Liang et al., 2022, Fig. 33). High evaluation costs mean benchmarks pick a small number of setups (sometimes only one) for each task, which introduces further bias and makes it hard to construct fair benchmarks on many tasks.
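A lightweight version of the protocol suggested by the BLOOM evaluation is easy to state: run the same benchmark under several prompt templates and report the mean and spread instead of one number. The templates and scores below are invented for illustration.

```python
# Multi-prompt evaluation sketch: report variance across templates rather than
# a single prompt's score. Numbers are illustrative stand-ins for real runs.
import statistics

templates = [
    "Question: {q}\nAnswer:",
    "{q}\nThe answer is",
    "Q: {q}\nA:",
]
scores = [0.71, 0.64, 0.68]  # accuracy of the same model under each template

print(f"mean={statistics.mean(scores):.3f}, stdev={statistics.stdev(scores):.3f}")
# mean=0.677, stdev=0.035
```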
An additional issue with the current benchmarking approach is that the best-performing models are often commercial APIs. With limited transparency regarding data and training, we cannot fairly evaluate these models (e.g., we cannot rule out data leakage). Furthermore, task-specific tuning may have been selected based on these specific benchmarks. Moreover, the underlying models change frequently, so it is unclear whether a result will hold for long.
These evaluation issues prompt significant open questions: 1) How do we develop consistent evaluation setups across models that give true measures of performance? 2) How do we develop evaluation setups and metrics more closely aligned with downstream usage? 3) How do we develop evaluation suites that support depth-first evaluation and not breadth-first benchmarking?
# 5 The Role of Academics
A focus on general-purpose LLMs has forced academics to work with large base models and, perhaps, shifted the focus toward solving problems of immediate industrial interest. Many academics feel excluded from current research trends (Ignat et al., 2024), and the academic-industry relationship is changing (Littman et al., 2022). Shifting attention back to domain-specific applications emphasizes areas where academics hold an advantage: partnerships with domain experts to invest in specific tasks, and consideration of broader societal needs.
Developing domain-specific models requires domain expertise, and universities are diverse academic environments that house experts in many domains. Collaborations with these experts can identify the data sources, tasks, and challenges important within each domain. Furthermore, these collaborations are the best avenues for better aligning evaluations with use cases (Winata et al., 2024), and they can support the development of proper metrics. These collaborations are necessary to explore wide-open interdisciplinary topics, such as models for protein structure prediction (Tunyasuvunakool et al., 2021; Vig et al., 2021) and games as proxies for reasoning (Silver et al., 2016; Agostinelli et al., 2019; Schrittwieser et al., 2020). This includes developing domain-specific resources, which require domain experts to properly design and construct the datasets. Further, areas where industry underinvests are those where academics could focus attention. For example, low-resource languages are not served by a general-purpose multilingual LLM, nor will we reasonably have enough data to support current LLM training methods. Dialects and variations within languages are still wide-open topics (Aji et al., 2022; Winata et al., 2023; Nicholas and Bhatia, 2023).
General-purpose LLMs are unlikely to solve problems in many important domains; many open research problems can only be solved by domain-specific approaches. Focusing on domain-specific knowledge will help us acquire better models and develop application strategies more aligned with how humans learn domain-specific knowledge (Tricot and Sweller, 2014). For many interdisciplinary areas, subject-matter experts are essential, and the problems must be defined clearly. The first pass from an LLM is often impressive, but it hides the trenches and the areas where things are most interesting. We need a renewed focus on developing and evaluating domain-specific models and applications, an area where academics can play a leading role. Let us not be distracted by claims that a single model solves all tasks, and instead deeply explore and understand the needs and challenges of specific domains.
# Limitations
The literature that we explored in this opinion paper is limited to the area of LLMs. We trace the history of LLMs from the literature on word embeddings, encoder-only models, and generative transformers through to the latest advances in API-based LLMs.
# Ethics Statement
Our work does not include any experiments or use of data. We foresee no potential ethical issues in this work.
# References
Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
Forest Agostinelli, Stephen McAleer, Alexander Shmakov, and Pierre Baldi. 2019. Solving the Rubik's Cube with deep reinforcement learning and search. Nature Machine Intelligence, 1(8):356-363.
Alham Aji, Genta Indra Winata, Fajri Koto, Samuel Cahyawijaya, Ade Romadhony, Rahmad Mahendra, Kemal Kurniawan, David Moeljadi, Radityo Eko Prasojo, Timothy Baldwin, et al. 2022. One country, 700+ languages: NLP challenges for underrepresented languages and dialects in Indonesia. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7226-7249.
Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. 2023. Palm 2 technical report. arXiv preprint arXiv:2305.10403.
John W Ayers, Adam Poliak, Mark Dredze, Eric C Leas, Zechariah Zhu, Jessica B Kelley, Dennis J Faix, Aaron M Goodman, Christopher A Longhurst, Michael Hogarth, et al. 2023. Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum. JAMA Internal Medicine, 183(6):589-596.
Andrew Blair-Stanek, Nils Holzenberger, and Benjamin Van Durme. 2023a. Can gpt-3 perform statutory reasoning? In Proceedings of the Nineteenth International Conference on Artificial Intelligence and Law, pages 22-31.
Andrew Blair-Stanek, Nils Holzenberger, and Benjamin Van Durme. 2023b. Openai cribbed our tax example, but can gpt-4 really do tax? arXiv preprint arXiv:2309.09992.
Bernd Bohnet, Vinh Q. Tran, Pat Verga, Roee Aharoni, Daniel Andor, Livio Baldini Soares, Jacob Eisenstein, Kuzman Ganchev, Jonathan Herzig, Kai Hui, Tom Kwiatkowski, Ji Ma, Jianmo Ni, Tal Schuster, William W. Cohen, Michael Collins, Dipanjan Das, Donald Metzler, Slav Petrov, and Kellie Webster. 2022. Attributed question answering: Evaluation and modeling for attributed large language models. CoRR, abs/2212.08037.
Elliot Bolton, David Hall, Michihiro Yasunaga, Tony Lee, Chris Manning, and Percy Liang. 2023. BioMedLM. https://github.com/stanford-crfm/BioMedLM.
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems, 33:1877-1901.
Asli Celikyilmaz, Elizabeth Clark, and Jianfeng Gao. 2020. Evaluation of text generation: A survey. arXiv, 2006.14799.
Hanjie Chen, Zhouxiang Fang, Yash Singla, and Mark Dredze. 2024. Benchmarking large language models on answering and explaining challenging medical questions. arXiv preprint arXiv:2402.18060.
Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. 2023. Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6.
Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. Palm: Scaling language modeling with pathways. CoRR, abs/2204.02311.
Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. 2022. Scaling instruction-finetuned language models. arXiv, 2210.11416.
Mostafa Dehghani, Yi Tay, Alexey A. Gritsenko, Zhe Zhao, Neil Houlsby, Fernando Diaz, Donald Metzler, and Oriol Vinyals. 2021. The benchmark lottery. CoRR, abs/2107.07002.
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
Kaustubh D. Dhole, Varun Gangal, Sebastian Gehrmann, Aadesh Gupta, Zhenhao Li, Saad Mahamood, Abinaya Mahendiran, Simon Mille, Ashish Srivastava, Samson Tan, et al. 2021. NL-Augmenter: A framework for task-sensitive natural language augmentation. CoRR, abs/2112.02721.
Jesse Dodge, Maarten Sap, Ana Marasovic, William Agnew, Gabriel Ilharco, Dirk Groeneveld, Margaret Mitchell, and Matt Gardner. 2021. Documenting large webtext corpora: A case study on the colossal clean crawled corpus. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1286-1305.
Alexander V Eriksen, Soren Møller, and Jesper Ryg. 2023. Use of gpt-4 to diagnose complex clinical cases.
Kawin Ethayarajh and Dan Jurafsky. 2020. Utility is in the eye of the user: A critique of NLP leaderboards. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4846-4853, Online. Association for Computational Linguistics.
Alexander R. Fabbri, Wojciech Kryściński, Bryan McCann, Caiming Xiong, Richard Socher, and Dragomir Radev. 2021. SummEval: Re-evaluating summarization evaluation. Transactions of the Association for Computational Linguistics, 9:391-409.
Iker García-Ferrero, Rodrigo Agerri, Aitziber Atutxa Salazar, Elena Cabrio, Iker de la Iglesia, Alberto Lavelli, Bernardo Magnini, Benjamin Molinet, Johana Ramirez-Romero, German Rigau, et al. 2024. Medical mT5: An open-source multilingual text-to-text LLM for the medical domain. arXiv preprint arXiv:2404.07613.
Sebastian Gehrmann, Elizabeth Clark, and Thibault Sellam. 2023. Repairing the cracked foundation: A survey of obstacles in evaluation practices for generated text. Journal of Artificial Intelligence Research, 77:103-166.
Tanya Goyal, Junyi Jessy Li, and Greg Durrett. 2022. News summarization and evaluation in the era of GPT-3. CoRR, abs/2209.12356.
Dirk Groeneveld, Iz Beltagy, Pete Walsh, Akshitaa Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Harsh Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. 2024. Olmo: Accelerating the science of language models. arXiv preprint arXiv:2402.00838.
Suchin Gururangan, Ana Marasovic, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360.
Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Ming-Wei Chang. 2020. Retrieval augmented language model pre-training. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pages 3929-3938. PMLR.
Tessa Han, Aounon Kumar, Chirag Agarwal, and Himabindu Lakkaraju. 2024. Towards safe large language models for medicine. In ICML 2024 Workshop on Models of Human Feedback for AI Alignment.
Xu Han, Weilin Zhao, Ning Ding, Zhiyuan Liu, and Maosong Sun. 2022. PTR: Prompt tuning with rules for text classification. AI Open, 3:182-192.
Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2021. Measuring massive multitask language understanding. In International Conference on Learning Representations.
Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. 2022. An empirical analysis of compute-optimal large language model training. Advances in Neural Information Processing Systems, 35:30016-30030.
Sara Hooker. 2021. The hardware lottery. Commun. ACM, 64(12):58-65.
Cheng-Yu Hsieh, Chun-Liang Li, Chih-kuan Yeh, Hootan Nakhost, Yasuhisa Fujii, Alex Ratner, Ranjay Krishna, Chen-Yu Lee, and Tomas Pfister. 2023. Distilling step-by-step! Outperforming larger language models with less training data and smaller model sizes. In Findings of the Association for Computational Linguistics: ACL 2023, pages 8003-8017.
Kexin Huang, Jaan Altosaar, and Rajesh Ranganath. 2019. Clinicalbert: Modeling clinical notes and predicting hospital readmission. arXiv preprint arXiv:1904.05342.
Oana Ignat, Zhijing Jin, Artem Abzaliev, Laura Biester, Santiago Castro, Naihao Deng, Xinyi Gao, Aylin Ece Gunal, Jacky He, Ashkan Kazemi, et al. 2024. Has it all been solved? open nlp research questions not solved by large language models. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 8050-8094.
Frederick Jelinek. 1976. Continuous speech recognition by statistical methods. Proceedings of the IEEE, 64(4):532-556.
Nitish Shirish Keskar, Bryan McCann, Lav R Varshney, Caiming Xiong, and Richard Socher. 2019. Ctrl: A conditional transformer language model for controllable generation. arXiv preprint arXiv:1909.05858.
Douwe Kiela, Max Bartolo, Yixin Nie, Divyansh Kaushik, Atticus Geiger, Zhengxuan Wu, Bertie Vidgen, Grusha Prasad, Amanpreet Singh, Pratik Ringshia, Zhiyi Ma, Tristan Thrush, Sebastian Riedel, Zeerak Waseem, Pontus Stenetorp, Robin Jia, Mohit Bansal, Christopher Potts, and Adina Williams. 2021. Dynabench: Rethinking benchmarking in NLP. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4110-4124, Online. Association for Computational Linguistics.
Eun-Ah Kim, Haining Pan, Nayantara Mudur, William Taranto, Subhashini Venugopalan, Yasaman Bahri, and Michael Brenner. 2024. Performing Hartree-Fock many-body physics calculations with large language models. Bulletin of the American Physical Society.
Viet Lai, Chien Nguyen, Nghia Ngo, Thuat Nguyen, Franck Dernoncourt, Ryan Rossi, and Thien Nguyen. 2023. Okapi: Instruction-tuned large language models in multiple languages with reinforcement learning from human feedback. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 318-327.
Eric Lehman, Evan Hernandez, Diwakar Mahajan, Jonas Wulff, Micah J Smith, Zachary Ziegler, Daniel Nadler, Peter Szolovits, Alistair Johnson, and Emily Alsentzer. 2023. Do we still need clinical language models? In Conference on health, inference, and learning, pages 578-597. PMLR.
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020a. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880.
Patrick S. H. Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, and Douwe Kiela. 2020b. Retrieval-augmented generation for knowledge-intensive NLP tasks. In Advances in Neural Information Processing Systems.
Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, Benjamin Newman, Binhang Yuan, Bobby Yan, Ce Zhang, Christian Cosgrove, Christopher D. Manning, Christopher Ré, Diana Acosta-Navas, Drew A. Hudson, Eric Zelikman, Esin Durmus, Faisal Ladhak, Frieda Rong, Hongyu Ren, Huaxiu Yao, Jue Wang, Keshav Santhanam, Laurel J. Orr, Lucia Zheng, Mert Yuksekgönül, Mirac Suzgun, Nathan Kim, Neel Guha, Niladri S. Chatterji, Omar Khattab, Peter Henderson, Qian Huang, Ryan Chi, Sang Michael Xie, Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang, Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda. 2022. Holistic evaluation of language models. CoRR, abs/2211.09110.
Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, et al. 2021. Few-shot learning with multilingual language models. arXiv preprint arXiv:2112.10668.
Michael L Littman, Ifeoma Ajunwa, Guy Berger, Craig Boutilier, Morgan Currie, Finale Doshi-Velez, Gillian Hadfield, Michael C Horowitz, Charles Isbell, Hiroaki Kitano, et al. 2022. Gathering strength, gathering storms: The one hundred year study on artificial intelligence (ai100) 2021 study panel report. arXiv preprint arXiv:2210.15767.
Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. 2023. Pretrain, prompt, and predict: A systematic survey of prompting methods in natural language processing. ACM Computing Surveys, 55(9):1-35.
Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pretraining for neural machine translation. Transactions of the Association for Computational Linguistics, 8:726-742.
Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. arXiv.
Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. 2023. The FLAN collection: Designing data and methods for effective instruction tuning. arXiv, 2301.13688.
Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon, and Tie-Yan Liu. 2022. Biogpt: generative pre-trained transformer for biomedical text generation and mining. Briefings in bioinformatics, 23(6):bbac409.
Varun Magesh, Faiz Surani, Matthew Dahl, Mirac Suzgun, Christopher D Manning, and Daniel E Ho. 2024. Hallucination-free? assessing the reliability of leading ai legal research tools. arXiv preprint arXiv:2405.20362.
Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.
Simon Mille, Kaustubh D. Dhole, Saad Mahamood, Laura Perez-Beltrachini, Varun Gangal, Mihir Sanjay Kale, Emiel van Miltenburg, and Sebastian Gehrmann. 2021. Automatic construction of evaluation suites for natural language generation datasets. In Advances in Neural Information Processing Systems.
Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru. 2019. Model cards for model reporting. In Proceedings of the Conference on Fairness, Accountability, and Transparency, FAT* 2019, Atlanta, GA, USA, January 29-31, 2019, pages 220-229. ACM.
Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, et al. 2021. Webgpt: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332.
Gabriel Nicholas and Aliya Bhatia. 2023. Lost in translation: Large language models in non-english content analysis. arXiv preprint arXiv:2306.07377.
Harsha Nori, Nicholas King, Scott Mayer McKinney, Dean Carignan, and Eric Horvitz. 2023. Capabilities of gpt-4 on medical challenge problems. arXiv preprint arXiv:2303.13375.
Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35:27730-27744.
Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word representations. arXiv, 1802.05365.
Fabio Petroni, Patrick Lewis, Aleksandra Piktus, Tim Rocktäschel, Yuxiang Wu, Alexander H. Miller, and Sebastian Riedel. 2020. How context affects language models' factual predictions. In Automated Knowledge Base Construction.
Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, Vassilis Plachouras, Tim Rocktäschel, and Sebastian Riedel. 2021. KILT: A benchmark for knowledge intensive language tasks. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2523-2544, Online. Association for Computational Linguistics.
Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. 2018. Improving language understanding by generative pre-training.
Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.
Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. 2021. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446.
Jack W Rae, Anna Potapenko, Siddhant M Jayakumar, and Timothy P Lillicrap. 2019. Compressive transformers for long-range sequence modelling. arXiv preprint arXiv:1911.05507.
Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. The Journal of Machine Learning Research, 21(1):5485-5551.
Julian Schrittwieser, Ioannis Antonoglou, Thomas Hubert, Karen Simonyan, Laurent Sifre, Simon Schmitt, Arthur Guez, Edward Lockhart, Demis Hassabis, Thore Graepel, et al. 2020. Mastering Atari, Go, Chess and Shogi by planning with a learned model. Nature, 588(7839):604-609.
Elliot Schumacher and Mark Dredze. 2019. Learning unsupervised contextual representations for medical synonym discovery. JAMIA Open, 2(4):538-546.
Thibault Sellam, Dipanjan Das, and Ankur Parikh. 2020. BLEURT: Learning robust metrics for text generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7881-7892.
David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. 2016. Mastering the game of Go with deep neural networks and tree search. Nature, 529(7587):484-489.
Karan Singhal, Shekoofeh Azizi, Tao Tu, S. Sara Mahdavi, Jason Wei, Hyung Won Chung, Nathan Scales, Ajay Kumar Tanwani, Heather Cole-Lewis, Stephen Pfohl, et al. 2022. Large language models encode clinical knowledge. CoRR, abs/2212.13138.
Karan Singhal, Tao Tu, Juraj Gottweis, Rory Sayres, Ellery Wulczyn, Le Hou, Kevin Clark, Stephen Pfohl, Heather Cole-Lewis, Darlene Neal, Mike Schaekermann, Amy Wang, Mohamed Amin, Sami Lachgar, Philip Andrew Mansfield, Sushant Prakash, Bradley Green, Ewa Dominowska, Blaise Aguera y Arcas, Nenad Tomasev, Yun Liu, Renee Wong, Christopher Semturs, S. Sara Mahdavi, Joelle K. Barral, Dale R. Webster, Gregory S. Corrado, Yossi Matias, Shekoofeh Azizi, Alan Karthikesalingam, and Vivek Natarajan. 2023. Towards expert-level medical question answering with large language models. CoRR, abs/2305.09617.
Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R. Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. 2022. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv, abs/2206.04615.
Eric Strong, Alicia DiGiammarino, Yingjie Weng, Andre Kumar, Poonam Hosamani, Jason Hom, and Jonathan H Chen. 2023. Chatbot vs medical student performance on free-response clinical reasoning examinations. JAMA Internal Medicine, 183(9):1028-1030.
Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, et al. 2023. Challenging big-bench tasks and whether chain-of-thought can solve them. In Findings of the Association for Computational Linguistics: ACL 2023, pages 13003-13051.
Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca.
Ross Taylor, Marcin Kardas, Guillem Cucurull, Thomas Scialom, Anthony Hartshorn, Elvis Saravia, Andrew Poulton, Viktor Kerkez, and Robert Stojnic. 2022. Galactica: A large language model for science. CoRR, abs/2211.09085.
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurélien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023a. Llama: Open and efficient foundation language models. CoRR, abs/2302.13971.
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023b. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.
André Tricot and John Sweller. 2014. Domain-specific knowledge and why teaching generic skills does not work. Educational Psychology Review, 26:265-283.
Kathryn Tunyasuvunakool, Jonas Adler, Zachary Wu, Tim Green, Michal Zielinski, Augustin Žídek, Alex Bridgland, Andrew Cowie, Clemens Meyer, Agata Laydon, et al. 2021. Highly accurate protein structure prediction for the human proteome. Nature, 596(7873):590-596.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 5998-6008.
Jesse Vig, Ali Madani, Lav R. Varshney, Caiming Xiong, Richard Socher, and Nazneen Rajani. 2021. BERTology meets biology: Interpreting attention in protein language models. In International Conference on Learning Representations.
Peiyi Wang, Lei Li, Liang Chen, Dawei Zhu, Binghuai Lin, Yunbo Cao, Qi Liu, Tianyu Liu, and Zhifang Sui. 2023a. Large language models are not fair evaluators. arXiv preprint arXiv:2305.17926.
Yizhong Wang, Hamish Ivison, Pradeep Dasigi, Jack Hessel, Tushar Khot, Khyathi Raghavi Chandu, David Wadden, Kelsey MacMillan, Noah A Smith, Iz Beltagy, et al. 2023b. How far can camels go? Exploring the state of instruction tuning on open resources. arXiv preprint arXiv:2306.04751.
Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V Le. 2022. Finetuned language models are zero-shot learners. In International Conference on Learning Representations.
Genta Indra Winata, Alham Fikri Aji, Samuel Cahyawijaya, Rahmad Mahendra, Fajri Koto, Ade Romadhony, Kemal Kurniawan, David Moeljadi, Radityo Eko Prasojo, Pascale Fung, et al. 2023. NusaX: Multilingual parallel sentiment dataset for 10 Indonesian local languages. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, pages 815-834.
Genta Indra Winata, Hanyang Zhao, Anirban Das, Wenpin Tang, David D Yao, Shi-Xiong Zhang, and Sambit Sahu. 2024. Preference tuning with human feedback on language, speech, and vision tasks: A survey. arXiv preprint arXiv:2409.11564.
BigScience Workshop, Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, et al. 2022. Bloom: A 176b-parameter open-access multilingual language model. arXiv preprint arXiv:2211.05100.
Shijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann, Prabhanjan Kambadur, David Rosenberg, and Gideon Mann. 2023. BloombergGPT: A large language model for finance. arXiv preprint arXiv:2303.17564.
Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, and Colin Raffel. 2021. mt5: A massively multilingual pre-trained text-to-text transformer. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 483-498.
Travis Zack, Eric Lehman, Mirac Suzgun, Jorge A Rodriguez, Leo Anthony Celi, Judy Gichoya, Dan Jurafsky, Peter Szolovits, David W Bates, Raja-Elie E Abdulnour, et al. 2024. Assessing the potential of GPT-4 to perpetuate racial and gender biases in health care: A model evaluation study. The Lancet Digital Health, 6(1):e12-e22.
Daniel Zhang, Saurabh Mishra, Erik Brynjolfsson, John Etchemendy, Deep Ganguli, Barbara Grosz, Terah Lyons, James Manyika, Juan Carlos Niebles, Michael Sellitto, et al. 2021. The AI index 2021 annual report. arXiv preprint arXiv:2103.06312.
Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. 2022. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068.
Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen R. McKeown, and Tatsunori B. Hashimoto. 2023. Benchmarking large language models for news summarization. CoRR, abs/2301.13848.
Chunting Zhou, Pengfei Liu, Puxin Xu, Srini Iyer, Jiao Sun, Yuning Mao, Xuezhe Ma, Avia Efrat, Ping Yu, Lili Yu, Susan Zhang, Gargi Ghosh, Mike Lewis, Luke Zettlemoyer, and Omer Levy. 2023. LIMA: Less is more for alignment. CoRR, abs/2305.11206.
academicscancontributetodomainspecializedlanguagemodels/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5270e8ab00b79d84f24f0f9e6701506d40f28b8b0902b802999f9ca812c17eb9
size 275006
accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/4149d901-7794-481a-b333-412366837bec_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:00e7967f2bedec61f82fcf3461ce5b69e020ce197a0d9e08af89dae168892b21
size 53272
accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/4149d901-7794-481a-b333-412366837bec_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:533d9b5bba25abf5eb46dd35b48f033da67644919a6a565934448335b1b8f51e
size 64951
accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/4149d901-7794-481a-b333-412366837bec_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9517d032f75739183c4ddb12d2894eafb25b0e5e5bcbb8fa537f70fcf282e0d8
size 426628
accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/full.md
ADDED
@@ -0,0 +1,221 @@
# Accurate and Data-Efficient Toxicity Prediction when Annotators Disagree
Harbani Jaggi* (UC Berkeley)
Kashyap Murali* (UC Berkeley)
Eve Fleisig (UC Berkeley)
Erdem Biyik (USC)
# Abstract
When annotators disagree, predicting the labels given by individual annotators can capture nuances overlooked by traditional label aggregation. We introduce three approaches to predict individual annotator ratings on the toxicity of text by incorporating individual annotator-specific information: a neural collaborative filtering (NCF) approach, an in-context learning (ICL) approach, and an intermediate embedding-based architecture. We also study the utility of demographic information for rating prediction. NCF showed limited utility; however, integrating annotator history, demographics, and survey information permits both the embedding-based architecture and ICL to substantially improve prediction accuracy, with the embedding-based architecture outperforming the other methods. We also find that, if demographics are predicted from survey information, using these imputed demographics as features performs comparably to using true demographic data. This suggests that demographics may not provide substantial information for modeling ratings beyond what is captured in survey responses. Our findings raise considerations about the relative utility of different types of annotator information and provide new approaches for modeling annotators in subjective NLP tasks.
# 1 Introduction
Disagreement among data annotators can reveal nuances in NLP tasks that lack a simple ground truth, such as hate speech detection. For instance, what one group of annotators deems acceptable might be considered offensive by another. The current standard for resolving such disagreement, aggregation via majority voting, casts aside variance in annotator labels as noise, when in subjective tasks this variance is key to understanding the perspectives that arise from the annotators' individuality and backgrounds.
To address this problem, recent research has explored alternatives to majority voting. Most notably, studies have taken the approach of predicting the ratings of individual annotators (Davani et al., 2022; Fleisig et al., 2023; Gordon et al., 2022). We aim to improve the prediction of rating behavior, guided by the following questions:
- Does incorporating annotator information via collaborative filtering, embedding-based architecture, or in-context learning improve downstream rating predictions?
- What annotator information best informs toxicity rating predictions? Do demographics provide useful information beyond what survey information can provide?
We proposed and tested a neural collaborative filtering (NCF) module, an embedding-based architecture, and an in-context learning (ICL) module for individual rating prediction. First, we incorporated NCF into the classification head of a RoBERTa-based model (Liu et al., 2019). Embedded annotator information<sup>1</sup> was combined with a separate embedding of annotators' rating history to predict individual annotator toxicity ratings. Second, we used embedding models to encode annotator information, then performed regression to predict toxicity ratings. Lastly, we prompted LLMs such as Mistral (Jiang et al., 2023) and GPT-3.5 (Brown et al., 2020) to study different ways of integrating annotator information.
Our findings indicate that while NCF does not outperform baseline models, ICL and our embedding-based architecture improve performance, with the embedding-based architecture significantly outperforming all other approaches tested. In addition, our research on the effectiveness of demographic information as a feature indicates that imputing demographics from survey data performs similarly to using direct demographic inputs, suggesting that survey responses already capture the relevant demographic information for rating prediction. This suggests that, on this task, demographics have little predictive power beyond what survey information provides.
# 2 Motivation and Related Work
Our work is fundamentally motivated by the need for alternatives to majority-vote label aggregation in NLP tasks. Pavlick and Kwiatkowski (2019) find that disagreement among annotators is partially attributable to differences in human judgment. Basile et al. (2021) underscore the importance of considering a system's output on instances where annotators disagree.
Newer work in this field aims to directly model individual annotator rating behavior. Davani et al. (2022) employ a multi-task approach, where predicting each annotator's judgment is a subtask of their larger architecture. Fleisig et al. (2023) use a RoBERTa-based model to predict an individual annotator's ratings. Gordon et al. (2022) assemble a jury of annotators, predicting individual judgments.
For the individual annotator rating prediction task, Deng et al. (2023) create individual annotator embeddings and annotation embeddings. This idea of learning embeddings based on user-specific data has been applied in various domains successfully, e.g., imitation learning (Beliaev et al., 2022) or recommendation systems (Biyik et al., 2023).
Collaborative filtering (CF) learns user embeddings based on their past behaviors (Bokde et al., 2015). He et al. (2017) show that neural collaborative filtering (NCF) offers better performance than more naive CF implementations. This motivates our NCF approach to learning annotator embeddings. Intuitively, this approach would be effective in learning deeply rooted preferences and behaviors of annotators. Thus, we hypothesized that this method would more accurately predict individual annotator ratings.
Several recent approaches use sociodemographic traits of individual annotators for the rating prediction task (Fleisig et al., 2023; Davani et al., 2022), but Andrus et al. (2021) warn that legal and organizational constraints, such as privacy laws and concerns around self-reporting, often make collecting demographic data challenging. Gupta et al. (2018) suggest using semantically related features in the absence of sensitive demographic data. For instance, in the absence of gender information, Zhao et al. (2019) use other demographic features (age, relation, and marital status) for their prediction task. This work motivates our objective of incorporating auxiliary annotator information (survey information and annotator history) in the prediction task.
Lastly, Orlikowski et al. (2023) challenge the utility of demographic information, finding no strong evidence that explicitly modeling demographics helps to predict annotation behavior. In concurrent work, Hu and Collier (2024) argue that there is an inherent limit to how much predictive power demographics can provide: incorporating demographic variables can yield modest improvements in prediction accuracy, but these gains are often constrained by the relatively low variance these variables explain. This motivates our final objective, studying the efficacy of demographics as a mediating variable for rating prediction.

# 3 Approach

Our approach includes creating three separate modules based on neural collaborative filtering (NCF), an embedding-based architecture, and in-context learning (ICL). We evaluate each approach's efficacy in predicting annotator rating behavior. The latter two modules are used to investigate our second research question; we integrate different ablations of annotator information as input to the rating prediction models to study their effect on toxicity rating prediction.
We used Kumar et al. (2021)'s dataset to evaluate the performance of our rating prediction modules. This dataset consists of sentences rated for toxicity (0 = least toxic, 4 = most toxic). Each sentence has been labeled by 5 annotators and each annotator has labeled 20 distinct sentences. For each annotator, the dataset contains their rating behavior; demographic information (race, gender, importance of religion, LGBT status, education, parental status, and political stance); and survey information, e.g., their preferred forums, social media, whether they have seen toxic content, whether they think toxic content is a problem, and their opinion on whether technology impacts people's lives.
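For concreteness, one (text, annotator) record can be pictured as below; the field names are illustrative shorthand, not the dataset's actual column names.

```python
# Illustrative layout of one (text, annotator) record; field names and
# the example annotator ID are hypothetical.
record = {
    "text": "Why don't you go jump off a cliff?",
    "rating": 3,                     # 0 = least toxic ... 4 = most toxic
    "annotator_id": "a_0421",
    "demographics": {
        "race": "...", "gender": "...", "importance_of_religion": "...",
        "lgbt_status": "...", "education": "...",
        "parental_status": "...", "political_stance": "...",
    },
    "survey": {
        "preferred_forums": ["..."],
        "seen_toxic_content": True,
        "toxic_content_is_problem": True,
        "technology_impact_opinion": "...",
    },
    "rating_history": [              # each annotator labels 20 sentences
        ("This is a harmless comment", 0),
        ("You're an idiot", 3),
    ],
}
```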
For ablations, we took distinct combinations of annotator information (rating history, demographics, survey information) along with the text to be rated, assessing the impact of each on the model's performance. To study whether demographics are a necessary feature for predicting annotator ratings, we also used a separate model to predict annotator demographics using rating history and survey information and applied these predicted demographics as input for our ablations.
For all three methods, we used Mean Absolute Error (MAE) of predicting individual annotators' ratings as the evaluation metric, allowing us to quantify the performance of different model configurations.
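Concretely, the metric is the mean absolute difference between predicted and true per-annotator ratings; a minimal sketch:

```python
import numpy as np

def mae(predicted: np.ndarray, actual: np.ndarray) -> float:
    # Mean absolute error over individual (text, annotator) rating pairs.
    return float(np.mean(np.abs(predicted - actual)))
```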

# 3.1 Neural Collaborative Filtering

Our NCF method integrates textual and annotator-specific information to predict annotator ratings for the toxicity detection task (Figure 1). We aimed to create both a textual embedding and an annotator embedding for each (text, annotator) pair and to capture latent interactions between the two by using a hybrid neural architecture inspired by neural collaborative filtering. The goal was to learn more complex, non-linear relationships between annotator preferences and the text itself in order to more accurately predict an annotator's toxicity rating.
To create embedded representations of the textual information, which has varying levels of toxicity, we leveraged a RoBERTa model (Liu et al., 2019) fine-tuned on the Jigsaw Toxic Comment Classification Challenge dataset (cjadams et al., 2017) and the hate speech detection datasets introduced by Kumar et al. (2021). In parallel, we initialized and stored random embeddings for each annotator in the RoBERTa classification head. During training, these embeddings were concatenated with the text embeddings and passed through 4 dense layers before predicting the rating.
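A minimal PyTorch sketch of one plausible shape for this head is below; the concatenation, the four dense layers, and the 768-dimensional annotator embeddings (Appendix A) follow the description above, while the text-embedding dimensionality, intermediate widths, and activations are assumptions.

```python
import torch
import torch.nn as nn

class NCFHead(nn.Module):
    """Hybrid NCF classification head: text embedding + annotator embedding."""

    def __init__(self, num_annotators: int, text_dim: int = 1024,
                 annot_dim: int = 768, num_ratings: int = 5):
        super().__init__()
        # Randomly initialized annotator embeddings stored in the head.
        self.annotators = nn.Embedding(num_annotators, annot_dim)
        dims = [text_dim + annot_dim, 1024, 512, 256]
        layers: list[nn.Module] = []
        for d_in, d_out in zip(dims, dims[1:]):
            layers += [nn.Linear(d_in, d_out), nn.ReLU()]
        layers.append(nn.Linear(dims[-1], num_ratings))  # 4th dense layer
        self.mlp = nn.Sequential(*layers)

    def forward(self, text_emb: torch.Tensor, annotator_ids: torch.Tensor):
        # Concatenate the RoBERTa text embedding with the learned annotator
        # embedding, then score the five rating levels (0-4).
        x = torch.cat([text_emb, self.annotators(annotator_ids)], dim=-1)
        return self.mlp(x)
```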
In developing this hybrid model architecture, we explored variations in the dimensionality of the annotator embeddings, methods for integrating the sentence and annotator embeddings, and the impact of freezing the RoBERTa model (Appendix A describes variations tested).

# 3.2 Embedding-Based Architecture

We generated embeddings for the concatenated annotator information and the current text to be rated using two text embedding models, OpenAI's text-embedding-3-small and text-embedding-3-large. These embeddings then served as input for a custom regression model with multiple fully connected layers, which was trained to predict toxicity ratings based on the extracted features (Figure 2).
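A minimal sketch of the embedding step, assuming the standard OpenAI Python client; the separator between the annotator information and the text is an illustrative assumption.

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def embed(annotator_info: str, text: str,
          model: str = "text-embedding-3-large") -> list[float]:
    # Concatenate the annotator information with the text to be rated,
    # then embed; the vector feeds the regression model in Figure 2.
    payload = f"{annotator_info} [SEP] {text}"
    response = client.embeddings.create(model=model, input=payload)
    return response.data[0].embedding  # 3072-dim for -large, 1536 for -small
```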
Figure 1: Design of our neural collaborative filtering (NCF) architecture. Annotator information and the text being rated were passed into an embedding model, then concatenated with the annotator embedding, and passed through a series of dense layers to predict the rating.

# 3.3 In-Context Learning

Our in-context learning architecture prompts a language model to process a range of combinations of annotator information. Each combination serves as input to the model (Mistral or GPT-3.5), enabling it to account for the specific context of the annotator when predicting toxicity ratings. The model was prompted to generate predictions based on the contextual information provided. This approach aims to enhance the model's ability to make informed predictions by integrating diverse sources of information relevant to the rating task. A sample prompt of this approach is shown in Figure 3.
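A sketch of one such call through the OpenAI chat API (the Mistral variant is analogous); the model identifier is an assumption, and the prompt fields mirror the sample in Figure 3.

```python
from openai import OpenAI

client = OpenAI()

SYSTEM = ("You are a model that predicts the toxicity rating of text from 0 "
          "to 4, where 0 is the least toxic and 4 is the most toxic.")

def predict_rating(history: str, survey: str, demographics: str, text: str) -> str:
    # Ablations drop segments from this list (Table 1 rows).
    segments = [s for s in (history, survey, demographics) if s]
    user = " [SEP] ".join(segments + [f'Annotate this text: "{text}"'])
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "system", "content": SYSTEM},
                  {"role": "user", "content": user}],
    )
    return response.choices[0].message.content
```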
Figure 2: Design of our embedding-based architecture.

# 4 Results

Our three approaches predicted annotators' toxicity ratings on a scale from 0 to 4, based on both textual data and various combinations of annotator-specific information (demographics, survey responses, rating history). We also examine how well these models handle predicted demographic data rather than using the ground truth demographic values for each annotator. This helps to assess the data efficiency and effect of demographics as an input to the rating prediction task.
For our ablations that studied the improvement in rating predictions, we compared our results to previous baselines that predicted annotators' ratings on the same dataset.

# Q1: Does incorporating annotator information via collaborative filtering, the embedding-based architecture, or in-context learning improve downstream rating predictions?

Our embedding-based architecture outperformed all other experiments with an MAE of 0.61; the best ICL approach (with Mistral) reached an MAE of 0.69, using annotator demographics, survey information, and historical rating data. Both the ICL approach and the embedding-based architecture outperform the most recent baseline for the dataset (Fleisig et al., 2023), and the embedding-based architecture matches the best previous MAE on this dataset (Gordon et al., 2022). The best-performing models use all available annotator-specific information as input (annotator demographics, survey information, and historical rating data). The NCF approach had consistently poorer results, with a best MAE of 0.79 when including all annotator-specific information.
When creating the NCF architecture, we tested several variations. We first created a baseline against which we compared the different outputs of our NCF module. Evaluating the fine-tuned RoBERTa model with all annotator-specific information as input, along with the text to be rated, yielded a baseline MAE of 0.81. We experimented with integrating embeddings through dot product vs. concatenation, freezing RoBERTa during training, and placing the collaborative filtering task in different parts of the RoBERTa architecture. Our best-performing model froze the pretrained RoBERTa model, used concatenation, and placed the collaborative filtering piece in the classification head. However, it achieved an MAE of only 0.80, not significantly improving on our baseline.
Our embedding-based architecture consistently outperformed the other approaches on every ablation, suggesting that a hybrid feature-extraction and regression approach most effectively uses annotator-specific information in rating predictions.

# Q2: What annotator information best informs toxicity rating predictions? Do demographics provide useful information beyond what survey information can provide?

Incorporating demographic information improves performance over using only survey information, rating history, or both across ablations. However, we find that much of this gap can be compensated for by distilling demographic information out of survey information. Compared to the text-only baseline, incorporating predicted demographics with survey information and annotator history achieved MAE reductions of $10.26\%$ with Mistral, $8.64\%$ with GPT-3.5, $11.84\%$ with text-embedding-3-small, and $12\%$ with text-embedding-3-large. Replacing true demographic information with predicted demographic information results in nearly as strong performance for Mistral, GPT-3.5, and text-embedding-3-small.
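These figures are relative reductions against each model's text-only MAE and can be reproduced directly from the Table 1 entries:

```python
# Relative MAE reduction vs. the text-only baseline, using the Table 1
# values for the "+ predicted demo. + history + survey" configuration.
text_only = {"Mistral": 0.78, "GPT-3.5": 0.81,
             "text-embedding-3-small": 0.76, "text-embedding-3-large": 0.75}
pred_demo = {"Mistral": 0.70, "GPT-3.5": 0.74,
             "text-embedding-3-small": 0.67, "text-embedding-3-large": 0.66}
for name in text_only:
    reduction = 100 * (text_only[name] - pred_demo[name]) / text_only[name]
    print(f"{name}: {reduction:.2f}%")  # 10.26%, 8.64%, 11.84%, 12.00%
```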
Incorporating predicted demographics alongside survey information and annotator history notably improves accuracy. This occurs despite the fact that the accuracy of predicted demographics varies widely (highest for race and gender, but near-random for some demographics; see Table 4). Although the true demographics are somewhat helpful, annotator ratings can be effectively predicted without direct demographic data. This finding suggests that detailed demographic data may not be especially useful as a feature in individual rating prediction, beyond what can be inferred from individual preferences in survey responses.
Predicting Demographics. The performance of predicting demographics was evaluated across various configurations (Table 4). The baseline approach incorporating only survey information achieved the highest accuracies, with $47\%$ for race and $63\%$ for gender. Combining survey information with text slightly reduced performance, potentially indicating noise added by the text to be rated. The majority-class approach is included as a baseline comparison to highlight the performance improvements for the different categories.
<table><tr><td>Model</td><td>Mistral</td><td>GPT 3.5</td><td>text-embedding-3-small</td><td>text-embedding-3-large</td></tr><tr><td>Text only</td><td>0.78</td><td>0.81</td><td>0.76</td><td>0.75</td></tr><tr><td>+ demo.</td><td>0.76</td><td>0.79</td><td>0.73</td><td>0.71</td></tr><tr><td>+ demo. + history</td><td>0.75</td><td>0.78</td><td>0.73</td><td>0.69</td></tr><tr><td>+ history</td><td>0.73</td><td>0.75</td><td>0.70</td><td>0.66</td></tr><tr><td>+ survey</td><td>0.73</td><td>0.75</td><td>0.70</td><td>0.70</td></tr><tr><td>+ demo. + survey</td><td>0.71</td><td>0.73</td><td>0.68</td><td>0.64</td></tr><tr><td>+ history + survey</td><td>0.70</td><td>0.73</td><td>0.67</td><td>0.69</td></tr><tr><td>+ predicted demo. + history + survey</td><td>0.70</td><td>0.74</td><td>0.67</td><td>0.66</td></tr><tr><td>+ demo. + history + survey</td><td>0.69</td><td>0.72</td><td>0.66</td><td>0.61</td></tr></table>

Table 1: Comparison of mean absolute error across different model configurations for the test set (with or without annotator demographics, rating history, and survey responses). Both ICL and embedding-based architectures improve on the baseline, with embedding-based architectures performing best.

Our findings indicate that successively incorporating annotator demographics, rating history, and survey information improves performance for nearly all configurations tested (Table 1). Overall, the comprehensive model incorporating demographics, annotator history, and survey data consistently outperformed other configurations, demonstrating the value of integrating multiple data sources for demographic and rating predictions.

# 5 Conclusion

Leveraging the embedding-based architecture and ICL methods substantially improved toxicity rating predictions. NCF, by contrast, was not a competitive method for predicting ratings. Incorporating annotator information significantly enhances model performance. The best-performing embedding-based architecture achieved the lowest MAE of 0.61 by integrating demographics, annotator history, and survey data. This suggests that personalized predictions based on individual annotator preferences can lead to more accurate outcomes. Meanwhile, the ability to predict some demographics from survey information, and the fact that these imputed demographics nearly match performance with the true demographics, suggest that although demographics are helpful, individual annotator ratings can be predicted effectively without demographic data. This finding suggests that some differences in annotator opinions may be best captured by modeling individual preferences rather than demographic trends. In addition, the effectiveness of our embedding-based architecture suggests that it could help to inform future frameworks for annotator rating prediction.

# 6 Limitations

While our study advances the accuracy of annotator rating predictions, several limitations exist. The generalizability of our findings is limited to English text from the U.S. and Canada, which hinders applicability in other linguistic and cultural contexts.
The integration of detailed annotator information poses ethical and privacy risks and can amplify existing biases in the data. Additionally, the complexity and computational demands of our models challenge scalability and interpretability. Future research should address these issues to enhance the robustness and fairness of predictive models in subjective NLP tasks. It should also focus on expanding these methods to other domains and exploring the ethical implications of incorporating inferred data for predictions. By continuing to refine these approaches, we can develop more accurate and reliable models that better capture the complexities of human behavior and preferences.

# 7 Ethical Considerations

We found that individual ratings can be predicted well without demographic information. This is helpful in that it permits individualized rating prediction without collecting demographic information. Unfortunately, that does not mean the ratings are predicted independently of demographic information: in fact, we also found that survey information is a close enough proxy that demographics can be predicted with substantially better-than-random accuracy, especially for race and gender, from survey responses. Incorporating these predicted demographics further improves accuracy. However, our findings thus uncover a potential privacy issue: collecting seemingly innocuous survey information carries the risk of revealing annotator demographics. This suggests that future research in this area must proceed with caution: collecting or inferring demographic information improves prediction accuracy, but risks tokenism (where opinions within a demographic group are assumed to be homogeneous). Instead, future research could identify survey questions that help to improve rating prediction but do not risk revealing annotator demographics.

# References

McKane Andrus, Elena Spitzer, Jeffrey Brown, and Alice Xiang. 2021. What we can't measure, we can't understand: Challenges to demographic data procurement in the pursuit of fairness. In Proceedings of the 2021 ACM conference on fairness, accountability, and transparency, pages 249-260.
Valerio Basile, Michael Fell, Tommaso Fornaciari, Dirk Hovy, Silviu Paun, Barbara Plank, Massimo Poesio, Alexandra Uma, et al. 2021. We need to consider disagreement in evaluation. In Proceedings of the 1st workshop on benchmarking: past, present and future, pages 15-21. Association for Computational Linguistics.
Mark Beliaev, Andy Shih, Stefano Ermon, Dorsa Sadigh, and Ramtin Pedarsani. 2022. Imitation learning by estimating expertise of demonstrators. In International Conference on Machine Learning, pages 1732-1748. PMLR.
Erdem Biyik, Fan Yao, Yinlam Chow, Alex Haig, Chih-wei Hsu, Mohammad Ghavamzadeh, and Craig Boutilier. 2023. Preference elicitation with soft attributes in interactive recommendation. arXiv preprint arXiv:2311.02085.

Dheeraj Bokde, Sheetal Girase, and Debajyoti Mukhopadhyay. 2015. Matrix factorization model in collaborative filtering algorithms: A survey. Procedia Computer Science, 49:136-146.
Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners.
cjadams, Jeffrey Sorensen, Julia Elliott, Lucas Dixon, Mark McDonald, nithum, and Will Cukierski. 2017. Toxic comment classification challenge.
Aida Mostafazadeh Davani, Mark Diaz, and Vinodkumar Prabhakaran. 2022. Dealing with disagreements: Looking beyond the majority vote in subjective annotations. Transactions of the Association for Computational Linguistics, 10:92-110.
Naihao Deng, Xinliang Frederick Zhang, Siyang Liu, Winston Wu, Lu Wang, and Rada Mihalcea. 2023. You are what you annotate: Towards better models through annotator representations. arXiv preprint arXiv:2305.14663.

Eve Fleisig, Rediet Abebe, and Dan Klein. 2023. When the majority is wrong: Modeling annotator disagreement for subjective tasks. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6715-6726.
Mitchell L. Gordon, Michelle S. Lam, Joon Sung Park, Kayur Patel, Jeff Hancock, Tatsunori Hashimoto, and Michael S. Bernstein. 2022. Jury learning: Integrating dissenting voices into machine learning models. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems, CHI '22, New York, NY, USA. Association for Computing Machinery.
Maya Gupta, Andrew Cotter, Mahdi Milani Fard, and Serena Wang. 2018. Proxy fairness.
Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web, pages 173-182.

Tiancheng Hu and Nigel Collier. 2024. Quantifying the persona effect in LLM simulations. arXiv preprint arXiv:2402.10811.

Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. 2023. Mistral 7B.

Deepak Kumar, Patrick Gage Kelley, Sunny Consolvo, Joshua Mason, Elie Bursztein, Zakir Durumeric, Kurt Thomas, and Michael Bailey. 2021. Designing toxic content classification for a diversity of perspectives. In SOUPS. USENIX.

Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692.
Matthias Orlikowski, Paul Röttger, Philipp Cimiano, and Dirk Hovy. 2023. The ecological fallacy in annotation: Modeling human label variation goes beyond sociodemographics. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 1017-1029, Toronto, Canada. Association for Computational Linguistics.
Ellie Pavlick and Tom Kwiatkowski. 2019. Inherent disagreements in human textual inferences. Transactions of the Association for Computational Linguistics, 7:677-694.
Han Zhao, Amanda Coston, Tameem Adel, and Geoffrey J Gordon. 2019. Conditional learning of fair representations. arXiv preprint arXiv:1910.07162.

# System Prompt

You are a model that predicts the toxicity rating of text from 0 to 4, where 0 is the least toxic and 4 is the most toxic.

# User Prompt

The annotator has annotated these texts: "This is a harmless comment" is rated as 0, "You're an idiot" is rated as 3, "I respectfully disagree" is rated as 1 [SEP] The reader uses social media, news sites, and video sites. The reader has seen toxic comments, has been personally targeted by toxic comments, thinks technology has a positive impact on people's lives, and thinks toxic comments are a serious problem. [SEP] The reader is a 25-34 year old Asian female who has a Bachelor's degree, is politically liberal, is not a parent, and thinks religion is not important. [SEP] Annotate this text: "Why don't you go jump off a cliff?"
Figure 3: Sample prompt for toxicity prediction model. The system prompt (in teal) defines the model's role. The user prompt (in olive) provides historical annotations, survey results, demographic information, and the text to be rated.

# A Appendix

# Approaches Taken

1. Tried to cluster the annotator embeddings with PCA; they were not linearly separable based on demographics
2. Where to incorporate recommender systems

(a) Classification head start - features
(b) Later layer
(c) Before appending to 'features'

3. Tried to train plain RoBERTa on the entire dataset using the pretrained multitask-demographic dataset
4. Different dimensions of annotator embeddings

(a) Tried dim 8: little to no predictive power for annotator demographics
(b) Changed to 512
(c) Now using dim 768

5. Dual RoBERTa

(a) Instead of randomly instantiating an embedding layer, we tried using RoBERTa to represent annotators based on their IDs.

# Text Structure

For these predictions, the input is formatted as $h_1 \ldots h_n$ [SEP] $s_1 \ldots s_n$ [SEP] $d_1 \ldots d_n$ [SEP] $w_1 \ldots w_n$ , where $h_1 \ldots h_n$ represents the other texts reviewed and their ratings as provided by the annotator, $s_1 \ldots s_n$ is a template string describing the annotator's survey information data, $d_1 \ldots d_n$ is a template string containing the annotator's demographic information (e.g., "The reader is a 55-64 year old white female who has a bachelor's degree, is politically independent, is a parent, and thinks religion is very important. The reader is straight and cisgender"), $w_1 \ldots w_n$ is the text being rated, and [SEP] is a separator token. We use a template string instead of categorical variables in order to best take advantage of the model's language pretraining objective (e.g., underlying associations about the experiences of different demographic groups).
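A minimal sketch of this input assembly (the template strings are abbreviated):

```python
def build_input(history: str, survey: str, demographics: str, text: str) -> str:
    # h_1...h_n [SEP] s_1...s_n [SEP] d_1...d_n [SEP] w_1...w_n
    return " [SEP] ".join([history, survey, demographics, text])

example = build_input(
    '"This is a harmless comment" is rated as 0',
    "The reader uses social media, news sites, and video sites.",
    "The reader is a 55-64 year old white female who has a bachelor's degree.",
    "Why don't you go jump off a cliff?",
)
```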

# Dataset Size

The dataset we used to evaluate the performance of our approaches (Kumar et al., 2021) has three splits: train, dev, and test. The training set has 488,100 samples, the dev set has 25,000 samples, and the test set also has 25,000 samples.

# Model Information

For the collaborative filtering approach, we used a RoBERTa model that has 355 million trainable parameters, and it took 2 GPU hours per epoch when fine-tuned on 2 NVIDIA Quadro RTX 8000 GPUs.
For the embedding-based approach, we used an API version of OpenAI's text-embedding-3-large model; because we do not have access to the model itself, we cannot determine its size or infrastructure requirements.

# Experimental Setup

We observed the best performance with 4 dense layers after the embedding output: the first layer transforms the embedding from 3072 to 1024 dimensions, the next two keep it at 1024 dimensions, and the final layer shrinks it to 5 dimensions.
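In PyTorch terms this head is simply the following; the activation between layers is an assumption.

```python
import torch.nn as nn

# Four dense layers as described above: 3072 -> 1024 -> 1024 -> 1024 -> 5,
# where the 5 outputs correspond to the rating levels 0-4.
head = nn.Sequential(
    nn.Linear(3072, 1024), nn.ReLU(),
    nn.Linear(1024, 1024), nn.ReLU(),
    nn.Linear(1024, 1024), nn.ReLU(),
    nn.Linear(1024, 5),
)
```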

# Demographics Prediction Task

<table><tr><td>Model</td><td>Mistral</td><td>GPT 3.5</td><td>text-embedding-3-small</td><td>text-embedding-3-large</td></tr><tr><td>Text only</td><td>0.74</td><td>0.77</td><td>0.73</td><td>0.72</td></tr><tr><td>+ D</td><td>0.73</td><td>0.76</td><td>0.71</td><td>0.68</td></tr><tr><td>+ D + H</td><td>0.71</td><td>0.74</td><td>0.69</td><td>0.66</td></tr><tr><td>+ H</td><td>0.70</td><td>0.72</td><td>0.67</td><td>0.63</td></tr><tr><td>+S</td><td>0.69</td><td>0.71</td><td>0.66</td><td>0.67</td></tr><tr><td>+ D + S</td><td>0.67</td><td>0.69</td><td>0.64</td><td>0.61</td></tr><tr><td>+ H + S</td><td>-</td><td>-</td><td>-</td><td>0.65</td></tr><tr><td>+ PD + H + S</td><td>-</td><td>-</td><td>-</td><td>0.62</td></tr><tr><td>+ D + H + S</td><td>0.65</td><td>0.68</td><td>0.62</td><td>0.58</td></tr></table>
Table 2: Comparison of mean absolute error across different model configurations (dev set results). Ablations that included both annotator history and survey information were only performed on the best-performing model. D refers to Annotator Demographics, H refers to other texts an annotator has rated, S refers to survey responses, PD refers to predicted demographics.
<table><tr><td>Experiment Description</td><td>Individual MAE</td></tr><tr><td>Initial training with Collaborative Filtering approach and RoBERTa</td><td>1.12</td></tr><tr><td>Adjusted annotation embedding dimensions from 8 to 512</td><td>0.89</td></tr><tr><td>Freezing RoBERTa after pre-training on (Kumar et al., 2021)</td><td>0.80</td></tr></table>
Table 3: Significant Experiments and Their Impact on Mean Absolute Error (MAE)
<table><tr><td>Generated Data</td><td>Race</td><td>Gender</td><td>Importance of Religion</td><td>LGBT Status</td><td>Education</td><td>Political Stance</td></tr><tr><td>Survey Info</td><td>47%</td><td>63%</td><td>37%</td><td>38%</td><td>57%</td><td>48%</td></tr><tr><td>Survey Info + Text</td><td>43%</td><td>60%</td><td>33%</td><td>34%</td><td>52%</td><td>44%</td></tr><tr><td>Majority Class</td><td>9%</td><td>52%</td><td>31%</td><td>81%</td><td>52%</td><td>40%</td></tr></table>
Table 4: Comparison of demographic prediction accuracy across different data configurations.

accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93a0e572a22b87fd98d9371feca8435f39a4bec542c30ea3ae96c0c297a9f7b6
size 166326

accurateanddataefficienttoxicitypredictionwhenannotatorsdisagree/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd0defcec9a090b8d656462c73804059edd1f1a766760be64f40e1b4e87487f8
size 217467

aceallmbasednegotiationcoachingsystem/009d36b4-85b3-47cc-8be1-d64ca1120806_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9e9bcefeb96f39b7bba9955faa4336437e88feb1e13d2b7ef0d8980a6782574
size 166155

aceallmbasednegotiationcoachingsystem/009d36b4-85b3-47cc-8be1-d64ca1120806_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4feb6ed0f7d5df66a0980057bface023dc909b1b4d02567cc88d90135e6d8bd4
size 196509

aceallmbasednegotiationcoachingsystem/009d36b4-85b3-47cc-8be1-d64ca1120806_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:48b0091086209b90504400aec57e32aa48c7924db143378268edc0b2925d3a3c
size 3191814

aceallmbasednegotiationcoachingsystem/full.md
ADDED
@@ -0,0 +1,791 @@

# ACE: A LLM-based Negotiation Coaching System

Ryan Shea*, Aymen Kallala*, Xin Lucy Liu*, Michael W. Morris†, Zhou Yu†

Columbia University, New York, NY

rs4235@columbia.edu

{ak5078,x12855,mwm82,zy2461}@columbia.edu

# Abstract

The growing prominence of LLMs has led to an increase in the development of AI tutoring systems. These systems are crucial in providing underrepresented populations with improved access to valuable education. One important area of education that is unavailable to many learners is strategic bargaining related to negotiation. To address this, we develop a LLM-based Assistant for Coaching nEgoitation (ACE). ACE not only serves as a negotiation partner for users but also provides them with targeted feedback for improvement. To build our system, we collect a dataset of negotiation transcripts between MBA students. These transcripts come from trained negotiators and emulate realistic bargaining scenarios. We use the dataset, along with expert consultations, to design an annotation scheme for detecting negotiation mistakes. ACE employs this scheme to identify mistakes and provide targeted feedback to users. To test the effectiveness of ACE-generated feedback, we conducted a user experiment with two consecutive trials of negotiation and found that it improves negotiation performances significantly compared to a system that doesn't provide feedback and one which uses an alternative method of providing feedback.

# 1 Introduction

The rapid progress of LLMs in recent years has spurred the creation of more sophisticated AI tutoring systems (Sonkar et al., 2023). These systems give learners easier access to training in areas outside what is typically available for most learners (Liang et al., 2023). One area that falls into this category is social competencies, such as negotiation tactics. Negotiation skills are crucial as they help individuals maximize their gains in competitive situations across different areas of their professional and personal life. However, these skills are hard to learn from a traditional classroom lecture because they involve reflexive behavioral habits. They are typically taught through small seminars centered on role-playing exercises and instructor coaching, which are expensive and limited in access.
Therefore, effective negotiation training is not available to many populations who lack it yet need it the most, such as women and minorities. These groups particularly lack negotiation skills and are less accustomed to advocating for themselves (Babcock and Laschever, 2003) which is a significant factor contributing to their relatively lower starting salaries and fewer promotion opportunities (Lu and Zhao, 2023). As such, increased coaching is needed to address gender and ethnic differences in negotiation performance (Amanatullah and Morris, 2010). Recent research also finds that linguistic assertiveness skills, such as those used in negotiations, vary as a function of cultural/ethnic backgrounds, partly account for ethnic differences in promotions to leadership positions, and are amenable to training interventions (Lu et al., 2020, 2022).
To democratize access to high-quality negotiation coaching, we proposed a LLM-based Assistant for Coaching nEgotiation (ACE). ACE is designed to serve as a negotiation partner that provides learners with targeted feedback similar to what they would receive from an instructor in a seminar setting. To provide this feedback, we begin by collecting a dataset of negotiation transcripts between students in an Master's of business administration (MBA) negotiation class. The scenarios in these negotiations come from a rigorous curriculum and are designed to mirror real-world negotiation settings. Furthermore, the participants in these negotiations have been trained in bargaining tactics, unlike previous datasets which rely on crowd-workers (Lewis et al., 2017; He et al., 2018).
Using this dataset along with expert consultations, we develop an annotation scheme to identify and correct mistakes that users make during their negotiations. Our annotations mirror actual error categories that instructors look for in negotiation classes. We then build ACE according to this scheme. ACE uses the annotation categories to identify users' mistakes and then provides targeted feedback based on the error definitions, along with in-context examples of feedback written by experts. Our system also includes a prompt-based negotiation chatbot agent which serves as a simulated negotiation partner for learners.
To test the efficacy of ACE, particularly the feedback it provides, we recruited a group of 374 users who participated in two trials of negotiation. We found that ACE significantly improves learning outcomes compared to users who negotiate with no coaching from our system. Our contributions are summarized as follows:

- We propose a novel negotiation coaching system called ACE, which utilizes LLMs to provide targeted, individualized feedback to users.
- We develop a negotiation strategy annotation scheme to identify and correct user errors. Our annotation scheme is based on expert input and mirrors the kind of mistakes negotiation instructors look for in a seminar setting.
- We release an annotated dataset of spoken negotiation transcripts between MBA students. The negotiation scenarios in our dataset are based on standardized business school curricula and are carefully designed to mimic real world settings.
- We conduct an efficacy experiment with two trials of negotiations, finding that ACE boosted subjective and objective measures of tactical learning relative to a control system which does not provide feedback and an alternative method of providing negotiation feedback.

# 2 Related Work

# 2.1 AI Tutoring Systems

Generating automated training systems has been a long-standing issue for researchers in AI for education (Keuning et al., 2018). One common application of such systems is the area of computing education (Koutcheme et al., 2024). Recent work has explored the use of LLMs to generate automatic feedback for students on programming assignments (Pankiewicz and Baker, 2023) or for creating coding exercises (Denny et al., 2024). These works have shown that state-of-the-art LLMs such as GPT-4 (OpenAI, 2023) can provide effective feedback to users and can achieve up to $80\%$ agreement with humans in certain scenarios (Zheng et al., 2023).
Another application of these systems has been the field of language learning. These systems typically involve a chat agent which guides users through a conversation on a specific topic followed by feedback on the users' conversation (Qian et al., 2023; Li et al., 2022). These systems have been judged as helpful by English languages learners and have also had a demonstrable effect on actual learning outcomes (Liang et al., 2023).
Prior work on developing systems for negotiation training has been fairly limited. These systems can only be interacted with by selecting from a list of pre-written options, and they deliver "canned" responses in reply. Despite these limitations, prior work has shown that interacting with virtual agents can improve learners' understanding of negotiations (Gratch et al., 2016).

# 2.2 Negotiation

Methods that have applied LLMs to the area of negotiation have been focused on building negotiation chat agents. These methods typically use existing negotiation data to perform supervised learning or offline reinforcement learning on a negotiation model (Lewis et al., 2017; He et al., 2018; Verma et al., 2022; Zhan et al., 2024). More recent work has focused on examining and enhancing the negotiation capabilities of prompt-based negotiation agents (Schneider et al., 2023; Fu et al., 2023; Zhan et al., 2024).
Previous work on bargaining has focused on building chatbots to serve as negotiation partners. There is little work on providing user feedback. However, negotiation research has found that people do not learn negotiation tactics simply from the experience of bargaining; they need structured feedback and instruction (Loewenstein and Thompson, 2006). ACE is the first system to provide quality feedback using rigorous business school curricula to improve learning outcomes of negotiation.

# 3 Background

Negotiation is a general task and there are many ways to formulate a bargaining problem. In this section we give an overview of the types of negotiation problems we consider when designing our coaching system and annotation scheme.
The negotiation settings we consider here are ones which involve one agent selling a single item to another agent. This type of negotiation is known as single-issue distributive bargaining (Lewicki et al., 2021). The agent selling the item is referred to as the "seller" and the agent buying the item is called the "buyer". Both the buyer and seller have access to a "role" (also referred to as a "negotiation scenario") which provides private information about their options and preferences and public information about details of the object for sale and the range of market prices for this item. In a role-play simulation, participants use their role to prepare a strategy; this includes their upper limit or "walk-away" price, their target price, and the opening price that they will mention. An example of buyer and seller roles for a used car negotiation can be found in Appendix D.

# 4 Dataset

Our dataset was collected in collaboration with an instructor who teaches a course on negotiation to MBA students. During this course, 50 students were randomly assigned to dyads for a negotiation task. They conducted three successive negotiations against three different counterparts, involving different kinds of cars. Before negotiating, students answered standard preparation questions and then began the negotiation while recording their conversation audio. We transcribed the conversation audio using OpenAI's Whisper API (Radford et al., 2022) along with manual edits to construct our dataset.
A total of 42 dialogues were collected from this process, of which 40 were usable. A summary of the dataset statistics is in Table 1. We show the statistics for the entire dataset along with a breakdown by negotiation task/scenario. The scenarios are based on the type of car being negotiated over in the exercise. We focused on negotiation over a Honda Accord (Task 1), as the transparent market range for such a product makes it typical of negotiation over a commodity item (see Appendix D).
<table><tr><td></td><td>Task 1</td><td>Task 2</td><td>Task 3</td><td>Total</td></tr><tr><td># of conversations</td><td>14</td><td>13</td><td>13</td><td>40</td></tr><tr><td>Avg. # of turns per conversation</td><td>23.8</td><td>22.3</td><td>15.6</td><td>20.6</td></tr><tr><td>Avg. # of tokens per turn</td><td>31.4</td><td>35</td><td>37</td><td>34.1</td></tr><tr><td>Vocabulary size</td><td>950</td><td>1022</td><td>820</td><td>1723</td></tr><tr><td>Deal %</td><td>93%</td><td>31%</td><td>100%</td><td>75%</td></tr><tr><td>Deal Amount</td><td>$12.9k</td><td>$7.3k</td><td>$1.3k</td><td>$7.2k</td></tr></table>
Table 1: A summary of statistics from our negotiation dataset.

# 5 Annotation Scheme

We designed a negotiation error annotation scheme to identify and correct user mistakes. Our scheme is based on the dataset collected in Section 4, expert input, and common distributive bargaining tactics from Lewicki et al., 2021.
We identified eight error categories that can be divided into preparation errors and negotiation errors. All the categories in our scheme are binary True/False labels, where a label of False indicates that a mistake has been made by the negotiator. Our annotation scheme is designed from the perspective of the buyer but can be easily adjusted to fit the seller's perspective.
The two categories below belong to preparation errors, which are mistakes in users' answers to a set of standard preparation questions before the negotiation. These categories are designed to identify whether the user prepared their negotiation strategy correctly.
1. Strategic walk-away point assesses whether the user has properly analyzed the facts of the negotiation scenario to set an appropriate walk-away price. From the buyer's perspective, a walk-away price is the maximum amount they would pay to purchase the item in the negotiation. If the scenario outlines an explicit budget limit, then a strategic walk-away point is one which exactly matches the budget amount. Otherwise, we consider any point below the maximum market price to be strategic.
2. Strategic target price evaluates the target price that the buyer sets before the negotiation. The buyer's target price is strategic if it falls within the first third of the range between the minimum market price and the buyer's walk-away point. If the buyer sets their target below this range, it is too ambitious to be a realistic outcome to aim for. Conversely, if their target is above this range, it is too weak to test how far their opponent can be pushed in the negotiation.
The six categories below refer to negotiation errors, which are mistakes that users make "at the bargaining table," during the negotiation dialogue.

<table><tr><td>Role</td><td>Utterance</td><td>Annotation</td></tr><tr><td>Buyer</td><td>Hi, I'm new to California and I'm looking for probably a Honda Accord with reasonable mileage around maybe $11,000 to $12,000. Do you have anything like that?</td><td>Breaking the ice<br>Giving the first offer<br>Ambitious opening point<br>Including rationale</td></tr><tr><td>Seller</td><td>Nice. We have something similar. We have a nice 2013 Honda. It does have a little bit more miles than that. It has about 50,000. It doesn't have any rust and it's in great condition. What's the price range you're looking to come out with?</td><td></td></tr><tr><td>Buyer</td><td>Probably around $11,000 or $12,000.</td><td>Strong counteroffer<br>Including rationale</td></tr><tr><td>Seller</td><td>Ooh, that's kind of rough. Our sticker price for this car is closer to $14,000.</td><td></td></tr><tr><td>Buyer</td><td>Ooh, yeah, that's definitely a little bit too much. Could I take it for a test drive maybe?</td><td></td></tr><tr><td>Seller</td><td>Sure.</td><td></td></tr><tr><td>Buyer</td><td>Okay, great. Yeah, it's pretty good. What do you think about maybe $12,500 and I would buy it today?</td><td>Strong counteroffer<br>Including rationale</td></tr><tr><td>Seller</td><td>$12,500. I mean, could we call it even $13,000?</td><td></td></tr><tr><td>Buyer</td><td>Yeah, I could probably do $13,000.</td><td>Strong counteroffer<br>Including rationale</td></tr><tr><td>Seller</td><td>All right.</td><td></td></tr><tr><td>Buyer</td><td>All right.</td><td></td></tr><tr><td>Seller</td><td>Sounds great.</td><td>Strategic closing</td></tr></table>

Table 2: An example negotiation dialogue from our dataset. Annotations in red indicate that the category was labeled as False. Annotations in green indicate that the category was labeled as True.
3. Breaking the ice refers to whether or not the user began the negotiation with some social bonding. A negotiator should spend their first conversational turn on social remarks unrelated to the negotiation issues.
4. Giving the first offer indicates whether the user stated the first price offer in the conversation. Negotiators are advised to state their opening price first to anchor the negotiation in a favorable position (Lewicki et al., 2021).
5. Ambitious opening point assesses the tactical quality of the user's opening offer relative to their target price. When the buyer proposes a price first, we consider the offer $O_{1}$ strong if:

$$
O_1 \leq 0.9 \cdot T
$$
with $T$ being the buyer's target price. Otherwise, when the seller previously made an offer $S$ , we consider the buyer's first offer strong if it creates a midpoint at or below their target price:

$$
\frac{S + O_1}{2} \leq T
$$
6. Strong counteroffer assesses the quality of the user's first three proposals following their first offer. A counter-offer $O_{t}$ is considered strong if it's below the midpoint of the remaining bargaining range:

$$
O_t < \frac{O_{t-1} + \min(S, W)}{2}
$$
where $O_{t-1}$ is the buyer's previous offer, $S$ is the seller's current offer, and $W$ is the buyer's walk-away point. (A code sketch implementing these offer checks appears after this list.)
<table><tr><td>Negotiation Category</td><td>Error</td><td>Number of Turns with Errors</td><td>Number of Applicable Turns</td></tr><tr><td>Breaking the ice</td><td></td><td>28</td><td>40</td></tr><tr><td>Giving the first offer</td><td></td><td>15</td><td>40</td></tr><tr><td>Ambitious opening point</td><td></td><td>18</td><td>40</td></tr><tr><td>Strong counteroffer</td><td></td><td>34</td><td>73</td></tr><tr><td>Including rationale</td><td></td><td>25</td><td>112</td></tr><tr><td>Strategic closing</td><td></td><td>36</td><td>40</td></tr><tr><td>Preparation Category</td><td>Error</td><td>Number of Errors</td><td>Number of Dialogues</td></tr><tr><td>Strategic walk-away</td><td></td><td>7</td><td>40</td></tr><tr><td>Strategic target price</td><td></td><td>15</td><td>40</td></tr></table>
Table 3: A summary of our annotated negotiation dataset. Note that mistakes related to negotiation errors tend to be more common than preparation errors.
7. Including rationale indicates whether the user's first four price offers were accompanied by a rationale. We define a rationale as any reasoning that supports a price offer (Lewicki et al., 2021).
8. Strategic closing behavior refers to whether the user closed the deal in ways that heighten the counterpart's commitment. The final two turns of the negotiation should contain either an acknowledgment of the opponent's negotiation skill or a recounting of their own concessions. The closing turns should not contain any celebratory statements about the negotiation outcome or any statements implying that the user got a better deal.
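
Since categories 2, 5, and 6 are defined by formulas, they can be checked mechanically; a sketch of these checks from the buyer's perspective is below (function names and signatures are illustrative).

```python
def strategic_target(target: float, min_market: float, walk_away: float) -> bool:
    # Category 2: strategic if within the first third of the range
    # between the minimum market price and the walk-away point.
    return min_market <= target <= min_market + (walk_away - min_market) / 3

def ambitious_opening(offer: float, target: float,
                      seller_offer: float | None = None) -> bool:
    # Category 5: the buyer's opening offer relative to their target T.
    if seller_offer is None:                 # the buyer opened the bidding
        return offer <= 0.9 * target
    return (seller_offer + offer) / 2 <= target  # midpoint at or below T

def strong_counteroffer(offer: float, prev_offer: float,
                        seller_offer: float, walk_away: float) -> bool:
    # Category 6: below the midpoint of the remaining bargaining range.
    return offer < (prev_offer + min(seller_offer, walk_away)) / 2
```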
We annotated our collected dataset (Section 4) based on this annotation scheme with an inter-annotator agreement of 0.87, according to Cohen's kappa (Cohen, 1960). Inter-annotator agreement was calculated based on a subset of 288 dialogue turns annotated by two authors. A breakdown of the errors that buyers made according to the annotation scheme can be seen in Table 3. An example of an annotated conversation is given in Table 2. The most prevalent errors that buyers made were those in the strategic closing category, with 36 out of the 40 conversations containing an error related to this. On the other hand, we see that buyers did a good job including a rationale in their price offers, with only 25 of these errors present out of a total of 112 relevant conversation turns.

# 6 Approach

In this section, we outline our approach to designing ACE. We first provide a high-level overview of the system and how users progress through it. We then describe the components of the system, including the negotiation chatbot agent and feedback modules.

# 6.1 System Overview

ACE begins by presenting users with a negotiation scenario similar to those in our dataset (Section 4). This is followed by a set of negotiation preparation questions which ask users for their target price, walk-away price, and planned opening point. Users then proceed into a simulated negotiation with our negotiation agent until they reach an agreement. After that we provide users with the feedback associated with their preparation questions, referred to as "preparation feedback." This is followed by "negotiation feedback" on the negotiators' linguistic performance, including both turn-specific points about their tactics and holistic points about their diction, tone, and politeness.

# 6.2 Negotiation Agent

Our negotiation agent is based on a prompted version of GPT-4 (OpenAI, 2023). Prior work has found that LLMs can achieve successful negotiation outcomes with proper prompting (Fu et al., 2023). Therefore, we adopted this approach for our agent. We found that prompting GPT-4 with a full negotiation scenario (Appendix D) resulted in nonstrategic oversharing; therefore, we use a summarized version as our instructional prompt (Table 20).
Even with the summarized prompt, the agent is prone to making "weak" counteroffers, meaning that it is often pushed to offering its walk-away price too easily. To avoid a "pushover" negotiation agent, we employ dynamic prompting. Specifically, we give the agent an initial "reservation price" in the instructional prompt, which is higher than the actual reservation price given in the negotiation scenario. We refer to this price point as the agent's "subjective limit." We adjust the subjective limit as the conversation progresses and eventually set it to the true reservation price after several conversation turns have passed. This allows us to control the bot's counteroffers to a certain degree while allowing for variation in the offers presented. We initially set the bot's subjective limit to a random value within the range given by strategic target price (Section 5). After the first turn, we update it to a price that corresponds to a strong counteroffer. This ensures that the bot gives robust counteroffers and will not reach its true reservation price too quickly.
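
One plausible shape for this schedule is sketched below; the exact turn at which the true reservation price is revealed is an assumption, since the text says only that it happens after several conversation turns.

```python
def subjective_limit(turn: int, initial_limit: float,
                     strong_counter_price: float, true_reservation: float,
                     reveal_after: int = 4) -> float:
    # Turn 0: a random seed inside the strategic target-price range
    # (chosen by the caller); turns 1..reveal_after-1: a price that
    # corresponds to a strong counteroffer; afterwards: the agent's
    # true reservation price from the negotiation scenario.
    if turn >= reveal_after:
        return true_reservation
    if turn >= 1:
        return strong_counter_price
    return initial_limit
```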
# 6.3 Preparation Feedback
The preparation feedback we provide users is based on their answers to pre-negotiation preparation questions. We specifically look for errors corresponding to strategic walk-away point, strategic target price, and giving the first offer in our annotation scheme (Section 5). Since each of these errors is identified with a mathematical formula, flagging them is trivial. We give feedback to users either via a hard-coded message containing the correct answer and an explanation, or by prompting GPT-4o to generate feedback given the user's answer, the correct answer, and in-context examples of quality feedback written by expert negotiators (Appendix F). A full example of preparation feedback can be found in Appendix E.
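
For illustration, the formula-based checks can be expressed as simple rules. The field names and the target-price threshold below are our assumptions; the 90%-of-target opening rule follows the example feedback in Appendix E:

```python
def flag_preparation_errors(answers: dict, scenario: dict) -> list[str]:
    """Rule-based checks for the three preparation errors (illustrative thresholds)."""
    errors = []
    # Strategic walk-away point: should match the buyer's budget / best alternative.
    if answers["walk_away"] != scenario["budget"]:
        errors.append("strategic walk-away point")
    # Strategic target price: should sit near the low end of the market range
    # (the 0.2 fraction is an illustrative cutoff, not the paper's formula).
    ambitious = scenario["market_low"] + 0.2 * (scenario["market_high"] - scenario["market_low"])
    if answers["target"] > ambitious:
        errors.append("strategic target price")
    # Opening point: should leave room to concede, i.e. below 90% of the target.
    if answers["opening"] > 0.9 * answers["target"]:
        errors.append("giving the first offer")
    return errors
```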
# 6.4 Negotiation Feedback
Negotiation feedback is provided based on users' conversations with the automated bargaining agent. We give users two categories of negotiation feedback. The first is feedback associated with individual conversation turns, which we call "turn-based feedback". The second is "holistic feedback", which corresponds to the conversation as a whole, such as the conversational tone or politeness level.
# 6.4.1 Turn-Based Feedback
Figure 1 gives an overview of the pipeline ACE uses to provide turn-based feedback. Our pipeline consists of three components: 1) error identification, 2) direct feedback, and 3) utterance revision. We describe each of these components in detail below.

Figure 1: Diagram illustrating the turn-based feedback flow for ACE as well as an example of holistic feedback.
Error identification. To provide effective feedback, we start by detecting errors the user made at each conversational turn of their transcript. The errors we flag for turn-based feedback are based on the negotiation errors in our annotation scheme (Section 5). We divide these errors into two groups and use a different strategy for detecting each of them.
The first group of errors is based on price offers and involves applying a formula. These include categories such as strong counteroffer or ambitious opening point. The main challenge in identifying these mistakes is extracting the relevant price from the user utterance. We do this by prompting GPT-4 to extract the price. Our prompt consists of the user utterance followed by nine hand-written, in-context examples of successful price extractions (Table 14). After extracting the price, we apply the relevant formula to detect errors.
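
A minimal sketch of this step using the OpenAI chat API follows. The two in-context examples shown are ours (the paper's nine examples are in Table 14), and the midpoint rule in the second function is a heuristic reconstruction from the feedback examples in Appendix E, not necessarily the exact formula:

```python
from openai import OpenAI

client = OpenAI()

FEW_SHOT = (
    'Utterance: "That seems high, could you do $12,500?" -> Price: 12500\n'
    'Utterance: "When\'s the last time you had it tuned up?" -> Price: NONE\n'
)

def extract_price(utterance: str) -> float | None:
    """Extract the price offered in a turn, or None if no price is present."""
    resp = client.chat.completions.create(
        model="gpt-4",
        temperature=0,
        messages=[{"role": "user",
                   "content": "Extract the price the buyer offers, or NONE.\n"
                              + FEW_SHOT
                              + f'Utterance: "{utterance}" -> Price:'}],
    )
    answer = resp.choices[0].message.content.strip()
    return None if "NONE" in answer else float(answer.replace("$", "").replace(",", ""))

def is_strong_offer(offer: float, counterpart_offer: float, target: float) -> bool:
    # Midpoint heuristic: a strong offer keeps the buyer's target near the
    # midpoint of the range under discussion.
    return (offer + counterpart_offer) / 2 <= target
```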
The second group of errors is based on the users' language rather than prices, such as including rationale or breaking the ice. For each turn, we use
a set of classifiers, one for each of the three relevant categories, to determine whether the turn contains an error. We create our classifiers by prompting GPT-4o to output a True/False label for the turn. Our prompt consists of the user utterance along with the error definition. For more difficult categories, such as including rationale, the prompt also contains in-context examples from our collected dataset (Table 15).
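
One such classifier can be sketched as follows; the prompt wording is illustrative, and the actual definitions and in-context examples are in Tables 15 and 16:

```python
from openai import OpenAI

client = OpenAI()

def has_language_error(utterance: str, error_definition: str, examples: str = "") -> bool:
    """Binary GPT-4o classifier for one language-based error category."""
    resp = client.chat.completions.create(
        model="gpt-4o",
        temperature=0,
        messages=[{"role": "user",
                   "content": f"Error definition: {error_definition}\n{examples}"
                              "Does the following turn contain this error? "
                              "Answer True or False.\n"
                              f'Turn: "{utterance}"'}],
    )
    return resp.choices[0].message.content.strip().startswith("True")
```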
Direct feedback. When we identify a turn with a mistake, we prompt GPT-4o to give direct comments on each error committed. This includes an explanation of the error and the tactical value of correcting it; see Figure 1 for examples. Our prompt consists of the conversation context, a definition of the error committed (Section 5 and Table 16), and one or two in-context examples of feedback written by negotiation instructors. We generate feedback for each mistake committed, which can result in the comments being quite lengthy. Therefore, in cases where the user commits more than one mistake in a turn, we give a summary of the error explanations as the final direct feedback.
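
Sketched below with illustrative prompt wording (the real error definitions and instructor-written exemplars are in Section 5 and Table 16):

```python
from openai import OpenAI

client = OpenAI()

def ask(content: str) -> str:
    resp = client.chat.completions.create(
        model="gpt-4o", messages=[{"role": "user", "content": content}])
    return resp.choices[0].message.content

def direct_feedback(context: str, errors: list[str],
                    definitions: dict[str, str], exemplars: dict[str, str]) -> str:
    """One comment per detected error; several comments get compressed into a summary."""
    comments = [ask(f"Conversation so far:\n{context}\n\n"
                    f"Error committed: {definitions[err]}\n"
                    f"Example instructor feedback:\n{exemplars[err]}\n"
                    "Explain the error and the tactical value of correcting it.")
                for err in errors]
    if len(comments) == 1:
        return comments[0]
    # More than one mistake in the turn: compress into a single summary.
    return ask("Summarize these feedback comments into one short paragraph:\n\n"
               + "\n\n".join(comments))
```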
Utterance revision. Along with the direct feedback, we also present users with a revised version of their utterance with their mistakes corrected. We prompt GPT-4o to generate the corrected utterance; the prompt includes the user utterance, the direct feedback, and three in-context examples of hand-written utterance revisions (Table 17).
The final turn-based feedback given to the user is the direct feedback concatenated with the revised user utterance. See Figure 1 and Appendix E for complete examples.
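
Both the revision step and the final concatenation can be sketched as follows, again with illustrative prompt wording (the three hand-written revision examples are in Table 17):

```python
from openai import OpenAI

client = OpenAI()

def revise_utterance(utterance: str, feedback: str, revision_examples: str) -> str:
    """Rewrite the user's turn so the flagged mistakes are corrected."""
    resp = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user",
                   "content": f"{revision_examples}\n"
                              f'Original utterance: "{utterance}"\n'
                              f"Feedback: {feedback}\n"
                              "Rewrite the utterance so it follows the feedback:"}],
    )
    return resp.choices[0].message.content

def turn_based_feedback(utterance: str, feedback: str, revision_examples: str) -> str:
    # Final output shown to the user: direct feedback, then the suggested rewrite.
    revision = revise_utterance(utterance, feedback, revision_examples)
    return f"{feedback}\n\nA suggested alternative: {revision}"
```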
# 6.4.2 Holistic Feedback
The holistic feedback presented to the user is not based on any specific errors identified within the transcript. Instead, we prompt GPT-4o to comment on the linguistic aspects of the user's conversation. The aspects we focus on are formality, firmness, and linguistic level. For formality, the user should stay polite and avoid being rude or pushy. It is also better for users to be firm and assertive in their language, as studies have shown that this communication style leads to better deals (Jeong et al., 2019). In terms of linguistic level, users should avoid apologizing or using language that could be interpreted as a personal attack (Fisher et al., 2011). We prompt the model with a summary of the attributes for these three aspects and have it generate feedback. We have the model quote specific phrases from the user's transcript to make the comments more targeted and personalized (Table 19). See Figure 1 for a full example.
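
An illustrative version of this prompt is sketched below; the attribute summary is a paraphrase, and the actual prompt is given in Table 19:

```python
from openai import OpenAI

client = OpenAI()

ATTRIBUTES = """Formality: stay polite; avoid rude or pushy language.
Firmness: firm, assertive language tends to produce better deals.
Linguistic level: avoid apologizing or phrasing that reads as a personal attack."""

def holistic_feedback(transcript: str) -> str:
    """Generate holistic comments that quote specific phrases from the transcript."""
    resp = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user",
                   "content": f"{ATTRIBUTES}\n\n"
                              "Comment on the buyer's formality, firmness, and "
                              "linguistic level, quoting specific phrases from "
                              f"the transcript.\n\nTranscript:\n{transcript}"}],
    )
    return resp.choices[0].message.content
```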
# 7 Evaluation
# 7.1 Error Identification Evaluation
We use a subset of the annotated data from Section 5 to evaluate how well ACE can identify mistakes. This subset consists of 26 dialogues (with a total of 494 conversational turns) that were held out during the creation and testing of our coaching system. Since the preparation errors in our annotation scheme are trivial to identify, we measure how well ACE can classify negotiation errors.
Table 4 shows a breakdown of the system's accuracy. Using human annotations as our ground truth labels, we measure how well ACE predicts these labels. The system can identify mistakes with a high accuracy of at least 0.90 for all error categories. However, it performs worse in terms of precision and recall, with the F1 score ranging from a low of 0.54 for strategic closing to a high of 0.93 for the first offer category.
<table><tr><td>Error Category</td><td>Accuracy</td><td>Precision</td><td>Recall</td><td>F1 Score</td></tr><tr><td>Breaking the ice</td><td>0.98</td><td>0.99</td><td>0.76</td><td>0.83</td></tr><tr><td>First offer</td><td>0.99</td><td>0.95</td><td>0.91</td><td>0.93</td></tr><tr><td>Strong first offer</td><td>0.98</td><td>0.91</td><td>0.83</td><td>0.85</td></tr><tr><td>Strong counteroffer</td><td>0.96</td><td>0.74</td><td>0.73</td><td>0.73</td></tr><tr><td>Including rationale</td><td>0.90</td><td>0.81</td><td>0.63</td><td>0.67</td></tr><tr><td>Strategic closing</td><td>0.94</td><td>0.72</td><td>0.53</td><td>0.54</td></tr></table>
Table 4: A table indicating how accurately ACE can identify user mistakes. Our system is able to detect errors with high accuracy, but performs worse in terms of precision and recall. This is reflected in the lower F1 scores for including rationale and strategic closing.
This suggests that ACE has difficulty balancing precision and recall for some annotation categories. Low recall in particular seems to be driving the lower scores, which implies that ACE has trouble identifying true positive cases for some difficult categories.
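
For reference, per-category scores of this kind can be computed with scikit-learn; the macro averaging shown is an assumption about how Table 4 aggregates the positive and negative classes:

```python
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def score_category(y_true: list[bool], y_pred: list[bool]) -> dict:
    """Accuracy, precision, recall, and F1 for one error category."""
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average="macro", zero_division=0)
    return {"accuracy": accuracy_score(y_true, y_pred),
            "precision": precision, "recall": recall, "f1": f1}
```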
# 7.2 ACE-generated Feedback Evaluation
We next evaluate the feedback generated by ACE via a user experiment. Our experiment consists of two pilot studies and a main experiment. In our first pilot study (Pilot Study A) we recruited 100 native English-speaking U.S. participants from Prolific and asked them to perform two trials of used car negotiation. All participants in both trials were assigned the role of the buyer based on the used car scenario in Figure 4. Participants were randomly assigned to treatment or control conditions, and ACE feedback was only provided in the first trial of the treatment condition. Users in the control condition were given no feedback. The results of the pilot study showed that the improvements in the treatment condition were significantly larger than in the control condition. Additional details for Pilot Study A can be found in Appendix B.
To exclude the possibility that the improvements were due to the learning of situation-specific inert knowledge limited to the used car scenario, we developed a new negotiation scenario. This new scenario involves negotiating over the price of a summer sublease and can be seen in Figures 6 and 7. We tested its difficulty in Pilot Study B to ensure it did not differ from the used car scenario. We recruited 46 Prolific participants to negotiate against the ACE chat agent in the role of the buyer. T-tests revealed no significant differences between the standardized deal prices of the two scenarios ($p = 1$). Their distributions also showed no significant differences (Mann-Whitney U test: $p = 0.9$; Levene test: $p = 0.6$).
<table><tr><td></td><td>ACE Condition (N=119)</td><td>Other-feedback (N=129)</td><td>No-feedback Baseline (N=126)</td><td>Between-person Comparison</td></tr><tr><td>\( 1^{st} \) Trial: Used Car</td><td>$12,891 (623.25)</td><td>$12,889 (779.13)</td><td>$12,948 (789.95)</td><td>F(2,371)=0.27,p=0.77</td></tr><tr><td>\( 2^{nd} \) Trial: Summer Sublease</td><td>$7,528 (658.72)</td><td>$7,751 (459.72)</td><td>$7,827 (431.65)</td><td>F(2,371)=10.79,p<0.001</td></tr><tr><td>Within-person Comparison</td><td>t=-2.97, p=0.003</td><td>t=0.30, p=0.76</td><td>t=1.03, p=0.30</td><td>F(2,371)=8.80,p<0.001</td></tr></table>
Table 5: A summary of outcomes from the human evaluation of ACE. Our results show that buyers who received feedback from ACE performed significantly better in a following negotiation than those who didn't. Note: 1. Among 371 participants, $52\%$ were female; $\bar{x}_{age} = 34.48$ years, $SD_{age} = 8.97$ ; $60.1\%$ were White/Caucasian; $54.4\%$ had a bachelor's degree and above. The median completion time for this study was 39.68 minutes. 2. Standard deviations are presented in parentheses. 3. $d$ in reported results stands for Cohen's d, a measure of effect size, calculated as the difference between two means divided by the pooled standard deviation of the data.
Therefore, we can confidently state that the difficulty of the two scenarios is equivalent, so any differences that emerge across trials cannot come from the scenario itself.
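
These three tests correspond directly to standard SciPy calls; the arrays below are random placeholders standing in for the standardized deal prices from Pilot Study B:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
car = rng.normal(0.0, 1.0, size=23)       # placeholder standardized prices
sublease = rng.normal(0.0, 1.0, size=23)  # placeholder standardized prices

print(stats.ttest_ind(car, sublease).pvalue)     # mean difference (reported p = 1)
print(stats.mannwhitneyu(car, sublease).pvalue)  # distribution (reported p = 0.9)
print(stats.levene(car, sublease).pvalue)        # variance (reported p = 0.6)
```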
After the two pilot studies were completed, we performed our main experiment. The main experiment was designed based on Pilot Study A but with several enhancements. The experimental details and results of this study are given in the following sections.
# 7.2.1 Experimental Setup
For our main experiment, we recruited 390 U.S.-based native English speakers from Prolific. This sample size was chosen based on a power analysis for a medium-sized effect ($f = 0.2$), which determined that $N > 390$ is required for robust results of ANOVA tests across three conditions. Each participant was paid $8 for completing this 40-minute online study. We obtained a total of 374 valid responses.
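
Assuming a conventional alpha of 0.05 and a 95% power target (neither value is stated above), this requirement can be approximately reproduced with statsmodels:

```python
from statsmodels.stats.power import FTestAnovaPower

# Total N for a one-way ANOVA across three conditions at effect size f = 0.2.
n = FTestAnovaPower().solve_power(effect_size=0.2, k_groups=3,
                                  alpha=0.05, power=0.95)
print(round(n))  # ~386 total participants, close to the reported N > 390
```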
After consenting to this anonymous study, participants engaged in two trials of negotiation with a bot that simulated standard distributive bargaining tactics. All participants were assigned the role of the buyer. The first trial of negotiation was based on the used car scenario in Figure 4 and the second trial was based on the summer sublease scenario in Figure 6. After each negotiation, they answered a round of questions. Participants were randomly assigned to one of three conditions. In the ACE condition, participants received ACE-generated feedback. In the "Other-feedback" condition, participants were given feedback based on the method described in Fu et al., 2023. This approach leverages the zero-shot capabilities of GPT-4 to give feedback and has been shown to improve the negotiation performance of LLMs (Fu et al., 2023). Lastly, participants in the "No-feedback" condition received no feedback on their performance.
The procedure is illustrated in Figure 2. We preregistered the design for our user experiment at aspredicted.org/NPR_36R.
Before starting, participants in all conditions answered four questions about their goals in the negotiation (e.g., how important it would be for them to reach a favorable deal or to be a tough bargainer). As shown in Table 6, participants across conditions showed no differences in any of these goals, suggesting that differences in learning outcomes between conditions should be attributed to the effectiveness of ACE rather than to motivation level.
For the first trial, participants in the ACE condition $(N = 119)$ proceeded through the ACE system as described in Section 6.1. Participants in the Other-feedback condition $(N = 129)$ proceeded through a similar system but were given feedback according to the method outlined in Fu et al., 2023. Users in the No-feedback condition $(N = 126)$ followed a similar procedure but were not given any feedback at the end of the negotiation. In every condition, we asked participants a series of reflection questions to ensure they digested any feedback they were given and prepared their strategy for the next round of negotiation (See Table 9).
For the second trial, participants in all conditions practiced with a negotiation agent with no feedback provided, as participants in the No-feedback condition did in their first trial. Then we asked all participants about their subjective perceptions of improvement in the second negotiation compared to the first. A sample item was "Compared to the first round of negotiation, in the second negotiation, I felt more confident." See details in Appendix A.
# 7.2.2 Results
Objective Improvement. We extracted the final deal price participants settled on from their chat history with the negotiation agent. Lower
prices indicate more successful negotiations for the participants as buyers, so reaching a lower price in the second trial provides objective evidence of learning. As may be seen in Table 5, the performance improvement was significant in the ACE condition $(t = 2.97, 95\% \mathrm{CI} = [0.14, 0.70]$, $p = 0.003$, $d = 0.38)$, while no significant change in deal price was observed in the Other-feedback or No-feedback conditions $(t = -0.23$, $p = 0.82$; $t = -1.03$, $p = 0.30)$. A significant $2 \times 3$ two-way mixed ANOVA $(F(2, 368) = 8.67$, $p < 0.001)$ indicated that the magnitude of improvement differed across conditions.
Further, $2 \times 2$ ANOVAs suggested that the improvement in the ACE condition was significantly higher than in the No-feedback condition ($F(1,246) = 12.82$, $p < 0.001$) or the Other-feedback condition ($F(1,240) = 10.04$, $p = 0.002$), confirming the effectiveness of the ACE condition compared to the other conditions. Additionally, the lack of a significant difference between the Other-feedback and No-feedback conditions ($F(1,250) = 0.57$, $p = 0.451$) indicates that Other-feedback did not improve negotiation performance, underscoring the need for detailed, targeted feedback like that provided by the ACE system. This also suggests that the kind of feedback that helps LLMs improve their task performance may not aid human performance. The feedback method in Fu et al., 2023 has been shown to improve the abilities of LLM negotiators (Appendix C), but our user experiment demonstrates that this type of coaching may not be useful for humans.
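
For reference, the key interaction test can be sketched with the pingouin package, using synthetic placeholder data (the column names and data-generation step are ours):

```python
import numpy as np
import pandas as pd
import pingouin as pg

rng = np.random.default_rng(0)
conditions = ["ACE", "Other-feedback", "No-feedback"]
rows = [{"pid": pid, "condition": conditions[pid % 3], "trial": trial,
         "price": rng.normal(0.0, 1.0)}
        for pid in range(374) for trial in (1, 2)]
df = pd.DataFrame(rows)

# 2 (trial, within) x 3 (condition, between) mixed ANOVA; the Interaction row
# carries the F test for whether improvement differs across conditions.
print(pg.mixed_anova(data=df, dv="price", within="trial",
                     between="condition", subject="pid"))
```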
Subjective Improvement. Participants in the ACE condition also reported a higher score for perceived improvement in the second negotiation $(\bar{x} = 4.34, SD = 0.62)$ compared to those in the Other-feedback $(\bar{x} = 4.19, SD = 0.70)$ and No-feedback $(\bar{x} = 4.17, SD = 0.66)$ conditions. A marginally significant $2 \times 3$ two-way mixed ANOVA $(F(2,371) = 2.59, f = 0.12, p = 0.08)$ indicated that the magnitude of improvement differed across conditions.
# 8 Conclusion
AI tutoring systems have the potential to democratize high-quality education in key areas such as negotiation. Prior work has shown that additional negotiation coaching is needed to correct for systematic gender and ethnic differences in bargaining
performance (Amanatullah and Morris, 2010; Lu and Zhao, 2023). To address this need, we built ACE to mimic the coaching learners would receive from a professional negotiation instructor in a seminar setting. ACE was built based on a dataset and annotation scheme created in collaboration with experienced negotiation instructors. We evaluated our system and confirmed its error identification accuracy and feedback effectiveness. Results from a user experiment demonstrated that users who interacted with ACE improved their negotiation performance significantly compared to those who did not receive ACE coaching.
# Limitations
There are some limitations to the ACE system. The challenge for our negotiation agent is that LLMs tend to be agreeable and are not hardball bargainers. They respond to the user, so if users refused to discuss price, our negotiation agent wouldn't talk about price either. Additional work is needed to ensure our negotiation agent can guide users towards discussing price, especially when learners may be reluctant to fully engage in the negotiation.
Another limitation of ACE is that it does not retain any memory of previous user interactions. As a result, the utility of the feedback and negotiation agent may diminish for individuals who want to engage in repeated interactions. More work is required to enable our system to retain previous user interactions and tailor the chat agent and feedback accordingly.
Finally, the annotation scheme and feedback method we built are based on an "American" style of negotiation. Other cultures have different standards for effective bargaining and for what constitutes a mistake in negotiation. Therefore, ACE may not be as useful for individuals who want to improve their negotiation capabilities in other cultural contexts.
# Acknowledgments
We would like to thank Shehan Panditharatne, Xianglu Tang, and Aylin Hadzhieva for their help in implementing our user experiments.
# References
Emily Amanatullah and Michael Morris. 2010. Negotiating gender roles: Gender differences in assertive negotiating are mediated by women's fear of backlash and attenuated when negotiating on behalf of
others. Journal of Personality and Social Psychology, 98:256-267.
Linda Babcock and Sara Laschever. 2003. Women Don't Ask: Negotiation and the Gender Divide. Princeton University Press, Princeton.
Federico Bianchi, Patrick John Chia, Mert Yuksekgonul, Jacopo Tagliabue, Dan Jurafsky, and James Zou. 2024. How well can LLMs negotiate? NegotiationArena platform and analysis. Preprint, arXiv:2402.05863.
Jacob Cohen. 1960. A coefficient of agreement for nominal scales. Educational and Psychological Measurement, 20(1):37-46.
Paul Denny, Juho Leinonen, James Prather, Andrew Luxton-Reilly, Thezyrie Amarouche, Brett A. Becker, and Brent N. Reeves. 2024. Prompt problems: A new programming exercise for the generative AI era. In Proceedings of the 55th ACM Technical Symposium on Computer Science Education V.1, SIGCSE 2024, pages 296-302, New York, NY, USA. Association for Computing Machinery.
Roger Fisher, William Ury, and Bruce Patton. 2011. Getting to Yes: Negotiating Agreement Without Giving In, 3 edition. Penguin Books, New York.
Yao Fu, Hao Peng, Tushar Khot, and Mirella Lapata. 2023. Improving language model negotiation with self-play and in-context learning from ai feedback. Preprint, arXiv:2305.10142.
Jonathan Gratch, David DeVault, and Gale Lucas. 2016. The benefits of virtual humans for teaching negotiation. In Intelligent Virtual Agents, pages 283-294, Cham. Springer International Publishing.
He He, Derek Chen, Anusha Balakrishnan, and Percy Liang. 2018. Decoupling strategy and generation in negotiation dialogues. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2333-2343, Brussels, Belgium. Association for Computational Linguistics.
Martha Jeong, Julia Minson, Michael Yeomans, and Francesca Gino. 2019. Communicating with warmth in distributive negotiations is surprisingly counterproductive. Management Science, 65(12):5813-5837.
Hieke Keuning, Johan Jeuring, and Bastiaan Heeren. 2018. A systematic literature review of automated feedback generation for programming exercises. ACM Trans. Comput. Educ., 19(1).
Charles Koutcheme, Nicola Dainese, Sami Sarsa, Arto Hellas, Juho Leinonen, and Paul Denny. 2024. Open source language models can provide feedback: Evaluating LLMs' ability to help students using GPT-4-as-a-judge. Preprint, arXiv:2405.05253.
Roy J. Lewicki, David M. Saunders, and Bruce Barry. 2021. Essentials of Negotiation, seventh edition, international student edition. McGraw-Hill Education, New York, NY.
Mike Lewis, Denis Yarats, Yann Dauphin, Devi Parikh, and Dhruv Batra. 2017. Deal or no deal? end-to-end learning of negotiation dialogues. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2443-2453, Copenhagen, Denmark. Association for Computational Linguistics.
Yu Li, Chun-Yen Chen, Dian Yu, Sam Davidson, Ryan Hou, Xun Yuan, Yinghua Tan, Derek Pham, and Zhou Yu. 2022. Using chatbots to teach languages. In Proceedings of the Ninth ACM Conference on Learning @ Scale, L@S '22, page 451-455, New York, NY, USA. Association for Computing Machinery.
Kai-Hui Liang, Sam Davidson, Xun Yuan, Shehan Panditharatne, Chun-Yen Chen, Ryan Shea, Derek Pham, Yinghua Tan, Erik Voss, and Luke Fryer. 2023. ChatBack: Investigating methods of providing grammatical error feedback in a GUI-based language learning chatbot. In Proceedings of the 18th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2023), pages 83-99, Toronto, Canada. Association for Computational Linguistics.
Jeffrey Loewenstein and Leigh Thompson. 2006. Learning to Negotiate: Novice and Experienced Negotiators, pages 77-97. Frontiers of Social Psychology. Psychology Press.
Jackson Lu and Michelle Zhao. 2023. Empowering asians' leadership advancement with debate training: A longitudinal field experiment. Academy of Management Proceedings, 2023(1):18120.
Jackson G. Lu, Richard E. Nisbett, and Michael W. Morris. 2020. Why East Asians but not South Asians are underrepresented in leadership positions in the United States. Proceedings of the National Academy of Sciences, 117(9):4590-4600.
Jackson G. Lu, Richard E. Nisbett, and Michael W. Morris. 2022. The surprising underperformance of East Asians in US law and business schools: The liability of low assertiveness and the ameliorative potential of online classrooms. Proceedings of the National Academy of Sciences, 119(13):e2118244119.
OpenAI. 2023. GPT-4 technical report. Preprint, arXiv:2303.08774.
Maciej Pankiewicz and Ryan S. Baker. 2023. Large language models (GPT) for automating feedback on programming assignments. Preprint, arXiv:2307.00150.
Kun Qian, Ryan Shea, Yu Li, Luke Kutszik Fryer, and Zhou Yu. 2023. User adaptive language learning chatbots with a curriculum. In Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, pages 308-313, Cham. Springer Nature Switzerland.
Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022.
Robust speech recognition via large-scale weak supervision. Preprint, arXiv:2212.04356.
Johannes Schneider, Steffi Haag, and Leona Chandra Kruse. 2023. Negotiating with LLMs: Prompt hacks, skill gaps, and reasoning deficits. Preprint, arXiv:2312.03720.
Shashank Sonkar, Naiming Liu, Debshila Mallick, and Richard Baraniuk. 2023. CLASS: A design framework for building intelligent tutoring systems based on learning science principles. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1941-1961, Singapore. Association for Computational Linguistics.
Siddharth Verma, Justin Fu, Sherry Yang, and Sergey Levine. 2022. CHAI: A CHatbot AI for task-oriented dialogue with offline reinforcement learning. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4471-4491, Seattle, United States. Association for Computational Linguistics.
Aaron S. Wallen, Michael W. Morris, Beth A. Devine, and Jackson G. Lu. 2017. Understanding the MBA gender gap: Women respond to gender norms by reducing public assertiveness but not private effort. Personality and Social Psychology Bulletin, 43(8):1150-1170. PMID: 28903718.
Haolan Zhan, Yufei Wang, Zhuang Li, Tao Feng, Yuncheng Hua, Suraj Sharma, Lizhen Qu, Zhaleh Semnani Azad, Ingrid Zukerman, and Reza Haf. 2024. Let's negotiate! a survey of negotiation dialogue systems. In Findings of the Association for Computational Linguistics: EACL 2024, pages 2019-2031, St. Julian's, Malta. Association for Computational Linguistics.
Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023. Judging LLM-as-a-judge with MT-Bench and Chatbot Arena. Preprint, arXiv:2306.05685.
# A Additional Experimental Details
# A.1 Objective Performance
In addition to the deal price they settled on, another indicator of participants' objective performance in negotiation is how long they persist before caving in. If participants have learned more negotiation tactics and know-how, they should be able to persist longer in the second trial. Therefore, we also ran ANOVA tests on the number of negotiation turns and the duration of the negotiations, and found a pattern similar to the deal-price results. That is, participants in the feedback group had more negotiation turns and longer negotiation durations in the second trial. A detailed analysis of the negotiation duration (in seconds) can be found in Table 7.
# A.2 Post-Negotiation Reflection Questions
Table 9 shows the set of reflection questions we asked users after they completed the first negotiation in the experiment. These questions were asked in all conditions to ensure that no differences in negotiation ability emerged from users reflecting on their performance.
# B Pilot Study A
The procedure and results from Pilot Study A are described in this section. We began by recruiting 100 U.S.-based native English speakers from Prolific. Each participant was paid $8 for completing this online study involving two trials of a car negotiation, each followed by survey questions. We obtained a total of 96 effective responses.
After consenting to the study, participants engaged in two trials of a used car negotiation with the ACE chatbot. All participants were assigned the role of the buyer in both trials based on the scenario in Figure 4. Participants were randomly assigned to treatment or control conditions, and ACE coaching was only provided in the first trial of the treatment condition. The procedure is illustrated in Figure 3.
For the first trial, participants in the ACE condition $(N = 46)$ proceeded through the ACE system as described in Section 6.1. To ensure they digested their feedback thoroughly, we asked them a series of questions to guide their reflection on the feedback (see Table 9). Participants in the Control condition $(N = 50)$ were not asked preparation questions nor given any feedback. To match the workload across conditions, they were asked a series of filler questions instead (see Table 11).
For the second trial, participants in both conditions practiced with a negotiation agent with no ACE feedback provided, as participants in the control condition did in their first trial. Then we asked all participants their subjective perceptions of improvement in the second negotiation compared to the first.
# B.1 Results
Objective Improvement. We measured improvement by looking at the change in deal price across negotiation trials. As may be seen in Table 10, the performance improvement was significant in the ACE condition but not in the Control condition. A two-level within-person $\times$ two-level between-person factorial ANOVA revealed a significant interaction effect. This indicates that participants learned more from the experience of negotiating against the bot when it was accompanied by ACE coaching.
Subjective Improvement. Participants in the ACE condition also reported a higher score for perceived improvement in the second negotiation ( $\bar{x} = 4.23$ , $SD = 0.72$ ) compared to those in the Control condition ( $\bar{x} = 3.93$ , $SD = 0.80$ ; $t = 1.96$ , $p = .053$ ).
# B.2 Post-Negotiation Reflection Questions
Tables 9 and 11 show the set of reflection questions we asked users after they completed the first negotiation in the experiment. Users were given these instructions when answering the reflection questions: Please answer these questions in your own words. No AI-generated text is allowed. Use at least 30 characters for each of these open-ended questions.
# C Alternative Technical Approaches
# C.1 Building ACE
In this section we briefly go over some alternative technical approaches we explored to build the ACE system. We tested using smaller models for the error detection module such as GPT-3.5-turbo, T5, and Mistral. In general, we found that these models perform worse than GPT-4 and GPT-4o, especially when it comes to price extraction. This can be seen in Table 12 in the decreased performance for "strong first offer" and "strong counter offer" when compared to GPT-4o. GPT-4 and GPT-4o performed comparably so we chose GPT-4o because it is faster.
<table><tr><td>Survey Item</td><td>ACE Condition (N=119)</td><td>Other-feedback (N=129)</td><td>No-feedback Baseline (N=126)</td><td>Between-person Comparison</td></tr><tr><td>1. To reach a favorable deal</td><td>4.47 (0.64)</td><td>4.49 (0.66)</td><td>4.51 (0.61)</td><td>F = 0.13, p = .88</td></tr><tr><td>2. To reach a fair deal</td><td>4.34 (0.7)</td><td>4.29 (0.79)</td><td>4.46 (0.68)</td><td>F = 1.63, p = .20</td></tr><tr><td>3. To maintain an agreeable process</td><td>4.14 (0.84)</td><td>4.20 (0.64)</td><td>4.12 (0.77)</td><td>F = 0.38, p = .69</td></tr><tr><td>4. To be a tough bargainer</td><td>3.35 (0.93)</td><td>3.36 (1.05)</td><td>3.38 (1.00)</td><td>F = 0.03, p = .97</td></tr></table>
Table 6: Goal Set for Negotiations. Unless specified otherwise, all our questions use a five-point Likert scale. We asked participants "Before we start, please tell us about your approach to negotiations by rating the items below. It is important to me... (1 = Strongly disagree, 5 = Strongly agree)." The Cronbach's alpha of our 4-item measure is 0.90, indicating high internal reliability of the scale. The absence of differences in any of these goals excludes motivation level as an explanation for the differences that emerged across conditions.
<table><tr><td></td><td>ACE Condition (N=119)</td><td>Other-feedback (N=129)</td><td>No-feedback Baseline (N=126)</td><td>Between-person Comparison</td></tr><tr><td>\( 1^{st} \) Trial: Used Car</td><td>418.20 (307.16)</td><td>427.65 (310.64)</td><td>420.52 (278.41)</td><td>F(2,371)=0.03,p=0.97</td></tr><tr><td>\( 2^{nd} \) Trial: Summer Sublease</td><td>503.50 (305.67)</td><td>402.06 (238.43)</td><td>362.61 (218.19)</td><td>F(2,371)=9.92,p<0.001</td></tr><tr><td>Within-person Comparison</td><td>t=-2.15,p=0.03</td><td>t=0.73,p=0.46</td><td>t=1.86,p=0.06</td><td>F(2,371)=10.40,p<0.001</td></tr></table>
Table 7: Time duration of negotiations in seconds.
# Subjective Improvement Questions
1. I felt more confident.
2. I felt more comfortable bargaining.
3. I expressed myself better.
4. I had a better understanding of the process.
Table 8: Subjective improvement questions for participants. The first three items are adapted from an existing assertiveness scale (Wallen et al., 2017), and the last item was added to serve our study design. The scale's Cronbach's alpha is 0.90, which suggests high reliability and excellent internal consistency among the items.
# Post-Negotiation Reflection Questions
1. Based on the feedback, what should be your walkaway point, your target point, and your opening point, respectively?
2. Based on the feedback, what can be a compelling rationale for your offers, and what are useful questions to elicit information or persuade the seller to make concessions?
3. What tips about your performance did you receive about the early phase of your negotiation conversation? Accordingly, what would you strive to do next time?
4. What tips about your performance did you receive about the later phase of your negotiation conversation? Accordingly, what would you strive to do next time?
Table 9: Post-negotiation reflection questions for participants.
<table><tr><td></td><td>ACE Condition (N=46)</td><td>Control Condition (N=50)</td><td>Between-person Comparison</td></tr><tr><td>\( 1^{st} \) Negotiation</td><td>$12,928 (693.11)</td><td>$13,161 (429.08)</td><td>t = -1.96, p = 0.054</td></tr><tr><td>\( 2^{nd} \) Negotiation</td><td>$12,485 (967.77)</td><td>$13,091 (621.69)</td><td>t = -3.61, p < 0.001</td></tr><tr><td>Within-person Comparison</td><td>t = 2.52, p = 0.014</td><td>t = 0.66, p = 0.514</td><td>F(1, 94) = 4.42, p = 0.038</td></tr></table>
Table 10: A summary of outcomes from the human evaluation of ACE in Pilot Study A. Our results show that buyers who received feedback from ACE performed significantly better in a following negotiation than those who didn't. Note: 1. Among 96 participants, $43\%$ were female; $\bar{x}_{age} = 34.04$ years, $SD_{age} = 9.14$ ; $55\%$ were White/Caucasian; $51\%$ had a bachelor's degree and above. The median completion time for this study was 36.29 minutes.
# Reflection Questions for the Control Condition in Pilot Study A
1. If you want to develop a new hobby, what should be your first step? Please write down a tactical plan.
2. Can you think of any useful tactics to learn a new foreign language?
3. If you aim to improve your performance at work, what should you do? Please write down a tactical plan.
4. In applying to graduate school, what are some steps that a student can take to raise their GPA?
Table 11: Filler questions given to users in the Control condition in Pilot Study A after they completed their first negotiation. These questions were given to match the workload between conditions.
<table><tr><td>Error Category</td><td>Accuracy</td><td>Precision</td><td>Recall</td><td>F1 Score</td></tr><tr><td>Breaking the ice</td><td>0.99</td><td>0.96</td><td>0.94</td><td>0.95</td></tr><tr><td>First offer</td><td>0.98</td><td>0.82</td><td>0.81</td><td>0.81</td></tr><tr><td>Strong first offer</td><td>0.97</td><td>0.68</td><td>0.67</td><td>0.65</td></tr><tr><td>Strong counteroffer</td><td>0.91</td><td>0.46</td><td>0.50</td><td>0.48</td></tr><tr><td>Including rationale</td><td>0.87</td><td>0.52</td><td>0.54</td><td>0.51</td></tr><tr><td>Strategic closing</td><td>0.96</td><td>0.68</td><td>0.78</td><td>0.66</td></tr></table>
Table 12: A table indicating how accurately ACE can identify user mistakes when prompted with GPT-3.5-turbo-0125 instead of GPT-4o. Our system is still able to detect errors with high accuracy, but performs worse overall, especially in categories involving numbers, such as strong counteroffer and strong first offer.
As for the other components of our system, such as direct feedback and utterance revision, we do not have reference data and therefore could not perform automatic evaluations. Instead, we relied on small-scale expert evaluations of outputs to choose the best models. For each model we tested (GPT-4, GPT-4o, and GPT-3.5), we generated 10-15 examples of output and presented them to two experts, who then selected which output they preferred. Our evaluations showed that GPT-4 and GPT-4o performed the best, and we ultimately chose GPT-4o due to its speed.
For our chat agent, we tested both GPT-4 and GPT-4o by having two experts perform 10 negotiations with each model and rate its performance. We found that GPT-4 was better based on this evaluation. Prior work has extensively tested the negotiation performance of various LLM chat agents and also found that GPT-4 achieves quality performance (Bianchi et al., 2024).
# C.2 Alternate Feedback Approaches
In this section we provide some additional details about the alternative feedback approach in Fu et al., 2023. As mentioned in Section 7, this method uses the zero-shot capabilities of GPT-4 to give negotiation feedback. The method was originally designed to improve LLM negotiation performance; the
procedure goes as follows. First, two LLMs participate in a negotiation; then another LLM provides three suggestions on how the buyer/seller can improve their performance in the next negotiation. The initial negotiation, along with the suggestions, is used as the prompt for the following negotiation. Fu et al., 2023 demonstrate that this feedback is effective at improving performance between LLM negotiators. However, the negotiation setting they tested is fairly basic: it involves two parties negotiating over the sale of a balloon, with the buyer's and seller's opening offers hard-coded at $10 and $20, respectively. Therefore, we tested whether this approach remains effective when applied to our more complex setting of used car negotiation (Figures 4 and 5).
We conducted 200 simulated negotiations between an LLM buyer and seller. In 100 of the negotiations, no feedback was given to either party. In the other 100 negotiations, the buyer received feedback according to Fu et al., 2023. The average deal price with no feedback was $\$12,984$ ($SD = 554$), and the average deal price with feedback given to the buyer was $\$12,788$ ($SD = 749$). A two-sample t-test shows that the buyer's improvement was statistically significant ($p = .038$). This demonstrates that the feedback method in Fu et al., 2023 is able to enhance the negotiation performance of LLMs in
more complex settings such as used car negotiation. This also illustrates that the lack of improvement of human negotiators given feedback based on this method (Section 7) is not due to the nature or complexity of our scenario. Instead, the lack of improvement likely stems from inherent differences in what constitutes effective feedback for LLMs versus humans. This further emphasizes the need for extensive human testing when building educational systems, as LLMs may not serve as a realistic proxy for human learners.
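
This comparison reduces to a two-sample t-test; in the sketch below, random placeholder arrays stand in for the 100 deal prices per condition:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
no_feedback = rng.normal(12984, 554, size=100)    # placeholder deal prices
with_feedback = rng.normal(12788, 749, size=100)  # placeholder deal prices

t, p = stats.ttest_ind(no_feedback, with_feedback)
print(p)  # on the real simulation data, the reported value is p = .038
```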
# D Negotiation Scenarios
Figures 4 and 5 show the full negotiation scenarios for the first task in our collected dataset. These scenarios both relate to bargaining over a used Honda Accord. Figure 4 shows the scenario given to the buyer and Figure 5 shows the scenario for the seller. These same scenarios are used in our user experiment along with the summer sublease scenarios in Figures 6 and 7 (Section 7). The scenario for the buyer is given directly to the participants as part of the experiment instructions and the scenario for the seller is used to construct the system prompt for our negotiation agent. Table 20 shows the instructional prompt for the used car scenario.
# E Full Feedback
Figures 8 through 12 show full examples of feedback given by ACE. All of the feedback is given according to the conversation in Table 13 which is based on the Honda Accord negotiation scenario (Figures 4 and 5). Figure 8 gives a complete example of preparation feedback, Figures 9 to 11 show full examples of turn-based feedback, and Figure 12 shows a complete example of holistic feedback.
# F Prompts
Tables 14 through 20 show the key prompts we use to build ACE. All prompts were engineered with GPT-4 version GPT-4-0613 and GPT-4o version GPT-4o-2024-05-13. This includes the prompts we use to identify user errors (Tables 14 and 15), give direct feedback (Table 16), revise utterances (Table 17), and generate preparation feedback (Table 18) and holistic feedback (Table 19). We also include the prompt for our negotiation chatbot agent (Table 20).

Figure 2: Experiment diagram. We designed our experiment in Qualtrics.

Figure 3: Experiment diagram for Pilot Study A. We designed our experiment in Qualtrics.
Figure 4: Honda scenario for the buyer.
# CONFIDENTIAL INSTRUCTIONS -- FOR BUYER ONLY
You are moving to California to take a new job. A few weeks ago, on your way home from a weekend trip, your car slid into a ditch. Fortunately, no one was hurt, but your car was totaled. Your insurance company has responded fairly and quickly: this morning you received a cashier's check for $13,500, which you plan to use to buy another car immediately. In order to make it out to your job in time you must buy a replacement car today.
You have been looking around and you have found a 2004 Honda Accord which meets all of your requirements. The Honda has reasonable mileage (50,000 miles), automatic transmission, air conditioning, power steering/windows/door locks, and a CD player. It looks great: a dark green without any rust. Similar cars sell within a range of $11,000 to $15,000, depending on condition. You would like to get the price as far under $13,500 as possible.
The only realistic alternative you have to the Honda on such short notice is a 2006 Ford Taurus. The Taurus would cost you $13,500, but you really don't like Ford cars, and the color is a weird blue. You would greatly prefer the Accord. Still, you can't pay more than $13,500 for the Honda, both because that is your budget and because you have another car at that price. If you can't get the Honda price below $13,500 you will buy the Ford.
The seller is a friend of a friend of a friend and has been reasonable to work with so far.
# CONFIDENTIAL INSTRUCTIONS -- FOR SELLER ONLY
You were just promoted at work and you received an unusually large bonus for a job well done on a recent project. You have decided it's time to buy a new car. Because you can park only one car at your apartment building, the only thing standing in the way of bringing a new car home is selling the old one: a Honda Accord. You have no sentimental feeling toward the Honda: you hate the car and are delighted to get rid of it. Fortunately, a friend of a friend of a friend has expressed interest in buying the car.
When you bought the car in 2004 you paid about $21,000 for it. Similar cars today sell within a range of $11,000 to $15,000, depending on condition. The Honda has reasonable mileage (50,000 miles), automatic transmission, air conditioning, power steering/windows/door locks, and a CD player. It looks great: a dark green without any rust. You would like to get a price as much above $12,500 as possible.
Normally, you would wait around for the best deal but you have just learned that a brand new Volkswagen Passat—your new favorite car—has become available if you can sell the old Honda and make it to the Volkswagen dealer within 2 hours. This Passat happens to be configured exactly how you want it; if you can't get this one, there will be a significant time delay in ordering the car. Unfortunately, the most the dealer will give you in trade on the Honda is $12,500. This is barely enough for you to buy the Passat (your bonus will cover the rest), but it won't get you the extras you would like, such as a roof rack and high-performance tires. You really would greatly prefer to sell your Honda privately. Still, you can't accept less than $12,500 for the Honda, because that is what the dealer has offered. If you can't get a price above $12,500, you will sell it to the dealer.
Figure 5: Honda scenario for the seller.
# CONFIDENTIAL INSTRUCTIONS -- FOR BUYER ONLY
On the last day of the Spring semester, you receive an unexpected notice: your university apartment building needs emergency renovation, so you can't stay there over the summer. With a weeklong trip to Korea starting this evening, you need to secure a summer apartment today for your nearby internship.
In this area, summer sublet tenants typically pay a lump sum upfront for June, July, and August. The total price is negotiated informally between the apartment owner (or leaseholder) and the subletter.
The budget amount you have to work with for three months is $8,100—what you would have paid to keep your nice university apartment for the summer months. After asking around, you learn that prices of summer sublets for studios around campus range from $6,600 to $9,000. You can't spend more than $8,100, but it would be great if you could reach a deal for less than that, as it would mean some spending money for the summer.
The only viable option you have found so far is a studio in a building called "Urban Tower," where the sublet rent is $8,000 for three months. This fits your budget. However, you don't feel happy with this option because it's not in your preferred neighborhood.
You find another studio in a nearby building called "Horizon Loft." The listing pictures show it meets all your requirements: a modern kitchen and laundry facilities. The owner, affiliated with your university, agreed to discuss the sublet rent via email. If you can't negotiate a price below $8,100, you'll choose the Urban Tower apartment. You'll be writing a check to secure an apartment this afternoon.
Figure 6: Summer sublease scenario for the buyer.
# CONFIDENTIAL INSTRUCTIONS -- FOR SELLER ONLY
You have a longtime lease of a studio apartment in a building near the university called "Horizon Loft." You have recently been selected for a position abroad this summer. You have to depart tomorrow morning. Because you are so busy, you only got around to listing the studio with photos on a website for summer sublets yesterday.
In this area, summer sublet tenants typically pay a lump sum upfront for June, July, and August. The total price is negotiated informally between the apartment owner (or leaseholder) and the subletter. According to the website, prices for sublets of studios range between $6,600 and $9,000 for three months.
It is a well-equipped studio featuring a modern kitchen and shared laundry facilities. Based on your rent, the cost of the apartment for the three summer months would be $11,700. However, summer sublet rates are set lower because there are many vacancies. You didn't include a price in your website listing as you didn't want to deter any potential renters from contacting you. So far only one person has emailed you, a student from the university you are affiliated with, who seems reliable. You set up a time soon to chat about a potential sublet deal.
Getting the highest possible price is less important to you than securing a deal, but you will try to get a high price in case that is possible. You need to get this potential subletter to commit today, as you won't have time to negotiate with anyone else. You hope that after the negotiation in the following hour, they come by to drop off a check and pick up the key. If you can't get a price agreement of at least $7,500 you won't sublet the apartment at all, as it wouldn't be worth the trouble.
Figure 7: Summer sublease scenario for the seller.
# Example Conversation
AI Seller: Hi there! What a beautiful day, isn't it? I hope your weekend is off to a good start! Did you happen to catch the game last night?
User: No, I was busy with work.
AI Seller: Ah, I know how that goes, work can be quite demanding at times. I hope you're getting some time to unwind now. Shifting gears a bit, I wanted to chat about the Honda I'm selling. It's a real gem with automatic transmission, air conditioning, power features, and a CD player. Not to mention, its dark green color that gives it quite a classy look. Shall we delve into some more details about it?
User: Sure, how much are you asking for it?
AI Seller: I'm glad you're interested! Given the car's condition, low mileage, and features, I believe a fair asking price to start the negotiation would be at $16,000. How do you feel about that?
User: That seems high, could you do $12,500?
AI Seller: I appreciate your offer, but considering the car's low mileage and overall condition along with all the features it comes with, it does hold its value quite well. Would you be open to discussing a price of $15,000?
User: Sorry the most I can do is $13,100
AI Seller: I appreciate your offer. Given the features and condition of the car, I can come down to $14,500. It has a lot of value for that price considering its mileage and overall state. It's a solid, reliable car that still has a lot of life left in it. What do you think?
User: When's the last time you had it tuned up?
AI Seller: I'm glad you asked! The car's maintenance has always been a top priority. It received its most recent complete tune-up just a couple of months ago. This included changing the engine oil, checking all fluids, replacing spark plugs, inspecting the brakes, and doing a thorough check of the entire vehicle. So it's in great running condition. Would this possibly influence your offer?
User: Okay, I can maybe do $13,300.
AI Seller: I understand where you're coming from and I do appreciate the offer. How about we meet halfway? I can lower the asking price to $14,000. This car is a fantastic deal at that price and it would ensure you peace of mind for many years to come given its condition and maintenance history. What do you say?
User: The most I can do is $13,500
AI Seller: I understand that it's really important for you to stick to your budget. Considering all aspects, I can meet you at $13,500, but I must insist that this is the lowest price I can accept. This car is definitely worth every penny. Do we have a deal?
User: Yeah sounds good.
Table 13: An example conversation between a user and our LLM negotiation agent (Section 6).
1. What should be your "walkaway point"? (i.e., the maximum price you can pay?) ($)
Your Answer: 13500
Your reservation price corresponds accurately to the role information about your $13,500 budget for the car purchase.
2. What should be your "target point"? (i.e., the most optimistic realistic price that you should aim for?) ($)
Your Answer: 13000
Your target price of $13,000 is not ambitious enough to test how far this seller can be pushed. To negotiate more effectively, you should aim closer to the low end of the market range for this car. A more strategic target price would be below $11,833, ideally closer to $11,000, which is the minimum of the market range. Setting your sights lower gives you more room to negotiate upwards if needed, and it positions you to potentially secure a better deal. Adjusting your target price in this way will improve your leverage and strengthen your negotiating position.
3. What should be your "opening point"? (i.e., the first price that you will offer in the conversation) ($)
Your Answer: 12500
Your opening price of $12,500 is not ambitious enough relative to your target. Given that your target price is $11,700, setting your opening price at $12,500 means you don't leave yourself much room to negotiate and make concessions while still aiming to reach your target. To give yourself a better negotiating position, you should start with an opening price below 90% of your target. In this case, a better opening price would be closer to $10,530. This allows for more flexibility and room to maneuver during the negotiation, increasing the likelihood that you'll get closer to your target price of $11,700.
4. What is a rationale or explanation that you can give to support your opening point?
Your Answer: The current market price for the car and its condition.
If accompanied by a compelling rationale, a very low opening offer can come across as principled bargaining rather than greed or ignorance. The offer rationales that work best in negotiations are those that make reference to the market, to one's budget, to norms of fairness, and to the buyer and seller's levels of urgency.
5. What are some questions that you plan to ask the seller?
Your Answer: When's the last time the car was tuned up?
It is wise to plan the questions that you will ask before coming to the table. One purpose of questions is to gain information, such as how far the seller can be pushed. Another purpose is to persuade the seller to be reasonable by raising issues such as your relative ages or wealth levels. If you don't plan out questions beforehand, you may forget to ask them once the action begins.
Figure 8: Full example of preparation feedback.
|
| 518 |
+
|
| 519 |
+
Hi there! What a beautiful day, isn't it? I hope your weekend is off to a good start! Did you happen to catch the game last night?
|
| 520 |
+
|
| 521 |
+
No, I was busy with work.
|
| 522 |
+
|
| 523 |
+
Message feedback: Begin your negotiation conversation with some brief social conversation before delving into the economic issues. Show esteem for the other person (your counterpart) by praising what they are selling or asking about their day. "Breaking the ice" in some way through initial personal conversation creates rapport, which tends to increase openness and cooperativeness.
|
| 524 |
+
|
| 525 |
+
A suggested alternative: "Hey there! How have you been? I hope everything is going well on your end. I've been swamped with work lately, but I'm finally catching up. How's your week going?"
|
| 526 |
+
|
| 527 |
+
Ah, I know how that goes, work can be quite demanding at times. I hope you're getting some time to unwind now. Shifting gears a bit, I wanted to chat about the Honda I'm selling. It's a real gem with automatic transmission, air conditioning, power features, and a CD player. Not to mention, its dark green color that gives it quite a classy look. Shall we delve into some more details about it?
|
| 528 |
+
|
| 529 |
+
Sure, how much are you asking for it?
|
| 530 |
+
|
| 531 |
+
I'm glad you're interested! Given the car's condition, low mileage, and features, I believe a fair asking price to start the negotiation would be at $16,000. How do you feel about that?
|
| 532 |
+
|
| 533 |
+
Figure 9: Example of turn-based feedback where a user made an error in breaking the ice.
|
| 534 |
+
|
| 535 |
+
Message feedback: Negotiation research finds a benefit to speaking your opening offer first. It can "anchor" the other person's judgment of the price range, setting the stage for a more favorable outcome. Additionally, opening offers are most effective when accompanied by a rationale in terms of some objective reference point, such as an expert's valuation or market value indicated by past sales prices. Simply saying "that seems high" without providing a concrete reason or comparison makes it harder to justify your offer of $12,500. Considering the seller's offer of $16,000 and your target price of $13,000, a strong first offer would ideally be below $10,000. This approach helps to keep your target price near the midpoint of the range under discussion. By starting lower, you give yourself more room to negotiate up towards your target price.
|
| 536 |
+
|
| 537 |
+
A suggested alternative: "The current price seems quite high to me. Based on the market value and recent sales, I believe \(9,500 would be a fair offer. This price reflects the average market rate and aligns with my budget constraints. Is there any flexibility on your end to consider this offer?"
|
| 538 |
+
|
| 539 |
+
I appreciate your offer, but considering the car's low mileage and overall condition along with all the features it comes with, it does hold its value quite well. Would you be open to discussing a price of $15,000?
|
| 540 |
+
|
| 541 |
+
Sorry the most I can do is $13,100.
|
| 542 |
+
|
| 543 |
+
Message feedback: When you present a revised offer, it's persuasive to give some explanation for the move. Why are you offering more? Why are you resisting offering everything they ask for? The explanations you provide may be subjective, such as your eagerness to reach a deal or your pressing budget constraints, but some words of explanation like this help the seller understand and accept your perspective. For example, you could say something like, "I really want to make this work, but my budget is quite tight," or "I believe $13,100 is fair considering similar cars I've seen on the market." This way, the seller understands your reasoning and may be more willing to meet you halfway. Additionally, don't 'cave in' immediately. Considering the seller's offer of $15,000 and your previous offer of $12,500, a strong counteroffer would ideally be below $13,000. This approach helps to keep your target price near the midpoint of the range under discussion. How about offering $12,800 instead? This keeps the negotiation balanced and closer to your target price.
|
| 544 |
+
|
| 545 |
+
A suggested alternative: "After carefully considering my budget constraints and the market prices, the highest I can go is $12,800. I really want to make this deal work, but I have to stay within my budget. I hope we can come to a mutually agreeable price. Is there any room for flexibility from your side?"
|
| 546 |
+
|
| 547 |
+
Figure 10: Example of two turn-based feedback messages from ACE. In the first turn, the user failed to make the first offer, to set an ambitious opening point, and to include a rationale. In the second turn, the user failed to include a rationale and to make a strong counteroffer.
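One consistent reading of "keep your target price near the midpoint of the range under discussion" in the first feedback message is that the first offer should satisfy (offer + seller_offer) / 2 ≈ target. A sketch under that assumption (not the paper's code) reproduces the $10,000 bound:

```python
def first_offer_bound(seller_offer: float, target_price: float) -> float:
    # If the range under discussion runs from the buyer's first offer up to
    # the seller's offer, the target sits at its midpoint exactly when
    # offer = 2 * target - seller_offer.
    return 2 * target_price - seller_offer

print(first_offer_bound(16_000, 13_000))  # 10000.0 -> "below $10,000"
```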
|
| 548 |
+
|
| 549 |
+
I appreciate your position. Given all the extras this car includes, with its low mileage, immaculate condition and recent servicing, I think it's worth meeting me at $13,700. This is a great car, well below average miles for its age, and it will serve you well. Do we have an agreement at that price?
|
| 550 |
+
|
| 551 |
+
Are you sure you can't do $13,500?
|
| 552 |
+
|
| 553 |
+
I understand that it's really important for you to stick to your budget. Considering all aspects, I can meet you at $13,500, but I must insist that this is the lowest price I can accept. This car is definitely worth every penny. Do we have a deal?
|
| 554 |
+
|
| 555 |
+
Yeah sounds good.
|
| 556 |
+
|
| 557 |
+
Message feedback: As you draw close to an agreement, this is another moment to manage your counterpart's impression. You want the counterpart to feel that they have succeeded. Express pain at your extensive concessions. Tell them that they drive a hard bargain. Don't gloat if you think you have done well. All this helps them feel committed to the deal and willing to negotiate with you again.
|
| 558 |
+
|
| 559 |
+
Instead of just agreeing, you could say something like:
|
| 560 |
+
|
| 561 |
+
"I appreciate the offer, but this has been a tough decision for me. You've really driven a hard bargain. I think we can move forward, but I still need to see the car in person to be absolutely sure. Can we arrange a time for that?"
|
| 562 |
+
|
| 563 |
+
This approach shows that you are serious but still cautious, ensuring the other party feels they have done well in the negotiation.
|
| 564 |
+
|
| 565 |
+
A suggested alternative: "I appreciate the offer, but I must say, you've really driven a hard bargain. It's been a tough decision for me. I think we can move forward, but I still need to see the car in person to be absolutely sure. Can we arrange a time for that?"
|
| 566 |
+
|
| 567 |
+
Figure 11: Example of turn-based feedback where a user made an error in the strategic closing category.
|
| 568 |
+
|
| 569 |
+
- Formality: You maintained an adequate level of formality and politeness throughout the negotiation. It's important to continue being respectful to ensure a positive interaction with the seller.
|
| 570 |
+
- Firmness: You displayed firmness by sticking to your budget, which is commendable. However, you could benefit from being more assertive in your statements. For example, instead of saying "Sorry the most I can do is $13,100," you could say, "My budget is capped at $13,100." This projects confidence and decisiveness.
|
| 571 |
+
- Linguistic level: Avoid apologizing and expressing uncertainty, as it can weaken your negotiating position. For instance, instead of saying, "Okay, I can maybe do $13,300," you could say, "I can offer $13,300, considering my budget constraints." Additionally, mentioning that you have alternative options can strengthen your stance. For example, you might say, "I am considering another car within my budget, but I am willing to finalize a deal at $13,500 if we can agree."
|
| 572 |
+
|
| 573 |
+
Overall, assertiveness and the strategic mention of alternatives can help you achieve a better outcome in future negotiations.
|
| 574 |
+
|
| 575 |
+
Figure 12: A complete example of holistic feedback from ACE.
|
| 576 |
+
|
| 577 |
+
# Price Extraction Prompt
|
| 578 |
+
|
| 579 |
+
# Instructional Prompt:
|
| 580 |
+
|
| 581 |
+
INSTRUCTION
|
| 582 |
+
|
| 583 |
+
You have to extract priced offers from messages. Just give the dollar amount and nothing else. If no offer was proposed yet then say so. If an offer was accepted then say so. If the offer is presented as a range of prices, then give both prices. Do not elaborate on your reasoning.
|
| 584 |
+
|
| 585 |
+
EXAMPLES
|
| 586 |
+
|
| 587 |
+
Message : "I will be willing to pay something from 10k to 11k" Offer: "10000 to 11000".
|
| 588 |
+
|
| 589 |
+
Message: "so i am uh looking for this car and my current price range is between uh eleven thousand and five hundred to twelve thousand dollars" Offer: "11500 to 12000"
|
| 590 |
+
|
| 591 |
+
Message: "Ooh, that's kind of rough. Our sticker price for this car is closer to $14,000." Offer: "14000"
|
| 592 |
+
|
| 593 |
+
Message: "Yes 12000 sounds like a good price for me." Offer: "Accepted."
|
| 594 |
+
|
| 595 |
+
Message: "That's well beyond my price, I can't do that" Offer: "Refused."
|
| 596 |
+
|
| 597 |
+
Message: "Sure. No Problem"
|
| 598 |
+
Offer: "No offer."
|
| 599 |
+
|
| 600 |
+
Message: "I don't think I am able to do that"
|
| 601 |
+
Offer: "Refused."
|
| 602 |
+
|
| 603 |
+
Message: "12,500... I mean, could we call it even $13,000? Offer:"13000"
|
| 604 |
+
|
| 605 |
+
Message: "You said you would be willing to pay 12k ?" Offer: "Rephrasing."
|
| 606 |
+
|
| 607 |
+
# EXTRACTION
|
| 608 |
+
|
| 609 |
+
Message: {} Offer:
|
| 610 |
+
|
| 611 |
+
Table 14: Prompt used to extract prices from user utterances. These prices are used to identify all of the price-based negotiation errors.
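A minimal sketch of how the extractor's short replies might be normalised downstream; the truncated template and `call_llm` are placeholders, not the paper's actual implementation:

```python
import re

# Truncated stand-in for the full Table 14 prompt (instruction plus examples).
EXTRACTION_PROMPT = "You have to extract priced offers from messages. [...]\nMessage: {} Offer:"

def call_llm(prompt: str) -> str:
    """Placeholder for whatever LLM completion API the system uses."""
    raise NotImplementedError

def extract_offer(message: str):
    """Normalise the extractor's reply into a status string or a price list."""
    reply = call_llm(EXTRACTION_PROMPT.format(message)).strip().strip('".')
    if reply in {"Accepted", "Refused", "No offer", "Rephrasing"}:
        return reply
    # Handles single prices ("14000") and ranges ("10000 to 11000").
    return [int(p.replace(",", "")) for p in re.findall(r"\d[\d,]*", reply)]
```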
|
| 612 |
+
|
| 613 |
+
# Rationale Error Prompt
|
| 614 |
+
|
| 615 |
+
# Instructional Prompt:
|
| 616 |
+
|
| 617 |
+
# INSTRUCTION
|
| 618 |
+
|
| 619 |
+
We are here in the context of a negotiation. Your task is to detect whether the buyer gives sufficient rationale/argument along with their offer in the passage.
|
| 620 |
+
|
| 621 |
+
We think of a rationale as a piece of argumentation that can justify a mentioned price. Rationales should be related to the item purchased (clearly mentioning some specific features, price ranges, etc.). Nevertheless, we can think of exceptions such as "You're a friend so I can maybe push it a bit to..."
|
| 622 |
+
|
| 623 |
+
# EXAMPLE
|
| 624 |
+
|
| 625 |
+
Passage: "Buyer: Hello I'd like to make an offer
|
| 626 |
+
|
| 627 |
+
Seller: Great what were you thinking?
|
| 628 |
+
|
| 629 |
+
Buyer: I don't know something like 10k ?"
|
| 630 |
+
|
| 631 |
+
Rationale: False
|
| 632 |
+
|
| 633 |
+
Passage: "Buyer: Hello, this car is in great shape for its mileage, I was looking for a similar car on the internet. I like it and my kids would have a great time in it. Can I make an offer?
|
| 634 |
+
|
| 635 |
+
Seller: Sure how much?
|
| 636 |
+
|
| 637 |
+
Buyer: Something around 10k ?"
|
| 638 |
+
|
| 639 |
+
Rationale: True
|
| 640 |
+
|
| 641 |
+
Passage: "Buyer: Yeah I guess i can do 12,500. It seems reasonable.
|
| 642 |
+
|
| 643 |
+
Seller: Can you push it more to 13,500?
|
| 644 |
+
|
| 645 |
+
Buyer: No sorry, 12,500, nothing more."
|
| 646 |
+
|
| 647 |
+
Rationale: False
|
| 648 |
+
|
| 649 |
+
# TASK
|
| 650 |
+
|
| 651 |
+
Passage: {}
|
| 652 |
+
|
| 653 |
+
Rationale: Answer here
|
| 654 |
+
|
| 655 |
+
Table 15: Prompt used to identify errors related to including rationale.
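Downstream, the detector's one-word verdict maps naturally to a boolean; a sketch reusing the assumed `call_llm` helper from the previous sketch:

```python
# Truncated stand-in for the full Table 15 prompt (instruction plus examples).
RATIONALE_PROMPT = "Your task is to detect whether the buyer gives sufficient rationale [...]\nPassage: {}\n\nRationale:"

def has_rationale(passage: str) -> bool:
    # Map the detector's "True"/"False" verdict to a boolean, tolerating
    # stray whitespace and punctuation around the answer.
    verdict = call_llm(RATIONALE_PROMPT.format(passage)).strip().lower()
    return verdict.startswith("true")
```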
|
| 656 |
+
|
| 657 |
+
# Direct Feedback Prompts
|
| 658 |
+
|
| 659 |
+
# Rationale Feedback Prompt:
|
| 660 |
+
|
| 661 |
+
INSTRUCTION
|
| 662 |
+
|
| 663 |
+
We are here in the context of a negotiation. You are an assistant aimed to help a buyer in a negotiation and give them comments on their offers. In this passage: {}
|
| 664 |
+
|
| 665 |
+
The buyer did not give enough arguments to justify their offer.
|
| 666 |
+
|
| 667 |
+
Give the buyer a quick explanation. Try to quote some words the buyer said.
|
| 668 |
+
|
| 669 |
+
# EXAMPLE OF EXPLANATION:
|
| 670 |
+
|
| 671 |
+
"When you present a revised offer, it's persuasive to give some explanation for the move. Why are you offering more? Why are you resisting offering everything they ask for? The explanations you provide may be subjective, such as your eagerness to reach a deal or your pressing budget constraints, but some words of explanation like this help the seller understand and accept your perspective."
|
| 672 |
+
|
| 673 |
+
# Counteroffer Feedback Prompt:
|
| 674 |
+
|
| 675 |
+
You are an assistant aimed to re-edit text to help a buyer in a negotiation and provide them with feedback on their offer.
|
| 676 |
+
|
| 677 |
+
Here is the conversation:
|
| 678 |
+
|
| 679 |
+
{}
|
| 680 |
+
|
| 681 |
+
Give them an explanation.
|
| 682 |
+
|
| 683 |
+
Example of good explanation:
|
| 684 |
+
|
| 685 |
+
"Considering the seller's offer of ${}{}$ and your target price of ${}{}$ , a strong first offer would ideally be below ${}{}$ . This approach helps to keep your target price near the midpoint of the range under discussion."
|
| 686 |
+
|
| 687 |
+
Table 16: Prompt used to give direct feedback to users.
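How the pieces might chain together: when the Table 15 detector flags a missing rationale, the rationale feedback prompt fires, and the counteroffer explanation is filled with a bound from the midpoint reading sketched earlier. A rough sketch with truncated template constants (assumptions, not the paper's code):

```python
# Truncated stand-ins for the two Table 16 templates.
RATIONALE_FEEDBACK_PROMPT = "[...] In this passage: {} The buyer did not give enough arguments [...]"
COUNTEROFFER_EXPLANATION = ("Considering the seller's offer of ${} and your "
                            "target price of ${}, a strong first offer would "
                            "ideally be below ${}.")

def direct_feedback(passage: str, seller_offer: int, target: int) -> list:
    comments = []
    if not has_rationale(passage):  # detector from the Table 15 sketch
        comments.append(call_llm(RATIONALE_FEEDBACK_PROMPT.format(passage)))
    # Bound that keeps the target near the midpoint of the discussed range.
    bound = 2 * target - seller_offer
    comments.append(COUNTEROFFER_EXPLANATION.format(seller_offer, target, bound))
    return comments
```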
|
| 688 |
+
|
| 689 |
+
# Re-edit Message Prompt
|
| 690 |
+
|
| 691 |
+
# Instructional Prompt:
|
| 692 |
+
|
| 693 |
+
We are in the context of a negotiation. Different teachers gave comments to the buyer:
|
| 694 |
+
|
| 695 |
+
Your task is to propose an alternative message the buyer could have sent that would match all the comments given by teachers.
|
| 696 |
+
|
| 697 |
+
For example, if a comment says that the buyer should open the conversation with an ice breaker, then propose an icebreaker. If a comment says that they should add rationales to their offers, then rewrite the offer and add a few rationales to it. You have to put yourself in the buyer's position. Assume that you are talking to the seller.
|
| 698 |
+
|
| 699 |
+
# EXAMPLE 1:
|
| 700 |
+
|
| 701 |
+
# -MESSAGE:
|
| 702 |
+
|
| 703 |
+
"Seems a little steep, steep for me. You know, I can do something in the, you know, $12,000 range would really be, you know, near the top of the end of my budget. Do you have any flexibility there? You know, anything we can do to, you know, work on that price?"
|
| 704 |
+
|
| 705 |
+
- COMMENTS: "comment 1: "Negotiation research finds a benefit to speaking your opening offer first. It can "anchor" the other person's judgment of the price range, setting the stage for a more favorable outcome."
|
| 706 |
+
|
| 707 |
+
comment 2: "Considering your target price of $10000, a strong first offer would ideally be below$ 9000. This approach helps to keep your target price near the midpoint of the range under discussion."
|
| 708 |
+
|
| 709 |
+
- ANSWER: "The price seems a little steep for me. I can work with something in the $9,000 range, which is near the top end of my budget. I want to ensure that we can reach a mutually beneficial agreement. Is there any flexibility on the price from your end?"
|
| 710 |
+
|
| 711 |
+
# EXAMPLE 2:
|
| 712 |
+
|
| 713 |
+
# -MESSAGE:
|
| 714 |
+
|
| 715 |
+
"Hi, I'm looking for probably a Honda Accord with reasonable mileage around maybe $15000. Do you have anything like that?"
|
| 716 |
+
|
| 717 |
+
# -COMMENTS:
|
| 718 |
+
|
| 719 |
+
"comment 1: "Begin your negotiation conversation with some brief social conversation before delving into the economic issues. Show esteem for the other person (your counterpart) by praising what they are selling or asking about their day. "Breaking the ice" in some way through initial personal conversation creates rapport, which tends to increase openness and cooperativeness.
|
| 720 |
+
|
| 721 |
+
comment 2: "Negotiation research finds that opening offers are most effective when accompanied by a rationale in terms of some objective reference point, such as an expert's valuation of the object under negotiation or market value indicated by past sales prices."
|
| 722 |
+
|
| 723 |
+
-ANSWER: "Hey! It has been a long time are you doing?"
|
| 724 |
+
|
| 725 |
+
# YOUR TURN TO DO IT NOW
|
| 726 |
+
|
| 727 |
+
# -MESSAGE:
|
| 728 |
+
|
| 729 |
+
{}
|
| 730 |
+
|
| 731 |
+
- COMMENTS:
|
| 732 |
+
|
| 733 |
+
{}
|
| 734 |
+
|
| 735 |
+
- ANSWER:
|
| 736 |
+
|
| 737 |
+
Table 17: Prompt used to revise user messages.
|
| 738 |
+
|
| 739 |
+
# Relevant Target Price Prompts
|
| 740 |
+
|
| 741 |
+
# Low Target Feedback Prompt:
|
| 742 |
+
|
| 743 |
+
You are an assistant aimed to give advice to help a buyer in a negotiation. You are addressing the buyer directly; use the second person (You).
|
| 744 |
+
|
| 745 |
+
The buyer made an error setting their target price for the negotiation. The buyer set their target price to ${}. However, a good target price should be above the minimum market value for the car, which is ${}.
|
| 746 |
+
|
| 747 |
+
Give the buyer feedback explaining their error including details about what would be a good target price.
|
| 748 |
+
|
| 749 |
+
Here is an example of good feedback:
|
| 750 |
+
|
| 751 |
+
This overly ambitious target is below the market range for the car. It may cause offense. By overreaching, you may miss out on a good deal.
|
| 752 |
+
|
| 753 |
+
# High Target Feedback Prompt:
|
| 754 |
+
|
| 755 |
+
You are an assistant aimed to give advice to help a buyer in a negotiation. You are addressing the buyer directly; use the second person (You).
|
| 756 |
+
|
| 757 |
+
The buyer made an error setting their target price for the negotiation. The buyer set their target price to ${}. However, a good target price should be below ${} and closer to the minimum market range for the car, which is ${}.
|
| 758 |
+
|
| 759 |
+
Give the buyer feedback explaining their error including details about what would be a good target price.
|
| 760 |
+
|
| 761 |
+
Here is an example of good feedback:
|
| 762 |
+
|
| 763 |
+
Your target price of ${} is not ambitious enough to test how far this seller can be pushed. You should aspire to a price at the low end of the market range.
|
| 764 |
+
|
| 765 |
+
Table 18: Prompt used to give feedback on how well the user prepared their target price.
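The two templates above imply a simple routing rule: a target below the minimum market value draws the low-target feedback, and a target above some ambition threshold draws the high-target feedback. A sketch, with the threshold left as a parameter since its exact computation is not given here:

```python
# Truncated stand-ins for the two Table 18 templates.
LOW_TARGET_PROMPT = "[...] target price to ${}. However, a good target price should be above [...] ${}."
HIGH_TARGET_PROMPT = "[...] target price to ${}. However, a good target price should be below ${} and closer to [...] ${}."

def target_price_feedback(target: int, market_min: int, ambition_cap: int) -> str:
    if target < market_min:    # overly ambitious: below the market range
        return call_llm(LOW_TARGET_PROMPT.format(target, market_min))
    if target > ambition_cap:  # not ambitious enough
        return call_llm(HIGH_TARGET_PROMPT.format(target, ambition_cap, market_min))
    return ""                  # target already in the recommended band
```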
|
| 766 |
+
|
| 767 |
+
# Holistic Feedback Prompt
|
| 768 |
+
|
| 769 |
+
# Instructional Prompt:
|
| 770 |
+
|
| 771 |
+
Given the negotiation transcript: {}
|
| 772 |
+
|
| 773 |
+
Your goal is to build constructive feedback for the user in order to help them reach a better outcome if they had to go through this negotiation again. You will focus on the linguistic and strategic aspects and not discuss the prices offered. You are addressing the buyer directly; use the second person (You). Here are the dimensions your feedback will include:
|
| 774 |
+
|
| 775 |
+
- Formality: A buyer cannot be rude and pushy. Also, a good buyer stays polite.
|
| 776 |
+
- Firmness: A buyer cannot be too emotional. Studies have shown that firm and tough communication helps reach a better economic outcome than warm and overly friendly communication.
|
| 777 |
+
- Linguistic level: A buyer should not be apologizing. Buyers should not say the word "greedy" (it can be interpreted as a personal attack).
|
| 778 |
+
|
| 779 |
+
As a buyer, you should project that you do not need to buy a car and that you have a perfectly good alternative. The buyer should also somehow mention that they have a plan B.
|
| 780 |
+
|
| 781 |
+
# Feedback:
|
| 782 |
+
|
| 783 |
+
Table 19: Prompt used to give holistic feedback to the user.
|
| 784 |
+
|
| 785 |
+
# Negotiation Agent Prompt
|
| 786 |
+
|
| 787 |
+
# Instructional Prompt:
|
| 788 |
+
|
| 789 |
+
You are a chatbot designed for negotiation. The discussion has to be fluent and realistic. The Honda has reasonable mileage (50,000 miles), automatic transmission, air conditioning, power steering, power windows/door locks, and a CD player. It looks great: a dark green without any rust. You need to sell the car for a price above ${}. You will not sell the car for below that amount. You are selling the car as a private individual, not a dealer. Try not to be redundant in your arguments and talks (do not repeat what you already said in previous turns). If you give the buyer a counteroffer, make sure any new offers are lower than the price you gave previously. Make sure you negotiate hard and never offer a price lower than what the buyer gives you. Do not mention that you need to sell the car for over ${}. If the buyer offers a price below $8,000 respond with "That's a very unrealistic price. Please start with an offer that aligns with the market range for this kind of car. Otherwise I can't take time to talk with you about this car."
|
| 790 |
+
|
| 791 |
+
Table 20: The instructional prompt for our negotiation chatbot agent.
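Both ${} slots in the prompt take the seller's hidden reserve price, so instantiating the agent reduces to one substitution. A sketch (the chat-loop wiring is assumed, not specified in the paper):

```python
# Truncated stand-in for the Table 20 prompt; {0} is the reserve price.
SELLER_PROMPT = ("You are a chatbot designed for negotiation. [...] You need to "
                 "sell the car for a price above ${0}. [...] Do not mention that "
                 "you need to sell the car for over ${0}. [...]")

def seller_system_prompt(reserve_price: int) -> str:
    return SELLER_PROMPT.format(reserve_price)

# A chat loop would pass seller_system_prompt(13_000) as the system message
# and append alternating buyer/seller turns before each completion call.
```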
|
aceallmbasednegotiationcoachingsystem/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1517830d4ffba10deca609d115d37cc8253450a72aeb5c9feb8bbb35dfd307d8
|
| 3 |
+
size 714066
|
aceallmbasednegotiationcoachingsystem/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c388f96afaaf0d5645d9522abd9b5629d9330b2ebb88b0e9c40a0ba562d1c13b
|
| 3 |
+
size 703075
|
acloserlookatmultidimensionalonlinepoliticalincivility/8e194152-6624-404c-bf7d-b3df6189c956_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:df430ebe299c0685a938e54b068e0b85443a35f8208d5fbeb0d47fed79a28e85
|
| 3 |
+
size 110316
|
acloserlookatmultidimensionalonlinepoliticalincivility/8e194152-6624-404c-bf7d-b3df6189c956_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f89190ffef8d497b25921b53ca6a8315529e3306fde8900db45c9e2dc4601ec9
|
| 3 |
+
size 133176
|
acloserlookatmultidimensionalonlinepoliticalincivility/8e194152-6624-404c-bf7d-b3df6189c956_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2a00b7c436a455502fcd35a5e42c5c06c98e596e552d464e15d8680d73fedd67
|
| 3 |
+
size 896511
|
acloserlookatmultidimensionalonlinepoliticalincivility/full.md
ADDED
|
@@ -0,0 +1,387 @@
|
| 1 |
+
# A Closer Look at Multidimensional Online Political Incivility
|
| 2 |
+
|
| 3 |
+
Sagi Pendzel
|
| 4 |
+
|
| 5 |
+
CS Dep., Univ. of Haifa
|
| 6 |
+
|
| 7 |
+
spendzel@campus.haifa.ac.il
|
| 8 |
+
|
| 9 |
+
Nir Lotan
|
| 10 |
+
|
| 11 |
+
IS Dep., Univ. of Haifa
|
| 12 |
+
|
| 13 |
+
nlotan02@campus.haifa.ac.il
|
| 14 |
+
|
| 15 |
+
Alon Zoizner
|
| 16 |
+
|
| 17 |
+
Dep. of Communication, Univ. of Haifa
|
| 18 |
+
|
| 19 |
+
zoizner@com.haifa.ac.il
|
| 20 |
+
|
| 21 |
+
Einat Minkov
|
| 22 |
+
|
| 23 |
+
IS Dep., Univ. of Haifa
|
| 24 |
+
|
| 25 |
+
einatm@is.haifa.ac.il
|
| 26 |
+
|
| 27 |
+
# Abstract
|
| 28 |
+
|
| 29 |
+
Toxic online political discourse has become prevalent, and scholars debate its impact on democratic processes. This work presents a large-scale study of political incivility on Twitter. In line with theories of political communication, we differentiate between harsh impolite style and intolerant substance. We present a dataset of 13K political tweets in the U.S. context, which we collected and labeled along this multidimensional distinction using crowd sourcing. The evaluation of state-of-the-art classifiers illustrates the challenges involved in political incivility detection, which often requires high-level semantic and social understanding. Nevertheless, performing incivility detection at scale, we are able to characterise its distribution across individual users and geopolitical regions. Our findings align with and extend existing theories of political communication. In particular, we find that roughly $80\%$ of the uncivil tweets are authored by $20\%$ of the users, where users who are politically engaged are more inclined to use uncivil language. We further find that political incivility exhibits network homophily, and that incivility is more prominent in highly competitive geopolitical regions. Our results apply to both uncivil style and substance. Warning: The paper contains examples that readers might find offensive.
|
| 30 |
+
|
| 31 |
+
# 1 Introduction
|
| 32 |
+
|
| 33 |
+
An active discourse between political groups and citizens is considered a fundamental condition for a healthy democracy (Gutmann and Thompson, 2009). The recent rise of social media has been argued however to intensify disrespectful and hostile online political discourse (Coe et al., 2014; Frimer et al., 2023). The term political incivility is commonly used in the political communication literature that studies the prominence, determinants and consequences of using harsh style and discriminatory discourse in online platforms. According
|
| 34 |
+
|
| 35 |
+
to researchers, negative consequences of this trend are that it fosters polarization between rival political groups, and may disengage some citizens from being politically involved (Muddiman et al., 2020; Skytte, 2021; Van't Riet and Van Stekelenburg, 2022). Conversely, others argue that incivility is not inherently negative, considering certain flavors of incivility as a legitimate rhetoric in heated political discussions (Rossini, 2022). Aiming to study the consequences and contextual factors that underlie this general phenomenon, several previous research works have attempted to empirically detect, quantify and characterise political incivility on discussion groups and social media platforms (ElSherief et al., 2018; Davidson et al., 2020; Theocharis et al., 2020; Bianchi et al., 2022; Frimer et al., 2023). In this research, we take a closer look at the challenges involved in the automatic detection of political incivility online, considering it as a multidimensional concept. We then present the results of a large-scale study, where we examine incivility as detected within a very large sample of political tweets posted on the social media platform of Twitter. In particular, we gauge and quantify user-level and geopolitical factors that correlate with political incivility online.
|
| 36 |
+
|
| 37 |
+
We operationalize political incivility as a two-dimensional concept in accordance with recent theories of political communication (Muddiman, 2017; Rossini, 2022). The first dimension is personal-level incivility (impoliteness). This flavor of incivility pertains to a harsh tone that violates interpersonal norms, including foul language, name-calling, vulgarity, and aspersion towards other discussion partners or their ideas (e.g., "are you really so stupid that you would defund this program?"). The second dimension of public-level incivility (intolerance) rather pertains to exclusionary speech,
|
| 38 |
+
|
| 39 |
+
IMPOLITE: "All hell has broken loose under the leadership of the senile old man. I don't believe a damn word from this dumb son of a bitches"; "That's what they are protesting, you rank imbecile. People like you need a damn good kicking."
|
| 40 |
+
INTOLERANT: "Hillary and the dems ARE enemies, foreign AND domestic"; "If you agree with democrats in congress, you are an anti-American commie"
|
| 41 |
+
NEUTRAL: "How long do Republicans believe you can keep pushing this line? You never intended to secure the border"; "There are 400,000,000 guns in the United States, you're going to have to stop the criminals not the guns"
|
| 42 |
+
|
| 43 |
+
Table 1: Example tweets per class. These examples were presented to the annotators as part of their training.
|
| 44 |
+
|
| 45 |
+
silencing or denying the rights of a social or political group (e.g., “Democrats are openly trying to see to the destruction of America”). That is, interpersonal incivility refers to tone, whereas intolerance is defined in terms of substance. Table 1 includes example tweets of each category. As illustrated, the impolite examples are characterized by a harsh tone, vulgar language and profanity, which may be directed at the user participating in the specific Twitter discussion (second example). However, the impolite tweets do not call for silencing an entire community or group, or denying their rights. In contrast, the example tweets of the intolerant category explicitly accuse an entire political group (in this case, Democrats) of being an enemy of the country. Regarding the neutral category, while the first example in the table criticizes Republicans, it does not call for limiting their rights or accuse their entire group of treason; thus, the tweet is not considered intolerant. Table 5 includes additional labeled examples, including a tweet that is both intolerant, as it denounces the elected U.S. president along with his voters, and impolite, given its use of vulgar language. While we follow this distinction in the paper, we acknowledge that both types of incivility may be offensive. A more detailed discussion concerning the terminology of these concepts is included in Section 2.
|
| 46 |
+
|
| 47 |
+
There are several motivations for identifying political incivility at this multidimensional resolution. In general, scholars of political communication have shown that the exposure to either impolite style or intolerant content online leads to increased polarization and intergroup tensions (Muddiman et al., 2020; Skytte, 2021). Yet, recent studies argue that heated political talk should not be dismissed due to interpersonal incivility, whereas expressions of intolerance on digital platforms have a more detrimental effect on democratic processes (Papacharissi, 2004; Rossini, 2022).
|
| 48 |
+
|
| 49 |
+
It is therefore desired to distinguish between the different dimensions of political incivility when studying this phenomenon. In this work, we further show that interpersonal incivility and intolerance differ in their language characteristics. While impolite speech often contains unequivocally negative lexical expressions, the interpretation of intolerance is generally a more challenging task, in that it requires contextual, political, and social understanding.
|
| 50 |
+
|
| 51 |
+
A main contribution of our work is the construction of a large dataset of 13K political tweets. We carefully retrieved and sampled these tweets using diverse strategies, aiming to capture both incivility types, while avoiding lexical and topical biases (Wiegand et al., 2019). The dataset was labeled by multidimensional incivility via crowd sourcing, having the annotation process supervised by a domain expert. Using our dataset, we adapt and evaluate a variety of state-of-the-art language models on the task of multi-label incivility detection. Our results indicate that political incivility detection is a challenging task, where we obtain best F1 scores of 0.70 and 0.59 on impoliteness and intolerance detection, respectively.
|
| 52 |
+
|
| 53 |
+
In the second part of this work, we report the results of a large scale study, in which we performed multidimensional incivility detection and examined the prevalence of incivility among the political posts by more than 200K users. We find that both types of political incivility are prevalent on social media, identifying $17.6\%$ of the political tweets as impolite, $13.3\%$ as intolerant and $2.5\%$ as both, with an overall political incivility rate of $28.4\%$ . A user-level analysis shows that a minority of the users, who are politically engaged (as measured by the proportion of their tweets that concern political topics), are more inclined to use uncivil language, generating the majority of the uncivil tweets. Our analysis further establishes that social patterns of political incivility involve network homophily. Considering the large scope of our study, we were also able to assess differences in the prevalence of incivility across geopolitical regions, specifically, states. We find that state-level incivility on social media is significantly correlated with partisan competition per state, observing higher incivility levels in 'battleground states', where the two camps are on par. We interpret our findings in light of existing theories of political communication, and discuss the challenges and potential of political incivility detection for future research.
|
| 54 |
+
|
| 55 |
+
# 2 Related work
|
| 56 |
+
|
| 57 |
+
As noted in a recent survey, the concepts of uncivil, offensive, and toxic speech often overlap, where incivility is most frequently used by social scientists (Pachinger et al., 2023). In the political communication literature, some researchers frame incivility in terms of impolite speech (Theocharis et al., 2016; Seely, 2018), whereas others define it as either impoliteness, intolerance or hate speech (Davidson et al., 2020; Theocharis et al., 2020). Accordingly, most relevant empirical studies address incivility detection as a binary classification problem, differentiating between neutral and uncivil discourse (Davidson et al., 2020; Theocharis et al., 2020; Rheault et al., 2019). Following recent theories of political communication (Rossini, 2022), we consider political incivility as a multidimensional concept, defining uncivil language as either impolite or intolerant, or both. In a closely related work, Bianchi et al. (2022) introduced a dataset of tweets annotated with fine grained labels, distinguishing between our high-level categories of rude or offensive tone (profanities, insults, outrage, or character assassination) and intolerant expressions (discrimination, hostility). Overall, they report F1 performance of roughly 0.7 on all categories. While offering valuable insights into multidimensional incivility detection, their dataset is focused on the topic of immigration, which receives limited attention in online political discourse (Barbera et al., 2019; Wojcieszak et al., 2022). Crucially, we refrained from sampling tweets based on topical keywords, while targeting political tweets by U.S. residents. Consequently, our dataset captures incivility mainly in the U.S. partisan context, which is prevalent in Twitter, across various topics (only $1.8\%$ of the sampled tweets mention immigration). Aiming at lexical as well as topical diversity, we also minimized the use of pre-trained tools as means for sampling texts that were likely to be toxic. Possibly for these reasons, we observe substantially lower performance on intolerance detection in comparison to Bianchi et al. (F1 of $\sim 0.6$ vs. $\sim 0.7$ ).<sup>2</sup> Our analysis indicates that in the lack of clear lexical cues, contextual social understanding is required in order to improve on the task of intolerance detection. In this respect, our work relates to a recent line of
|
| 58 |
+
|
| 59 |
+
works that concern the detection of implicit hate speech, where the underlying toxic intention is encoded using indirect semantics rather than by foul language (ElSherief et al., 2021; Hartvigsen et al., 2022). Finally, this work makes the contribution of applying multidimensional political incivility detection at large-scale, studying its prevalence while considering various contextual factors, including user-level characteristics and geopolitical conditions.
|
| 60 |
+
|
| 61 |
+
# 3 MUPID: a Multidimensional Political Incivility Dataset
|
| 62 |
+
|
| 63 |
+
# 3.1 Data sampling strategy
|
| 64 |
+
|
| 65 |
+
Even though political incivility is not rare, it is desired to focus the costly annotation effort on a high yield sample. We exploit multiple network-based and other cues to obtain a diverse and representative sample of the target classes, while avoiding topical and lexical biases (Wiegand et al., 2019).
|
| 66 |
+
|
| 67 |
+
As a first step, we collected tweets posted by users who follow multiple disputable political accounts, assuming that such users are more inclined to use uncivil language in political contexts (Gervais, 2014). Concretely, we referred to lists of accounts that are known to distribute fake news (Grinberg et al., 2019), news accounts that are considered politically biased to a large extent (Wojcieszak et al., 2023), and the accounts of members of the U.S. Congress who are considered as ideologically extreme (Lewis et al., 2019). We selected the top accounts per category, balanced over conservative and liberal orientation, based on bias scores specified by those sources. We then identified users who followed two or more biased accounts, maintaining a balance between users of conservative and liberal orientation, and retrieved the (200) latest tweets posted by them as of December 2021. This yielded 885K tweets authored by 15.8K users.
|
| 68 |
+
|
| 69 |
+
Identifying political tweets. We trained a dedicated classifier to identify tweets that discuss political topics, exploiting existing resources for this purpose. Specifically, we sampled 12.5K tweets concerning topics that are discussed frequently by either Republicans (e.g., the U.S. federal budget), Democrats (e.g., marriage equality), or both (e.g., the presidential campaign) (Barbera et al., 2015).
|
| 70 |
+
|
| 71 |
+
An additional 3.5K political posts were extracted from the social media accounts of U.S. politicians.<sup>5</sup> As counterexamples, we considered random tweets by U.S. users,<sup>6</sup> constructing a balanced dataset of 32K examples overall. We finetuned a 'bert-base-uncased' model on this dataset using its public implementation and standard training practices, minimizing the cross-entropy loss function. In applying the finetuned classifier, we set a high threshold (0.96) over its confidence scores, aiming to achieve high precision. Overall, 82K (9.3%) of our sampled tweets were predicted to be political. The manual examination of 300 random tweets by a graduate student of political communication indicated a classification precision of 0.91.
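A sketch of the thresholded filtering step with the transformers pipeline API; the checkpoint and label names are illustrative placeholders for the finetuned model described above:

```python
from transformers import pipeline

# "political-bert" stands in for the finetuned 'bert-base-uncased' checkpoint.
clf = pipeline("text-classification", model="political-bert")

def is_political(tweet: str, threshold: float = 0.96) -> bool:
    pred = clf(tweet)[0]
    # Keep only high-confidence political predictions, favoring precision.
    return pred["label"] == "POLITICAL" and pred["score"] >= threshold

tweets = ["Congress passed the budget today.", "Lovely weather this morning."]
political = [t for t in tweets if is_political(t)]
```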
|
| 72 |
+
|
| 73 |
+
Sampling tweets for annotation. In order to focus the annotation effort on tweets that demonstrate incivility, we applied several additional sampling heuristics. Following insights that hateful user accounts tend to be new and more active than average (Ribeiro et al., 2018), we sampled 2K tweets by accounts which were created up to two months prior to the tweet retrieval date, or posted more than one tweet daily on average since their creation date. Similar to previous works (Theocharis et al., 2020; Hede et al., 2021; Bianchi et al., 2022), we utilized the pretrained Jigsaw Perspective tool<sup>7</sup> to identify toxic tweets, sampling another 2K tweets that received high scores on the categories of 'abusive language and slurs', 'inflammatory comments' and 'attacks on the author'. Finally, we sampled 4K tweets uniformly at random. Throughout the annotation process, we tracked the yield of tweets of each class. Among the 8K selected tweets, 2.3K $(28.9\%)$ were labeled as impolite, and 0.8K $(9.8\%)$ as intolerant. Applying an active labeling paradigm (Tong and Koller, 2001), we trained a classifier of intolerance detection using the examples labeled thus far to identify additional tweets that were likely to be intolerant within our large sampled pool of political tweets. In several subsequent annotation and learning batches, we selected 5.2K additional tweets for manual annotation in this fashion. The ratio of impoliteness remained similar to the original sample $(22.5\%)$ , yet the ratio of intolerant tweets tripled $(29.5\%)$ . Next, we describe the annotation
|
| 74 |
+
|
| 75 |
+
procedure of the sampled examples. We note that in the resulting dataset, for each example, we maintain its sampling method, where we exclude all of the examples obtained via active sampling from the test set in order to avoid evaluation bias.
|
| 76 |
+
|
| 77 |
+
# 3.2 Annotation procedure
|
| 78 |
+
|
| 79 |
+
The task of assessing multidimensional political incivility involves fine semantics and critical thinking. Since labeling examples by experts is costly and limited in capacity, we turned to crowd sourcing, using the platform of Amazon Mechanical Turk. $^{8}$ In order to elicit labels of high-quality, we required the workers to be highly qualified, $^{9}$ as well as residents of the U.S. who are presumably fluent in English and familiar with U.S. politics. Candidate workers were required to undergo dedicated training and quality testing. Table 1 includes examples which were presented to the workers of each class. These examples were accompanied by a code book containing explanations regarding the guidelines for annotating the tweets (Appendix A). In the qualification phase, the workers labeled six other tweets. Whoever labeled a majority of the tweets correctly got qualified to work on our task, as well as received detailed feedback on their mistakes. During annotation, we included control questions (2 out of 15 tweets in each micro-task) which we expected the workers to do well on. We rejected the annotations by workers who failed to label the control tweets, and banned them from further working on our task. Finally, we paid the workers an hourly fee of 17.5 USD, which exceeds the U.S. minimum wage standards, as fair pay positively affects annotation quality (Ye et al., 2017). Overall, our final cohort included 125 workers who annotated up to 2,000 tweets per week over a period of 3 months.
|
| 80 |
+
|
| 81 |
+
Given each tweet, several independent workers were asked to assess whether it was impolite, intolerant, neither, or both. Each tweet was labeled by 3-5 annotators, where we discarded examples for which a label could not be determined based on majority voting.[10] While we take a prescriptive approach, we acknowledge that human judgement on this task may be subjective, being affected by one's cultural background, beliefs, and political stance (Rottger et al., 2022). An assessment of inter-annotator agreement gives an indication of
|
| 82 |
+
|
| 83 |
+
<table><tr><td>Dataset</td><td>Size</td><td>Uncivil</td><td>Impol./Intol./Both</td></tr><tr><td>MUPID</td><td>13.1K</td><td>42.3%</td><td>24.6 / 15.1 / 2.6%</td></tr><tr><td>Davidson et al.</td><td>5.0K</td><td>10.3%</td><td>-</td></tr><tr><td>Rheault et al.</td><td>10.0K</td><td>12.4%</td><td>-</td></tr><tr><td>Theocharis et al.</td><td>4.0K</td><td>26.0%</td><td>-</td></tr></table>
|
| 84 |
+
|
| 85 |
+
Table 2: Dataset statistics: MUPID vs. other datasets.
|
| 86 |
+
|
| 87 |
+
the semantic complexity and subjectivity of the target concepts. Comparing the labels assigned to every tweet by random worker pairs resulted in Fleiss' kappa scores of 0.63 and 0.54 on the categories of impoliteness and intolerance, indicating 'substantial' and 'moderate' agreement, respectively. This suggests that intolerance may be more subjective and subtle compared to impoliteness. We further compared the majority labels against the judgement of a scholar of political communication, assigned to 300 random labeled tweets. Fleiss' kappa scores in this case indicated 'substantial' agreement, measuring 0.57 and 0.61 on impoliteness and intolerance, respectively. For a subset of this sample, on which the workers tended to agree (majority of $70\%$ or more), the agreement scores between the crowd-sourced labels and the expert were substantially higher on the impoliteness compared to the intolerance category, measuring 0.79 vs. 0.69, respectively. Again, this suggests that the concept of political intolerance is more semantically subtle.
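For reference, the agreement statistic can be computed with statsmodels; a minimal sketch on toy labels (not the paper's data):

```python
import numpy as np
from statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa

# Toy data: one row per tweet, one column per annotator; 1 = impolite.
labels = np.array([[1, 1, 0],
                   [0, 0, 0],
                   [1, 1, 1],
                   [0, 1, 0]])
counts, _ = aggregate_raters(labels)  # -> (n_items, n_categories) count table
print(fleiss_kappa(counts))
```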
|
| 88 |
+
|
| 89 |
+
# 3.3 Dataset statistics
|
| 90 |
+
|
| 91 |
+
The resulting dataset includes 13.1K labeled tweets. As detailed in Table 2, the dataset includes a substantial number of tweets labeled as impolite (3.6K), and intolerant (2.3K), where a large proportion of the examples in the dataset (42.3%) correspond to political incivility (with 2.6% of the examples labeled as both intolerant and impolite). As noted in the table, other available datasets of political incivility use binary annotations, and include a lower proportion of examples of incivility.
|
| 92 |
+
|
| 93 |
+
# 4 Multidimensional incivility detection
|
| 94 |
+
|
| 95 |
+
Next, we evaluate the extent to which neural models can detect political incivility as perceived by humans. We perform multi-label classification, detecting impoliteness and intolerance as orthogonal dimensions, as well as experiment with binary prediction of political incivility.
|
| 96 |
+
|
| 97 |
+
# 4.1 Experimental setup
|
| 98 |
+
|
| 99 |
+
We finetuned several popular transformer-based pre-trained language models, including BERT (Devlin et al., 2019), RoBERTa (Liu et al., 2019) and DeBERTa (He et al., 2021) using our dataset. We report our results using the base configurations of these models, as the larger architectures yielded minor performance gains. In addition, we experiment with task-specialized variants of BERT: HateBERT, a model that has been re-trained using a large-scale corpus of offensive, abusive, and hateful Reddit comments (Caselli et al., 2021); and HateXplain, a model that has been finetuned to classify hateful and offensive Twitter and Gab posts (Mathew et al., 2021). All models were applied using their public implementation. $^{11}$ In finetuning, we split our dataset into fixed stratified train (70%), validation (10%) and test (20%) sets, optimizing the parameters of each model on the validation examples. Considering the class imbalance, we found it beneficial to employ a class-weighted cross-entropy loss function (Henning et al., 2023).
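The paper does not spell out the weighting; for a two-head multi-label setup, one common reading is positive-class up-weighting in a binary cross-entropy loss, sketched here with the dataset's approximate class counts (an assumption, not the authors' exact scheme):

```python
import torch

# Two sigmoid heads: (impolite, intolerant); positive counts from Section 3.3.
n_total = 13_100.0
n_pos = torch.tensor([3_600.0, 2_300.0])
loss_fn = torch.nn.BCEWithLogitsLoss(pos_weight=(n_total - n_pos) / n_pos)

logits = torch.randn(8, 2)                     # a batch of model outputs
targets = torch.randint(0, 2, (8, 2)).float()  # gold multi-labels
loss = loss_fn(logits, targets)
```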
|
| 100 |
+
|
| 101 |
+
# 4.2 Classification results
|
| 102 |
+
|
| 103 |
+
Table 3 reports our test results in terms of ROC AUC, precision, recall and F1 with respect to each class. The table includes also the results of binary classification, considering incivility as a unified concept. As shown, binary classification yields best F1 performance of 0.75. In comparison, the best F1 results obtained for impoliteness and intolerance prediction are 0.70 and 0.59, respectively.
|
| 104 |
+
|
| 105 |
+
As a baseline, we report the performance of the pre-trained Jigsaw Perspective tool, scoring the test examples by their toxicity. The Perspective model has been trained to predict toxicity as "a rude, disrespectful, or unreasonable comment that is likely to make you leave a discussion". Following related works, we marked as toxic the examples that received a toxicity score of 0.5 or more by the model (Gehman et al., 2020). As detailed in Table 3, this method yields high precision (0.78) yet low recall (0.43) in identifying impolite speech. Possibly, the low recall indicates a domain adaptation issue. Toxicity is a poor predictor of intolerance, however, yielding very low precision and recall scores of 0.20 and 0.18 on this category, respectively. This indicates that the intolerant examples in our dataset are not typically conveyed using general toxic language.
|
| 106 |
+
|
| 107 |
+
<table><tr><td></td><td colspan="4">Inter-personal (impolite style)</td><td colspan="4">Public-level (intolerant substance)</td><td colspan="5">Any incivility (binary)</td></tr><tr><td>Classifier</td><td>AUC</td><td>P</td><td>R</td><td>F1</td><td>AUC</td><td>P</td><td>R</td><td>F1</td><td>AUC</td><td>P</td><td>R</td><td>F1</td><td>Mac.F1</td></tr><tr><td>Perspective</td><td>0.841</td><td>0.781</td><td>0.432</td><td>0.556</td><td>0.674</td><td>0.200</td><td>0.180</td><td>0.189</td><td>0.850</td><td>0.897</td><td>0.329</td><td>0.481</td><td>0.636</td></tr><tr><td>BERT</td><td>0.857</td><td>0.635</td><td>0.713</td><td>0.671</td><td>0.848</td><td>0.530</td><td>0.644</td><td>0.581</td><td>0.849</td><td>0.752</td><td>0.692</td><td>0.721</td><td>0.766</td></tr><tr><td>RoBERTa</td><td>0.874</td><td>0.642</td><td>0.744</td><td>0.689</td><td>0.859</td><td>0.501</td><td>0.728</td><td>0.593</td><td>0.864</td><td>0.765</td><td>0.707</td><td>0.735</td><td>0.777</td></tr><tr><td>DeBERTa</td><td>0.861</td><td>0.687</td><td>0.707</td><td>0.697</td><td>0.845</td><td>0.558</td><td>0.626</td><td>0.590</td><td>0.865</td><td>0.754</td><td>0.739</td><td>0.746</td><td>0.782</td></tr><tr><td>HateBert</td><td>0.865</td><td>0.701</td><td>0.661</td><td>0.680</td><td>0.835</td><td>0.515</td><td>0.639</td><td>0.571</td><td>0.857</td><td>0.755</td><td>0.719</td><td>0.737</td><td>0.777</td></tr><tr><td>HateXplain</td><td>0.820</td><td>0.567</td><td>0.688</td><td>0.622</td><td>0.756</td><td>0.374</td><td>0.537</td><td>0.441</td><td>0.811</td><td>0.773</td><td>0.532</td><td>0.630</td><td>0.713</td></tr><tr><td>GPT-3.5</td><td>0.827</td><td>0.421</td><td>0.913</td><td>0.576</td><td>0.765</td><td>0.379</td><td>0.519</td><td>0.438</td><td>0.838</td><td>0.652</td><td>0.835</td><td>0.732</td><td>0.742</td></tr><tr><td>GPT-4</td><td>-</td><td>0.666</td><td>0.659</td><td>0.663</td><td>-</td><td>0.562</td><td>0.416</td><td>0.478</td><td>-</td><td>0.807</td><td>0.638</td><td>0.712</td><td>0.769</td></tr></table>
|
| 108 |
+
|
| 109 |
+
Table 3: Multi-label and binary prediction results.
|
| 110 |
+
|
| 111 |
+
Considering that Generative Pre-trained Transformer (GPT) models have been applied to related tasks such as hate speech detection (Wullach et al., 2021a; Del Arco et al., 2023), we further attempted few-shot incivility prediction using GPT-3.5 and GPT-4. $^{12}$ In this case, for each target category, we prompted the model with a definition of the task and category, and with (3) labeled examples that were also presented to the human workers (see Appendix A). As shown in Table 3, this approach fell short of the finetuned models. (Unlike GPT-3.5, GPT-4 no longer provides token probability information in its API. For this reason, we do not report AUC figures for GPT-4.) It is possible that further improvements in the performance of these models can be achieved via prompt engineering, additional examples or finetuning (Gül et al., 2024); however, this is outside the scope of our work. Nevertheless, we observe similar trends using the GPT models and the other models, showing a substantial gap in performance in favor of the impoliteness category. Concretely, we observe that GPT-4 yields F1 of 0.66 vs. 0.48 on the tasks of impoliteness and intolerance detection, respectively. The finetuned DeBERTa and RoBERTa achieve the best overall performance. Taking into account both performance and cost considerations, RoBERTa is our classifier of choice. This model yields F1 results of 0.69 and 0.59 on the impolite and intolerant classes, respectively.
|
| 112 |
+
|
| 113 |
+
Impoliteness vs. intolerance. We applied Shapley analysis (Lundberg and Lee, 2017)<sup>13</sup> to our training set to identify unigrams that are predictive of impoliteness or intolerance. Table 4 lists words that characterise each class. As expected, impolite style is characterised by derogatory words. Most of the listed words carry negative meaning in an unequivocal way, being offensive in any context, e.g., 'stupid'. In contrast, the intolerant tweets concern political affiliations, e.g., 'republicans', 'right', or
|
| 114 |
+
|
| 115 |
+
Impolite: fuck, help, stupid, damn, obnoxious, fed, joke, ass, goddamn, shit, coward, crap, unreal, love, neoliberal, king, mentality, anarchist, fuel, publishing, bad, wow, back, bastard, communists, forgive, idiot, dumb, change, worst, terrible, broke, asshole, humiliating
|
| 116 |
+
|
| 117 |
+
Intolerant: republican(s), democrat(s), leftists, GOP, democratic, catholics, speech, liberal, dem(s), socialist(s), conservatives, liberals, progressive(s), left, communist(s), party, right, racist, fascists, terrorists, nationalist(s), constituents, marxist, whites, radical, destroyed, americans
|
| 118 |
+
|
| 119 |
+
Table 4: Salient unigrams associated with impolite and intolerant speech in our dataset (Shapley analysis).
|
| 120 |
+
|
| 121 |
+
'liberals'. Unlike slur words, the negative sentiment that such terms may carry is context-dependent. In accordance, we found that impolite tweets were less likely to be misclassified as neutral compared with intolerant tweets (26.7% vs. 44.0%). Thus, semantic and contextual understanding is needed to detect intolerance more precisely.
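A sketch of the attribution step with the shap library, which supports transformers text pipelines; the checkpoint name is an illustrative placeholder:

```python
import shap
from transformers import pipeline

# "incivility-roberta" stands in for the finetuned multi-label checkpoint.
clf = pipeline("text-classification", model="incivility-roberta", top_k=None)
explainer = shap.Explainer(clf)
shap_values = explainer(["the lefties are lying about everything"])
# Averaging absolute per-token attributions for each class over the training
# set surfaces class-salient unigrams of the kind listed in Table 4.
```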
|
| 122 |
+
|
| 123 |
+
Error analysis. Table 5 includes examples of misclassified tweets, showing the labels assigned to them by the human workers versus the predicted labels. We indeed observe cases in which the model missed the presence of intolerance due to implied language (examples (c) and (d)), e.g., "you Republicans don't even know how to keep the electricity on!". Likewise, the model was sometimes misled by lexical cues, demonstrating the gap between lexical-level and semantic understanding (Zagoury et al., 2021); for instance, example (b) was misclassified as impolite, possibly because of the idiom 'sick of'. In some other cases, we found seemingly faulty predictions to be sensible, e.g., "impeach Biden and his administration! Or charge them with treason" was justifiably classified as intolerant. Again, this demonstrates the semantic and contextual challenges involved in identifying political intolerance.
|
| 124 |
+
|
| 125 |
+
Cross-dataset evaluation. We assess learning generalization using MUPID against other relevant
|
| 126 |
+
|
| 127 |
+
<table><tr><td>Tweet</td><td>Label</td><td>Prediction</td></tr><tr><td>(a) We need to impeach Biden and his administration! Or charge them with treason.</td><td>Neither</td><td>Intolerant</td></tr><tr><td>(b) Yes I have hope for your country. There are enough people who are sick of this.</td><td>Neither</td><td>Impolite</td></tr><tr><td>(c) Oh anyways the lefties are lying about everything relating to fixing the economy</td><td>Intolerant</td><td>Impolite</td></tr><tr><td>(d) How are you going to protect our Freedom? You Republicans don’t even know how to keep the electricity on!</td><td>Intolerant</td><td>Neither</td></tr><tr><td>(e) FXCK THAT! NEVER GONNA HAPPEN IN AMERICA! Civil War will happen before that happens here! @LINK</td><td>Impolite</td><td>Neither</td></tr><tr><td>(f) When will this nincompoop leave the White House. He got 81 million votes? God help us!!
|
| 128 |
+
#IllegitimatePresident</td><td>Both</td><td>Intolerant</td></tr></table>
|
| 129 |
+
|
| 130 |
+
Table 5: Examples of tweets illustrating discrepancies between human-assigned labels and classifier predictions for impoliteness and intolerance.
|
| 131 |
+
|
| 132 |
+

|
| 133 |
+
Figure 1: Test F1 results on impoliteness and intolerance detection, varying the number of training examples.
|
| 134 |
+
|
| 135 |
+
datasets of political incivility (Table 2).<sup>14</sup> Concretely, we measured the extent to which performance declines in a cross-dataset setup compared to within-dataset training. We considered fixed random test sets $(20\%)$ , finetuning a RoBERTa classifier in all cases. On average, applying our model to the other datasets resulted in lower precision $(-25.3\%)$ and higher recall $(29\%)$ , reaching similar F1 results $(-3.3\%)$ . We attribute the increased recall to the diversity of MUPID, where precision may be reduced due to data shift or incompatibility of the annotations. Inversely, we finetuned a model using the other datasets (19K examples overall) and applied it to MUPID test set. Compared to our results (Table 3), we observed lower precision $(-11.5\%)$ , recall $(-23.2\%)$ and F1 $(-18\%)$ . The reduction of recall reflects a failure to detect intolerant instances that are under-represented in the other datasets. See detailed results in Appendix B.
|
| 136 |
+
|
| 137 |
+
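The reported deltas are simple relative changes against the within-dataset baseline. The sketch below reproduces the precision delta for the Theocharis et al. (TH) setup, using the numbers from Table 7 in Appendix B.

```python
def relative_delta(cross_score: float, within_score: float) -> float:
    """Percent change of a cross-dataset score vs. its within-dataset baseline."""
    return 100.0 * (cross_score - within_score) / within_score

# TH precision: within-dataset 0.677, MUPID-trained 0.542 (Table 7)
print(round(relative_delta(0.542, 0.677), 1))  # -19.9
```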
Impact of train set size. Figure 1 shows test F1 results while finetuning the RoBERTa classifier using increasing stratified subsets of the train set. As shown, impoliteness detection dominates intolerance detection using as few as 1,000 training examples, again showing the greater semantic complexity involved in detecting uncivil substance vs. tone. While the improvement rate subsides past $\sim 4\mathrm{K}$ labeled examples, the best results are obtained using the full dataset. We conjecture that, similar to hate speech, further improvements may be achieved by extending the dataset, e.g., via methods of synthetic example generation (Wullach et al., 2021b; Hartvigsen et al., 2022).
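The subset construction can be sketched as follows; `finetune_and_score`, which would wrap the RoBERTa finetuning and test-set F1 computation, is an assumed helper rather than the authors' code.

```python
# A sketch of the train-set-size experiment: stratified subsets of growing
# size, each used to finetune and evaluate a classifier. The helper
# `finetune_and_score` (RoBERTa finetuning + test F1) is assumed, not shown.
from sklearn.model_selection import train_test_split

def learning_curve(train_texts, train_labels,
                   sizes=(500, 1000, 2000, 4000, 8000)):
    results = {}
    for n in sizes:
        # Stratified subsample: class proportions are kept fixed across sizes.
        sub_x, _, sub_y, _ = train_test_split(
            train_texts, train_labels,
            train_size=n, stratify=train_labels, random_state=0)
        results[n] = finetune_and_score(sub_x, sub_y)  # assumed helper
    return results
```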
# 5 From tweets to users: a large-scale evaluation
Automatic incivility detection may be used to identify and quantify political incivility at scale, addressing research questions of interest. Here, we introduce and examine the following questions: (i) Are certain users more inclined to post uncivil political content online? (ii) Do incivility levels vary by geopolitical region, specifically, across U.S. states? In both cases, we explore contextual factors that correlate with increased political incivility levels with respect to either impoliteness or intolerance.
To investigate these questions, we collected a corpus comprised of the tweeting history of a large number of user accounts. Concretely, we randomly sampled users who authored tweets between July-Nov. 2022, whom we verified to be residents of the U.S. based on the location attribute of their profiles. For each user account, we retrieved the most recent (up to 200) tweets posted by them, discarding retweets and non-textual tweets, as well as tweets posted by overly active accounts suspected as bots.<sup>15</sup> This resulted in a corpus of 16.3M tweets authored by 373K users. Out of those, 2.6M tweets by 230K users were classified as political; henceforth, the corpus. Finally, $17.6\%$ of the political tweets were identified as impolite, $13.3\%$ as intolerant, and $2.5\%$ as both categories, accounting for an overall incivility ratio of $28.4\%$ (by inclusion-exclusion, $17.6\% + 13.3\% - 2.5\%$). These proportions are similar to figures reported based on manual examination of non-English political comments on Facebook: $20\%$ impolite and $10.8\%$ intolerant comments (Rossini, 2022). Considering this distribution, we note the importance of detecting incivility both in terms of style and substance for achieving comprehensive coverage of online hostility.

<table><tr><td>Variable</td><td>% Impolite</td><td>% Intolerant</td></tr><tr><td colspan="3">User-level metrics (N=230K)</td></tr><tr><td># Followers</td><td>-0.109</td><td>-0.038</td></tr><tr><td># Followees</td><td>-0.017</td><td>0.058</td></tr><tr><td>Tweets per day</td><td>0.068</td><td>0.091</td></tr><tr><td>% political tweets</td><td>0.237</td><td>0.498</td></tr><tr><td colspan="3">Incivility among followees (N=1K, F=600K)</td></tr><tr><td>% Impolite</td><td>0.135</td><td>0.236</td></tr><tr><td>% Intolerant</td><td>0.128</td><td>0.371</td></tr></table>

Table 6: Spearman's correlations: the ratio of impolite/intolerant tweets vs. user-level metrics and the incivility ratios among the accounts followed. The table denotes the user sample size (N) and number of followees (F). All scores are significant (p-value $< 0.001$). Multivariate analysis gave similar results (Appendix C).
# 5.1 Political incivility at the user level
Our results indicate that some users are indeed more inclined to post uncivil content than others. As few as $7.3\%$ of the users authored $50\%$ of the uncivil posts in the corpus, and $20.6\%$ of the users authored $80\%$ of the uncivil posts. On the other hand, $43.7\%$ of the users authored no uncivil post.
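These concentration figures can be computed as a cumulative-share statistic over per-user counts of uncivil posts. The sketch below assumes `uncivil_counts` (one count per user) as input and is illustrative rather than the authors' code.

```python
# Smallest fraction of users accounting for a given share of all uncivil posts.
import numpy as np

def users_covering(uncivil_counts, share=0.5):
    counts = np.sort(np.asarray(uncivil_counts))[::-1]  # heaviest posters first
    cum = np.cumsum(counts) / counts.sum()
    k = np.searchsorted(cum, share) + 1  # users needed to reach `share`
    return k / len(counts)

# e.g., users_covering(counts, 0.5) -> ~0.073 and users_covering(counts, 0.8)
# -> ~0.206 would reproduce the 7.3% / 20.6% figures above.
```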
To explore the characteristics of incivility at the user level, we examined the associations between the share of impolite and intolerant tweets among one's political tweets and other user-level metrics of interest, including network connectivity (number of followers and followees), activity level (average number of tweets per day), and the ratio of political tweets among the tweets posted by them. Table 6 reports our findings in terms of Spearman's rank correlation scores. As shown, users who post intolerant and impolite political content are active, posting more tweets per day than other users. They also tend to have fewer followers; possibly, popular users refrain from controversial political language. Interestingly, a study of 'hateful' users similarly showed that they tweet more and follow other users more, but are less followed (Ribeiro et al., 2018). We find a strong positive correlation between incivility and the share of political tweets posted by the user (Spearman's correlation scores of 0.24 and 0.50 with respect to impoliteness and intolerance, respectively). That is, users who discuss political topics more often, an indicator of increased political engagement (Vaccari and Valeriani, 2018), are more likely to use either intolerant or impolite language. This result echoes the suggestion that incivility may become normalized for those who discuss politics online more often (Hmielowski et al., 2014). As we observe similar trends for both types of incivility, our study suggests that public-level incivility, i.e., intolerance, may have also become normalized online among those who practice political talk often. Importantly, since our classifiers mainly focus on hostility between partisan groups and ideological camps (Table 4), our analyses and findings apply to this context.
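The correlations in Table 6 amount to a few lines of SciPy over a per-user table; the DataFrame and column names below (e.g., `pct_political`, `pct_intolerant`) are assumed, not the authors' code.

```python
# Sketch of the user-level correlation analysis, assuming a pandas DataFrame
# `users` with one row per user; column names are illustrative.
import pandas as pd
from scipy.stats import spearmanr

def user_level_correlations(users: pd.DataFrame) -> pd.DataFrame:
    rows = []
    for metric in ["n_followers", "n_followees", "tweets_per_day", "pct_political"]:
        for target in ["pct_impolite", "pct_intolerant"]:
            rho, p = spearmanr(users[metric], users[target])
            rows.append({"metric": metric, "target": target, "rho": rho, "p": p})
    return pd.DataFrame(rows)
```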
In another analysis, we examine whether user-level incivility is correlated with incivility among the accounts that one follows. To address this question, we considered a random sample of 1K users, and obtained the tweets posted by their followees within a 2-month period prior to the user retrieval date. Overall, we processed 8M tweets posted by 0.6M unique followees, quantifying the share of uncivil political tweets by those accounts. As detailed in Table 6 and in Appendix C, strong and significant correlations were found with respect to both types of incivility between users and the accounts that they follow. Thus, we observe a substantial degree of network homophily among users and followees who use political incivility online (see also Mathew et al. (2019)). This result implies that network information may provide meaningful context for political incivility detection, especially in those cases where indirect language is used (Ribeiro et al., 2018; Ghosh et al., 2023).
# 5.2 Incivility across geopolitical regions
Using our large sample of users, we further quantify and compare political incivility across geopolitical regions, namely, U.S. states. For this purpose, we identified user accounts that specified state information (full state name, or its abbreviation) in the location field of their profile metadata. Overall, 186K users in the corpus met this condition. The largest numbers of users were affiliated with the states of New York (23K), California (16K) and Texas (14K). The states with the fewest users were North Dakota (265), Wyoming (315), South Dakota (426), and Alaska (579). The median number of tweets per state was 2.2K, providing a sufficient sample size for statistical analysis.
For each state, we computed the average user-level proportion of impolite or intolerant tweets. Figure 2 presents a heat map illustrating the average intolerance ratio across states. Here too, we observed aligned trends for both incivility types, obtaining similar results for impoliteness. As shown, some states demonstrate relatively low incivility rates (e.g., WA and NY) whereas others exhibit higher incivility rates (e.g., AZ and FL).

Figure 2: Average detected user-level political intolerance ratio per state (ranging between $7-12\%$).
In light of these results, we conjectured that in 'battleground states', where the two camps are on par, there would be more hostility and toxicity in the political debate. To test this hypothesis, we contrasted the detected state-level average ratios of impolite and intolerant tweets against the differences between the percentages of votes for the Democratic and the Republican parties per state.<sup>16</sup> The analysis confirmed our hypothesis, yielding significant Spearman's rank correlation scores of -0.43 and -0.40 (p-value $< 0.01$), respectively. In other words, this result suggests that political incivility tends to escalate in regions where electoral competition is intense, corresponding to a closer contest between the Democratic and Republican parties.
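The state-level test can be sketched in the same way, assuming a DataFrame `states` with one row per state; `dem_share`, `rep_share`, and the incivility-ratio columns are assumed names.

```python
# Sketch of the battleground-state analysis: correlate the absolute
# Democratic-Republican vote margin with state-level incivility ratios.
from scipy.stats import spearmanr

margin = (states["dem_share"] - states["rep_share"]).abs()
for col in ["avg_pct_impolite", "avg_pct_intolerant"]:
    rho, p = spearmanr(margin, states[col])
    print(col, round(rho, 2), p)  # negative rho: closer races, more incivility
```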
We note that rather than specifying our results per state, we wish to highlight the contextual factors that may affect incivility rates at the state level. Our findings corroborate and align with the existing literature on political communication. In particular, researchers previously showed that candidates and the media use more negative rhetoric in battleground states (Goldstein and Freedman, 2002); that citizens of battleground states engage more in politics on social media (Settle et al., 2016); and that competitive districts feature higher levels of Twitter-based incivility (Vargo and Hopp, 2017). Our large-scale study is the first to provide conclusive empirical evidence of increased multidimensional political incivility by social media users in battleground states.
# 6 Conclusion
We presented MUPID, a dataset of political incivility annotated via crowdsourcing, distinguishing between dimensions related to style (impoliteness) and substance (intolerance). As discussed in detail, we refrained from term matching and from using available toxicity detection tools so as to diminish topical and lexical bias. Our experiments using finetuned language models and few-shot learners reached best F1 scores of 0.70 and 0.59 in identifying impolite and intolerant language, respectively. Our results and analyses suggest that finer semantic and social understanding is required for more accurately decoding incivility as perceived in political contexts, and this particularly holds for intolerant expressions. A large-scale study demonstrates the utility of our models for studying various aspects of political incivility. We find that users who are politically engaged, in that they post political content more often, are more inclined to use uncivil language, and that as few as $20\%$ of the users authored $80\%$ of the uncivil tweets. We also observe network homophily, showing that 'uncivil users' tend to follow other accounts with increased incivility. Analysing incivility at the aggregate level, we find that incivility is more prominent in battleground states.
Our dataset and models of multidimensional political incivility detection may support future research on the relationship between incivility and other contextual factors, e.g., user socio-demographics; user traits such as age, gender, and education level may be inferred from the popular accounts that a user follows (Lotan and Minkov, 2023). A temporal analysis may highlight the impact of political events on incivility levels.
We believe that political incivility detection would benefit from the modeling of relevant social context, such as conversation history (Ghosh et al., 2023) and the political events that the text refers to (Pujari and Goldwasser, 2021). Incorporating information about the user alongside the text authored by them may also help decode the meaning of the text (Pujari et al., 2024). Initial experiments, in which we conjoined user network embeddings with the text encoding, showed improved prediction performance. We hope that researchers will benefit from our dataset in exploring similar directions.
# 7 Limitations
This study applies to political incivility in the U.S., focusing on the Twitter network. Our dataset and models may therefore be limited geographically, temporally, and with respect to platform. In fact, soon after this research was performed, the Twitter social network changed ownership and turned into X, and changes in its user base and political incivility levels might have followed. In general, however, we believe that many of the patterns captured in our dataset and models are general, and may transfer to other social media sites and over time.
It is important to note that while we attend to contextual factors of political incivility at the user and geopolitical levels, we acknowledge the potential significance of other contextual factors, e.g., the conversation history, and whether the discussion is held among like-minded users (Rossini, 2022). Exploring these aspects requires diverse methodological approaches, which are beyond the scope of the current paper.
Another limitation that is inherent to Twitter data concerns replicability, as accounts may be deleted or suspended and posts may be removed from the social network platform over time. This limitation applies to all Twitter datasets, which require tweet recovery via rehydration (Bianchi et al., 2022). We release our dataset, as well as our code and classification models, to the research community to promote future research on this topic,<sup>17</sup> and to allow comparison of our models with future models of political incivility detection.
# 8 Ethics statement
As the primary focus of this study is political incivility, crowd coders may have encountered texts characterized by an impolite style (e.g., foul language) or intolerant content (e.g., speech that discriminates against or excludes individuals based on their social and political characteristics). To mitigate potential harm to the crowd coders, we implemented several protective measures. First, we deliberately avoided providing coding examples that contained violent threats and extreme forms of incivility. Second, we ensured that all coding examples and tasks were derived from real-world political tweets, similar to those commonly encountered on social media platforms. Additionally, we allowed coders the flexibility to terminate their tasks at their discretion.

We further wish to clarify that the authors do not consider political incivility to be a personal trait or a characteristic of any population. Considering that toxic political discourse may have become normalized among those who frequently engage in social media discussions, our study aims to distinguish between two distinct dimensions of such discourse within the framework of partisan competition. Finally, we clarify that the normative debate on online freedom of speech and its possible restrictions is beyond the scope of our manuscript. Rather, our study aims to provide a foundation for researchers to explore the underlying factors shaping political incivility, allowing future studies to delve into its implications. This research was approved by our institutional review board.
# Acknowledgements
We thank the reviewers for their useful comments. This research was funded by the Data Science and Research Center at the University of Haifa and by the Israeli Science Foundation, Grant no. 2671/22.
# References
Pablo Barberá, Andreu Casas, Jonathan Nagler, Patrick J Egan, Richard Bonneau, John T Jost, and Joshua A Tucker. 2019. Who leads? Who follows? Measuring issue attention and agenda setting by legislators and the mass public using social media data. American Political Science Review, 113(4):883-901.

Pablo Barberá, John T Jost, Jonathan Nagler, Joshua A Tucker, and Richard Bonneau. 2015. Tweeting from left to right: Is online political communication more than an echo chamber? Psychological Science, 26(10):1531-1542.

Sam Bestvater, Sono Shah, Gonzalo Rivero, and Aaron Smith. 2022. Politics on Twitter: One-third of tweets from US adults are political.

Federico Bianchi, Stefanie Hills, Patricia Rossini, Dirk Hovy, Rebekah Tromble, and Nava Tintarev. 2022. "It's not just hate": A multi-dimensional perspective on detecting harmful speech online. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing.

Tommaso Caselli, Valerio Basile, Jelena Mitrovic, and Michael Granitzer. 2021. HateBERT: Retraining BERT for abusive language detection in English. In Proceedings of the 5th Workshop on Online Abuse and Harms (WOAH 2021), pages 17-25.

Kevin Coe, Kate Kenski, and Stephen A Rains. 2014. Online and uncivil? Patterns and determinants of incivility in newspaper website comments. Journal of Communication, 64(4):658-679.

Sam Davidson, Qiusi Sun, and Magdalena Wojcieszak. 2020. Developing a new classifier for automated identification of incivility in social media. In Proceedings of the Fourth Workshop on Online Abuse and Harms.

Flor Miriam Plaza Del Arco, Debora Nozza, and Dirk Hovy. 2023. Respectful or toxic? Using zero-shot learning with language models to detect hate speech. In The 7th Workshop on Online Abuse and Harms (WOAH).

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT).

Mai ElSherief, Vivek Kulkarni, Dana Nguyen, William Yang Wang, and Elizabeth Belding. 2018. Hate lingo: A target-based linguistic analysis of hate speech in social media. Proceedings of the International AAAI Conference on Web and Social Media, 12(1).

Mai ElSherief, Caleb Ziems, David Muchlinski, Vaishnavi Anupindi, Jordyn Seybolt, Munmun De Choudhury, and Diyi Yang. 2021. Latent hatred: A benchmark for understanding implicit hate speech. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing.

Jeremy A Frimer, Harinder Aujla, Matthew Feinberg, Linda J Skitka, Karl Aquino, Johannes C Eichstaedt, and Robb Willer. 2023. Incivility is rising among American politicians on Twitter. Social Psychological and Personality Science, 14(2):259-269.

Samuel Gehman, Suchin Gururangan, Maarten Sap, Yejin Choi, and Noah A. Smith. 2020. RealToxicityPrompts: Evaluating neural toxic degeneration in language models. In Findings of the Association for Computational Linguistics: EMNLP 2020.

Bryan T Gervais. 2014. Following the news? Reception of uncivil partisan media and the use of incivility in political expression. Political Communication, 31(4):564-583.

Sreyan Ghosh, Manan Suri, Purva Chiniya, Utkarsh Tyagi, Sonal Kumar, and Dinesh Manocha. 2023. CoSyn: Detecting implicit hate speech in online conversations using a context synergized hyperbolic network. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing.

Ken Goldstein and Paul Freedman. 2002. Lessons learned: Campaign advertising in the 2000 elections. Political Communication, 19(1):5-28.

Nir Grinberg, Kenneth Joseph, Lisa Friedland, Briony Swire-Thompson, and David Lazer. 2019. Fake news on Twitter during the 2016 US presidential election. Science, 363(6425):374-378.

Ilker Gül, Rémi Lebret, and Karl Aberer. 2024. Stance detection on social media with fine-tuned large language models. CoRR, abs/2404.12171.

Amy Gutmann and Dennis F Thompson. 2009. Democracy and Disagreement. Harvard University Press.

Thomas Hartvigsen, Saadia Gabriel, Hamid Palangi, Maarten Sap, Dipankar Ray, and Ece Kamar. 2022. ToxiGen: A large-scale machine-generated dataset for adversarial and implicit hate speech detection. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL).

Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. 2021. DeBERTa: Decoding-enhanced BERT with disentangled attention. In 9th International Conference on Learning Representations (ICLR).

Anushree Hede, Oshin Agarwal, Linda Lu, Diana C. Mutz, and Ani Nenkova. 2021. From toxicity in online comments to incivility in American news: Proceed with caution. In Proceedings of the Conference of the European Chapter of the Association for Computational Linguistics (EACL).

Sophie Henning, William Beluch, Alexander Fraser, and Annemarie Friedrich. 2023. A survey of methods for addressing class imbalance in deep-learning based natural language processing. In Proceedings of the Conference of the European Chapter of the Association for Computational Linguistics (EACL).

Jay D Hmielowski, Myiah J Hutchens, and Vincent J Cicchirillo. 2014. Living in an age of online incivility: Examining the conditional indirect effects of online discussion on political flaming. Information, Communication & Society, 17(10):1196-1211.

Jeffrey B Lewis, Keith Poole, Howard Rosenthal, Adam Boche, Aaron Rudkin, and Luke Sonnet. 2019. Voteview: Congressional roll-call votes database. See https://voteview.com/.

Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. CoRR, abs/1907.11692.

Nir Lotan and Einat Minkov. 2023. Social world knowledge: Modeling and applications. PLoS ONE, 18(7).

Scott M Lundberg and Su-In Lee. 2017. A unified approach to interpreting model predictions. In Advances in Neural Information Processing Systems, volume 30.

Binny Mathew, Ritam Dutt, Pawan Goyal, and Animesh Mukherjee. 2019. Spread of hate speech in online social media. In Proceedings of the 10th ACM Conference on Web Science, pages 173-182.

Binny Mathew, Punyajoy Saha, Seid Muhie Yimam, Chris Biemann, Pawan Goyal, and Animesh Mukherjee. 2021. HateXplain: A benchmark dataset for explainable hate speech detection. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI).

Ashley Muddiman. 2017. Personal and public levels of political incivility. International Journal of Communication, 11:21.

Ashley Muddiman, Jamie Pond-Cobb, and Jamie E. Matson. 2020. Negativity bias or backlash: Interaction with civil and uncivil online political news content. Communication Research, 47(6):815-837.

Pia Pachinger, Allan Hanbury, Julia Neidhardt, and Anna Planitzer. 2023. Toward disambiguating the definitions of abusive, offensive, toxic, and uncivil comments. In Proceedings of the First Workshop on Cross-Cultural Considerations in NLP (C3NLP).

Zizi Papacharissi. 2004. Democracy online: Civility, politeness, and the democratic potential of online political discussion groups. New Media & Society, 6(2):259-283.

Rajkumar Pujari and Dan Goldwasser. 2021. Understanding politics via contextualized discourse processing. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.

Rajkumar Pujari, Chengfei Wu, and Dan Goldwasser. 2024. "We demand justice!": Towards social context grounding of political texts. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.

Ludovic Rheault, Erica Rayment, and Andreea Musulan. 2019. Politicians in the line of fire: Incivility and the treatment of women on social media. Research & Politics, 6(1).

Manoel Ribeiro, Pedro Calais, Yuri Santos, Virgílio Almeida, and Wagner Meira Jr. 2018. Characterizing and detecting hateful users on Twitter. In Proceedings of the International AAAI Conference on Web and Social Media, volume 12.

Patricia Rossini. 2022. Beyond incivility: Understanding patterns of uncivil and intolerant discourse in online political talk. Communication Research, 49(3):399-425.

Paul Rottger, Bertie Vidgen, Dirk Hovy, and Janet Pierrehumbert. 2022. Two contrasting data annotation paradigms for subjective NLP tasks. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.

Natalee Seely. 2018. Virtual vitriol: A comparative analysis of incivility within political news discussion forums. Electronic News, 12(1):42-61.

Jaime E Settle, Robert M Bond, Lorenzo Coviello, Christopher J Fariss, James H Fowler, and Jason J Jones. 2016. From posting to voting: The effects of political competition on online political engagement. Political Science Research and Methods, 4(2):361-378.

Rasmus Skytte. 2021. Dimensions of elite partisan polarization: Disentangling the effects of incivility and issue polarization. British Journal of Political Science, 51(4):1457-1475.

Yannis Theocharis, Pablo Barberá, Zoltán Fazekas, Sebastian Popa, and Olivier Parnet. 2016. A bad workman blames his tweets: The consequences of citizens' uncivil Twitter use when interacting with party candidates. Journal of Communication, 66.

Yannis Theocharis, Pablo Barberá, Zoltán Fazekas, and Sebastian Adrian Popa. 2020. The dynamics of political incivility on Twitter. SAGE Open, 10(2).

Simon Tong and Daphne Koller. 2001. Support vector machine active learning with applications to text classification. Journal of Machine Learning Research, 2(Nov):45-66.

Cristian Vaccari and Augusto Valeriani. 2018. Digital political talk and political participation: Comparing established and third wave democracies. SAGE Open, 8(2).

Jonathan Van't Riet and Aart Van Stekelenburg. 2022. The effects of political incivility on political trust and political participation: A meta-analysis of experimental research. Human Communication Research, 48(2):203-229.

Chris J Vargo and Toby Hopp. 2017. Socioeconomic status, social capital, and partisan polarity as predictors of political incivility on Twitter: A congressional district-level analysis. Social Science Computer Review, 35(1):10-32.

Michael Wiegand, Josef Ruppenhofer, and Thomas Kleinbauer. 2019. Detection of abusive language: The problem of biased datasets. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT).

Magdalena Wojcieszak, Andreu Casas, Xudong Yu, Jonathan Nagler, and Joshua A Tucker. 2022. Most users do not follow political elites on Twitter; those who do show overwhelming preferences for ideological congruity. Science Advances, 8(39):eabn9418.

Magdalena Wojcieszak, Sjifra de Leeuw, Ericka Menchen-Trevino, Seungsu Lee, Ke M Huang-Isherwood, and Brian Weeks. 2023. No polarization from partisan news: Over-time evidence from trace data. The International Journal of Press/Politics, 28(3):601-626.

Tomer Wullach, Amir Adler, and Einat Minkov. 2021a. Fight fire with fire: Fine-tuning hate detectors using large samples of generated hate speech. In Findings of the Association for Computational Linguistics: EMNLP.

Tomer Wullach, Amir Adler, and Einat Minkov. 2021b. Towards hate speech detection at large via deep generative modeling. IEEE Internet Computing, 25(2):48-57.

Teng Ye, Sangseok You, and Lionel Robert Jr. 2017. When does more money work? Examining the role of perceived fairness in pay on the performance quality of crowdworkers. In Proceedings of the International AAAI Conference on Web and Social Media (ICWSM).

Avishai Zagoury, Einat Minkov, Idan Szpektor, and William W. Cohen. 2021. What's the best place for an AI conference, Vancouver or ______: Why completing comparative questions is difficult. In Thirty-Fifth AAAI Conference on Artificial Intelligence (AAAI).
# A Instructions and interfaces for the crowd workers and the GPT prompt
Figure 3 shows the code book presented to the crowd workers, and Figure 4 demonstrates the training phase that workers had to complete in order to qualify to work on our task. As shown in the screenshot, upon completing the training phase, the candidate worker was presented with explanations of their labeling mistakes. In instructing the GPT model to label the test examples, we used the prompt shown in Figure 5.
# B Cross-dataset evaluation results
Table 7 includes detailed intra- and cross-dataset evaluation results.
<table><tr><td></td><td>Train</td><td>Test</td><td>Precision</td><td>Recall</td><td>F1</td></tr><tr><td colspan="6">MUPID → Other datasets:</td></tr><tr><td>I</td><td>TH</td><td>TH</td><td>0.677</td><td>0.543</td><td>0.604</td></tr><tr><td>C</td><td>MUPID</td><td>TH</td><td>0.542</td><td>0.847</td><td>0.661</td></tr><tr><td></td><td></td><td>Δ</td><td>-19.9%</td><td>56.0%</td><td>9.4%</td></tr><tr><td>I</td><td>RH</td><td>RH</td><td>0.845</td><td>0.672</td><td>0.749</td></tr><tr><td>C</td><td>MUPID</td><td>RH</td><td>0.547</td><td>0.831</td><td>0.660</td></tr><tr><td></td><td></td><td>Δ</td><td>-35.3%</td><td>23.6%</td><td>-11.9%</td></tr><tr><td>I</td><td>DA</td><td>DA</td><td>0.871</td><td>0.725</td><td>0.791</td></tr><tr><td>C</td><td>MUPID</td><td>DA</td><td>0.692</td><td>0.779</td><td>0.733</td></tr><tr><td></td><td></td><td>Δ</td><td>-20.6%</td><td>7.4%</td><td>-7.3%</td></tr><tr><td></td><td></td><td>Average Δ:</td><td>-25.3%</td><td>29.0%</td><td>-3.3%</td></tr><tr><td colspan="6">Other datasets → MUPID:</td></tr><tr><td>I</td><td>MUPID</td><td>MUPID</td><td>0.765</td><td>0.707</td><td>0.735</td></tr><tr><td>C</td><td>All</td><td>MUPID</td><td>0.677</td><td>0.543</td><td>0.603</td></tr><tr><td></td><td></td><td>Δ</td><td>-11.5%</td><td>-23.2%</td><td>-18.0%</td></tr></table>

Table 7: Detailed cross-dataset evaluation results: intra-dataset (I) vs. cross-dataset (C) experiments. The table uses the acronyms TH (Theocharis et al., 2020), RH (Rheault et al., 2019), and DA (Davidson et al., 2020).
<table><tr><td>Variable</td><td>Odds ratio</td><td>Std. Error</td></tr><tr><td colspan="3">IMPOLITE</td></tr><tr><td># Followers</td><td>1.000000</td><td>1</td></tr><tr><td># Followees</td><td>0.999992</td><td>1.000001</td></tr><tr><td>Tweets per day</td><td>1.008036</td><td>1.000401</td></tr><tr><td>% Political tweets</td><td>1.589433</td><td>1.020808</td></tr><tr><td colspan="3">INTOLERANT</td></tr><tr><td># Followers</td><td>1</td><td>1</td></tr><tr><td># Followees</td><td>1.00001</td><td>1.000001</td></tr><tr><td>Tweets per day</td><td>1.008002</td><td>1.000356</td></tr><tr><td>% Political tweets</td><td>5.176365</td><td>1.018723</td></tr></table>

Table 8: Multivariate beta regression results of user-level characteristics as explanatory factors of the share of impolite and intolerant tweets out of their political tweets. The sample size is $230\mathrm{K}$ users, and all the results are significant at p-value $< 0.001$.
# C Multi-variate analyses of user-level incivility
This section includes multi-variate analysis results, showing similar trends to our results measured in terms of Spearman's correlation, reported in Table 6.
We modeled multivariate beta regressions to examine the associations between the share of impolite and intolerant tweets out of users' political tweets and other user-level characteristics, including their number of followers, number of followees (i.e., accounts followed by a given user), average tweets per day, and the share of political tweets out of the total tweets by a given user. The correlates with respect to the ratio of impolite and intolerant tweets are presented in Table 8. We use the odds ratio (OR) to interpret the results more intuitively. The results show, for example, a positive relationship between the share of impoliteness and tweets per day ($\mathrm{OR} = 1.008$): for a one-unit increase in a user's tweets per day, the odds of observing a higher share of impolite tweets increase by $0.80\%$. Focusing on the share of political tweets as a predictor, the results show that a movement from its minimum value (0) to its maximum value (1) is associated with a $59\%$ increase in the odds of observing a higher share of impolite tweets ($\mathrm{OR} = 1.59$). We also observe that a greater share of political tweets is associated with a higher ratio of intolerant tweets, to a greater extent ($\mathrm{OR} = 5.17$). Note that while there is a very small change in the impoliteness or intolerance ratio with the increase of a single follower or followee (OR is roughly 1), this effect is statistically significant.
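For intuition, the odds-ratio arithmetic above can be checked directly; the coefficients are those reported in Table 8.

```python
# Odds-ratio interpretation of the Table 8 beta-regression coefficients.
or_tweets_per_day = 1.008036     # impolite model
print(f"{(or_tweets_per_day - 1) * 100:.2f}% higher odds per extra tweet/day")

or_pct_political_imp = 1.589433  # % political tweets, impolite model
or_pct_political_int = 5.176365  # % political tweets, intolerant model
# The predictor ranges over [0, 1], so a one-unit change spans min to max.
print(f"impolite: {(or_pct_political_imp - 1) * 100:.0f}% higher odds")    # ~59%
print(f"intolerant: {(or_pct_political_int - 1) * 100:.0f}% higher odds")  # ~418%
```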
We also examined whether posting uncivil tweets is correlated with exposure to incivility in one's followees network (i.e., the accounts that the user follows). We calculated users' potential exposure to incivility as the share of impolite and intolerant tweets in their followees' network, i.e., the number of uncivil tweets posted by their followees divided by the total number of political tweets of these followees; see the sketch below. We modeled the same beta regressions as above, this time considering the share of impolite and intolerant tweets in one's network as additional predictors. In the case of impoliteness, the more users are potentially exposed to impoliteness in their network, the higher the share of their impolite tweets ($\mathrm{OR} = 1.03$, a $3\%$ increase, p-value $< 0.001$). Similar findings are observed in the case of intolerance ($\mathrm{OR} = 1.06$, a $6\%$ increase, p-value $< 0.001$). While we cannot establish causality due to the cross-sectional nature of the data, we encourage scholars to further investigate these initial indications that uncivil users follow others who behave similarly.
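A minimal sketch of the exposure measure, assuming `followee_tweets` maps each followee id to the list of predicted labels of their political tweets; the data structure and names are illustrative.

```python
# Share of uncivil political tweets among a user's followees.
def exposure_share(followee_tweets: dict, label: str = "impolite") -> float:
    total = uncivil = 0
    for labels in followee_tweets.values():
        total += len(labels)
        uncivil += sum(1 for l in labels if l == label)
    return uncivil / total if total else 0.0
```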
# Rules, Tips and Examples

# Rules and Tips

1. What makes a tweet uncivil:
The tweet contains foul language or a harsh tone toward other people or their ideas and actions. It can also include harmful or discriminatory intent toward people or groups based on gender, race, ethnicity, political views, etc.

2. Uncivil tweets can be categorized into three sub-dimensions: (A) impoliteness, (B) intolerance, or (C) both.

A. Impoliteness: the tweet contains insults, foul language, harsh tone, name-calling, vulgarity, an accusation of lying, or aspersion toward other people or their ideas and actions.
B. Intolerance: the tweet contains expressions that derogate or undermine particular groups due to social, political, sexual, ethnic, or cultural features. The tweet can contain threats of physical or emotional harm to others, or the silencing or denial of rights of people and groups (e.g., minorities, political groups, etc.).
C. Both: the tweet contains both of the above sub-dimensions.

3. An intolerant tweet (sub-dimension B) does not necessarily have an impolite style (sub-dimension A), and vice-versa.
4. Disagreements with another person or idea are not automatically considered uncivil. Only when these disagreements contain foul language or intolerant expressions will the tweet be categorized as uncivil.
5. Notice that a tweet may contain criticism (even harsh criticism) toward public figures. However, criticism does not automatically make the tweet intolerant. In order to be intolerant, the tweet should explicitly offend/derogate entire groups (e.g., minorities, liberals, Republicans, women, LGBT, etc.)

Figure 3: The code book presented to the crowd workers.
You were right in 2 out of 6 questions.

Correct answers are shown in green and incorrect ones in red.

Please review the correct answers and their detailed explanations:

USER @User, Jun 27
The government has ruined millions of lives and livelihoods by disrupting commerce, destroying small businesses and hindering individuals from going about their daily lives.
That's what the truckers are protesting, you rank imbecile. People like you need a damn good kicking.

This is an uncivil tweet that belongs to the impoliteness category. Although the beginning of the tweet is critical of the government in a relatively civil manner, the tweet ends with offensive language, including direct insults.

USER @User, Jun 27
It would be nice to see the left fight for the basic human rights of everyone like they do for abortions.

This is a neutral tweet that is considered civil. For a tweet to be labeled uncivil, it must be unambiguously offensive or contain derogative language. In this case, the tweet criticizes liberals in a relatively neutral way.

Figure 4: Annotator training: each candidate worker was asked to label 6 example tweets. We provided feedback about the labeling accuracy, and explanations about the labeling mistakes. Workers who obtained 4 out of 6 or higher accuracy in their responses got qualified to work on our task.
You are an advanced Political Impoliteness Classification System, specialized in detecting impolite language and behavior in political discourse, particularly within the context of United States politics and its specific terminology.

# Instruction:

Determine whether the provided tweet displays impoliteness.

Impoliteness is characterized by insults, foul language, harsh tone, name-calling, vulgarity, an accusation of lying, or aspersion toward other people or their ideas and actions.

Please provide your judgment in the following JSON format: {"impoliteness": "Yes" or "No"}

# Example Evaluations:

Tweet: "All hell has broken loose under the leadership of the senile old man. And now due to his weakness we will see him take us to WWII. Young people voted for this crap."

Your JSON response: {"impoliteness": "Yes"}

Tweet: "And what's it called when Hillary and the dems arranged illegal surveillance against the POTUS? spying on the Whitehouse servers? Hillary and the dems ARE enemies, foreign AND domestic."

Your JSON response: {"impoliteness": "No"}

Tweet: "@USER just passed a trillion dollar infrastructure bill for Biden with no wall funding. How long do Republicans believe you can keep pushing this line? You never intended to secure the border."

Your JSON response: {"impoliteness": "No"}

Tweet: {x}

Your JSON response:

Figure 5: The prompt provided to the GPT-3.5-instruct model for impoliteness classification. A similar prompt was provided for intolerance classification. The format of the prompt follows common practice in instructing GPT-instruct and similar models to perform specific classification tasks.
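A minimal sketch of how such a prompt might be applied at scale, assuming the OpenAI completions endpoint; `PROMPT_TEMPLATE` stands in for the full Figure 5 text, with "{x}" as the tweet placeholder, and is an assumption rather than the authors' code.

```python
import json
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def classify_impoliteness(tweet: str) -> bool:
    prompt = PROMPT_TEMPLATE.replace("{x}", tweet)  # PROMPT_TEMPLATE assumed
    resp = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt,
        max_tokens=10,
        temperature=0,  # deterministic labeling
    )
    raw = resp.choices[0].text.strip()
    # The model is instructed to answer in JSON; fall back to substring
    # matching if the output is not well-formed.
    try:
        return json.loads(raw)["impoliteness"] == "Yes"
    except (json.JSONDecodeError, KeyError):
        return "Yes" in raw
```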
acloserlookatmultidimensionalonlinepoliticalincivility/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:730ee959b1d5036ec451c92e2f85ee8dea7361d603e641584b161942c1b6133b
size 291527

acloserlookatmultidimensionalonlinepoliticalincivility/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6062af20e0dad25f0f693efc0f988f396c65fc019e949c1377eed8bfc187884
size 453772

acomparisonoflanguagemodelingandtranslationasmultilingualpretrainingobjectives/979594f1-eb73-4d54-8635-879feb871523_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6723b1795e060def8f7f3971cd8b148e4f352943790c364c8326f31ae9767576
size 81973

acomparisonoflanguagemodelingandtranslationasmultilingualpretrainingobjectives/979594f1-eb73-4d54-8635-879feb871523_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c8e8366dcf8030b108c0f4556a5807e3f86b690d89bae5e729e4216ff48dd515
size 99910

acomparisonoflanguagemodelingandtranslationasmultilingualpretrainingobjectives/979594f1-eb73-4d54-8635-879feb871523_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5055120c77332f42da3b82e5d257cb2476bcf501ffc8e903286f300760713f17
size 288658