Add Batch 223f0823-2eb9-4221-8122-38a65bc3e532
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/1ec0d09f-46f4-4104-bfdf-c87e062462ac_content_list.json +3 -0
- acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/1ec0d09f-46f4-4104-bfdf-c87e062462ac_model.json +3 -0
- acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/1ec0d09f-46f4-4104-bfdf-c87e062462ac_origin.pdf +3 -0
- acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/full.md +233 -0
- acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/images.zip +3 -0
- acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/layout.json +3 -0
- adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/bdcf793f-f222-45fa-8bf3-c81a767b7987_content_list.json +3 -0
- adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/bdcf793f-f222-45fa-8bf3-c81a767b7987_model.json +3 -0
- adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/bdcf793f-f222-45fa-8bf3-c81a767b7987_origin.pdf +3 -0
- adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/full.md +134 -0
- adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/images.zip +3 -0
- adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/layout.json +3 -0
- advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/4dc50125-c0f0-4b0e-bd23-e245af616f1c_content_list.json +3 -0
- advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/4dc50125-c0f0-4b0e-bd23-e245af616f1c_model.json +3 -0
- advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/4dc50125-c0f0-4b0e-bd23-e245af616f1c_origin.pdf +3 -0
- advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/full.md +365 -0
- advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/images.zip +3 -0
- advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/layout.json +3 -0
- advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/bcde49e0-9021-4943-a047-8162add29b41_content_list.json +3 -0
- advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/bcde49e0-9021-4943-a047-8162add29b41_model.json +3 -0
- advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/bcde49e0-9021-4943-a047-8162add29b41_origin.pdf +3 -0
- advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/full.md +369 -0
- advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/images.zip +3 -0
- advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/layout.json +3 -0
- alohaanewmeasureforhallucinationincaptioningmodels/2651e8c2-ba78-498e-9084-aabb876a82f8_content_list.json +3 -0
- alohaanewmeasureforhallucinationincaptioningmodels/2651e8c2-ba78-498e-9084-aabb876a82f8_model.json +3 -0
- alohaanewmeasureforhallucinationincaptioningmodels/2651e8c2-ba78-498e-9084-aabb876a82f8_origin.pdf +3 -0
- alohaanewmeasureforhallucinationincaptioningmodels/full.md +874 -0
- alohaanewmeasureforhallucinationincaptioningmodels/images.zip +3 -0
- alohaanewmeasureforhallucinationincaptioningmodels/layout.json +3 -0
- amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/0a65d3d9-987a-47b3-b9d9-a1053e256e3c_content_list.json +3 -0
- amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/0a65d3d9-987a-47b3-b9d9-a1053e256e3c_model.json +3 -0
- amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/0a65d3d9-987a-47b3-b9d9-a1053e256e3c_origin.pdf +3 -0
- amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/full.md +626 -0
- amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/images.zip +3 -0
- amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/layout.json +3 -0
- arithmeticreasoningwithllmprologgenerationpermutation/5a089955-c71e-4ee9-b652-0504dd5f9383_content_list.json +3 -0
- arithmeticreasoningwithllmprologgenerationpermutation/5a089955-c71e-4ee9-b652-0504dd5f9383_model.json +3 -0
- arithmeticreasoningwithllmprologgenerationpermutation/5a089955-c71e-4ee9-b652-0504dd5f9383_origin.pdf +3 -0
- arithmeticreasoningwithllmprologgenerationpermutation/full.md +293 -0
- arithmeticreasoningwithllmprologgenerationpermutation/images.zip +3 -0
- arithmeticreasoningwithllmprologgenerationpermutation/layout.json +3 -0
- beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/430aedb8-8e92-42ef-8176-5984d7337890_content_list.json +3 -0
- beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/430aedb8-8e92-42ef-8176-5984d7337890_model.json +3 -0
- beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/430aedb8-8e92-42ef-8176-5984d7337890_origin.pdf +3 -0
- beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/full.md +394 -0
- beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/images.zip +3 -0
- beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/layout.json +3 -0
- breakingthelanguagebarriercandirectinferenceoutperformpretranslationinmultilingualllmapplications/22394956-5f70-4322-9984-6d8e991f143c_content_list.json +3 -0
- breakingthelanguagebarriercandirectinferenceoutperformpretranslationinmultilingualllmapplications/22394956-5f70-4322-9984-6d8e991f143c_model.json +3 -0
acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/1ec0d09f-46f4-4104-bfdf-c87e062462ac_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ee0f22eed8873ce2bcff21c8ce584eedd3b0ecd072c203694348337987bbbe72
|
| 3 |
+
size 50871
|
acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/1ec0d09f-46f4-4104-bfdf-c87e062462ac_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3ff18db669e5d1679c8fb88f38893bc555f00c4b17e470ace63c8511c2857a8b
|
| 3 |
+
size 61546
|
acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/1ec0d09f-46f4-4104-bfdf-c87e062462ac_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8acff5b3b6c8a033d9fe7b6fac894950317930902123e020f52cbd9bb5390524
|
| 3 |
+
size 155361
|
acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/full.md
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Continued Pretrained LLM Approach for Automatic Medical Note Generation
|
| 2 |
+
|
| 3 |
+
Dong Yuan* Eti Rastogi* Gautam Naik Sree Prasanna Rajagopal
|
| 4 |
+
|
| 5 |
+
Sagar Goyal Fen Zhao Bharath Chintagunta Jeff Ward
|
| 6 |
+
|
| 7 |
+
DeepScribe Inc.
|
| 8 |
+
|
| 9 |
+
San Francisco, California, USA
|
| 10 |
+
|
| 11 |
+
{dong, eti, gautam, sree, sagar, fen, jai, jeff}@deepscribe.tech
|
| 12 |
+
|
| 13 |
+
# Abstract
|
| 14 |
+
|
| 15 |
+
LLMs are revolutionizing NLP tasks. However, the use of the most advanced LLMs, such as GPT-4, is often prohibitively expensive for most specialized fields. We introduce HEAL, the first continuously trained 13B LLaMA2-based LLM that is purpose-built for medical conversations and measured on automated scribing. Our results demonstrate that HEAL outperforms GPT-4 and PMC-LLaMA in PubMedQA, with an accuracy of $78.4\%$ . It also achieves parity with GPT-4 in generating medical notes. Remarkably, HEAL surpasses GPT-4 and Med-PaLM 2 in identifying more correct medical concepts and exceeds the performance of human scribes and other comparable models in correctness and completeness.
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
The emergence of large language model (LLM) has brought revolutionary changes to natural language processing and understanding tasks, paving the way for practical applications of AI across multiple domains such as law, finance, and healthcare. Private LLMs such as GPT-4 (OpenAI, 2023) and MedPaLM 2 (Singhal et al., 2023) and open-source LLMs like LLaMA2 (Meta, 2023) have shown strong performance on general NLP benchmarks. However, recent studies have shown promise that with continued training on more targeted datasets, e.g. smaller LLMs like Orca (Mukherjee et al., 2023; Mitra et al., 2023) and Phi-2 (Mojan Javaheripi, 2023), can surpass much larger LLMs on general tasks. Despite the success of LLM in general capabilities, they often fall short in niche domains like healthcare, where precision and profound understanding are crucial. Hence, several models such as Meditron-70B (Chen et al., 2023b), PMC-LLaMA (Wu et al., 2023) have emerged.
|
| 20 |
+
|
| 21 |
+
Transcribing medical conversations is a challenging task for both humans and machines due to po
|
| 22 |
+
|
| 23 |
+
tential transcription errors and the innate complexity of spoken language, an issue unaddressed by existing medical LLMs. Existing LLMs trained on medical data largely do well on problems like medical Q&A but struggle to produce a comprehensive EHR-compatible medical note. Some domain-adapted LLMs (Van Veen et al., 2023) can write some components of the note, but they leave out the crucial "Subjective" section. Some fine-tuned models (Zhang et al., 2021) can generate notes from medical conversations but need human overview.
|
| 24 |
+
|
| 25 |
+
Overall, we developed a new medical LLM proficient in interpreting medical conversation. By using techniques like continued pretraining on diverse data and explanation tuning, including medical and general web corpora, GPT-4 task instructions, EHRs, the model was capable of producing medical SOAP notes approved by physicians.
|
| 26 |
+
|
| 27 |
+
Our main contributions include:
|
| 28 |
+
|
| 29 |
+
To the best of our knowledge, we are the first to build a small-size (13B) medical LLM that can produce medical notes without any human intervention from doctor-patient conversations that surpass human quality and are accepted by physicians.
|
| 30 |
+
|
| 31 |
+
HEAL surpasses Med-PaLM 2 and other publicly available models of the same size, matches GPT-4's performance in medical notes generation, and excels with the highest completeness.
|
| 32 |
+
|
| 33 |
+
Despite having a smaller model size, we achieved an accuracy of $78.4\%$ on PubMedQA, outperforming GPT-4 and within $5\%$ of MedPaLM 2's performance.
|
| 34 |
+
|
| 35 |
+
# 2 Continued Pretraining
|
| 36 |
+
|
| 37 |
+
# 2.1 Dataset
|
| 38 |
+
|
| 39 |
+
We collected our training data from three major sources to enable the model to generate coherent English sentences, comprehend medical content, and execute complex instructions required for generating medical notes. (see Table 1)
|
| 40 |
+
|
| 41 |
+
<table><tr><td>Dataset</td><td>Number of tokens (in billions)</td><td>Percentage of total data</td></tr><tr><td>Non-medical public</td><td>5.33</td><td>35.79</td></tr><tr><td>Medical public</td><td>5.68</td><td>38.14</td></tr><tr><td>Medical proprietary</td><td>3.88</td><td>26.07</td></tr><tr><td>Total</td><td>14.89</td><td>100.00</td></tr></table>
|
| 42 |
+
|
| 43 |
+
Table 1: Pretraining datasets.
|
| 44 |
+
|
| 45 |
+
Non-medical public datasets. To ensure that the new model doesn't lose the generative capabilities of the pretrained LLaMA2 model, we added general domain datasets such as C4 (Raffel et al., 2019). Continued pretraining on them was crucial for generation tasks, enhancing the model's grammar and phrase composition skills. Initially, we also included filtered subtitle data from OpenSubtitles and YouTube. However, we decided to exclude these datasets due to their poor quality negatively impacting the model's performance.
|
| 46 |
+
|
| 47 |
+
Medical public datasets. We filtered data from medical web domains such as nih.gov to cover different aspects of medical concept understanding and replay medical knowledge to the model, so the model won't forget the medical knowledge after continued training. MedDialog (Chen et al., 2020) taught medical language conversation while reading materials such as PubMed articles (Gao et al., 2020) provided the model with an overall medical context. PubMed and filtered web medical corpus were two major sources, each contributing around 2.5B tokens in the final training dataset.
|
| 48 |
+
|
| 49 |
+
Proprietary medical datasets. We also curated a deidentified proprietary medical dataset that consists of real-world doctor-patient conversations from the United States, Electronic Health Records (EHR), SOAP (Subjective, Objective, Assessment, and Plan) notes, and ROS (Review of System) templates. We also created a synthetic dataset comprising medical instructions, such as extraction of medications from a medical conversation and grammar correction of a generated medical note. These instructions were generated with the help of both humans and GPT-3.5/GPT-4. For some of the instructions, we also included detailed explanations as shown in (Mukherjee et al., 2023). Training on such instructions with explanations helped the model better comprehend the medical notes and understand the reasoning behind it, which was especially needed for the downstream medical documentation task. For example, we created a medical instruction that asks the model to retrieve information from a conversation as shown below:
|
| 50 |
+
|
| 51 |
+
You specialize in summarizing medical conversations, providing clear and thorough explanations so that people can trust your summary with evidence. I have part of a transcript from a conversation between my doctor and myself.
|
| 52 |
+
|
| 53 |
+
Task: Summarize the <targeted content> from this conversation.
|
| 54 |
+
|
| 55 |
+
Requirements: <requirements>
|
| 56 |
+
|
| 57 |
+
Transcript: <transcript>
|
| 58 |
+
|
| 59 |
+
Then we further created instructions about reviewing the generated note:
|
| 60 |
+
|
| 61 |
+
Your job is to review a given medical note and generate an updated note.
|
| 62 |
+
|
| 63 |
+
Rules: <rules on how to review>.
|
| 64 |
+
|
| 65 |
+
List all the needed updates for the medical note as Updates. Return the updated medical note as Updated Medical Note.
|
| 66 |
+
|
| 67 |
+
Transcript: <transcript>
|
| 68 |
+
|
| 69 |
+
Medical Note: <medical note>
|
| 70 |
+
|
| 71 |
+
Finally, both of them were used for training the model to improve the model's understanding of the summarization task.
|
| 72 |
+
|
| 73 |
+
While we developed a much larger high-quality custom dataset including more than 60B tokens, currently only 14.89B tokens were used for this training exercise.
|
| 74 |
+
|
| 75 |
+
# 2.2 Training Details
|
| 76 |
+
|
| 77 |
+
We performed training using FSDP (Zhao et al., 2023) pipeline parallelism with hybrid sharding and flash attention 2 on 32 A100 80 GB GPUs. We continued training LLaMA2 13B using learning rate of 5e-5 which decays to 1e-5 following a cosine schedule. We chose a relatively small batch size of 256, to achieve more than 10K effective gradient update steps. A medical conversation can exceed 30 minutes and surpass 4K in context length. Therefore, we used 8K context length by applying positional interpolation (Chen et al., 2023a) to the base model. We set the weight decay at 0.1 and a warm-up step count to 50.
|
| 78 |
+
|
| 79 |
+
Robust Training. To be tolerant of machine and experiment related mishaps, we used fixed seed, checkpoints, and implemented phased training where we divided the training data into $n$ subsets. If the loss of a particular validation subset started to stabilize, we reduced the sampling rate in the next phase for efficiency.
|
| 80 |
+
|
| 81 |
+
Data Packing & Dedup. We packed data by sentence to fit into max sequence length. We also
|
| 82 |
+
|
| 83 |
+

|
| 84 |
+
Figure 1: Pretraining validation perplexity.
|
| 85 |
+
|
| 86 |
+

|
| 87 |
+
|
| 88 |
+
deduplicated our data to improve data quality (Lee et al., 2021).
|
| 89 |
+
|
| 90 |
+
Loss. For the general corpus including C4, public medical materials, we calculated the gradient on every token. However, on proprietary instruction data, the loss was only calculated on response tokens like (Mukherjee et al., 2023).
|
| 91 |
+
|
| 92 |
+
# 3 Evaluations
|
| 93 |
+
|
| 94 |
+
This section shows some of our continued pretraining results and evaluation methodology.
|
| 95 |
+
|
| 96 |
+
# 3.1 Pretraining
|
| 97 |
+
|
| 98 |
+
We employed two evaluation methods to monitor pretraining. Firstly, we measured the perplexity across all the data sources. We used a validation set to track how efficiently the model learns from each source. Figure 1 is a subset of evaluations on EHR and MIMIC IV Note. EHR Note is 1K notes sampled from our proprietary dataset, which is the doctors' written notes from real clinic visits. MIMIC IV Note is 1K sampled deidentified critical care notes from the public dataset (Johnson et al., 2020). Figure 1 shows that as the training continues, the model progressively increases its understanding of both data sets. However, MIMIC IV has a much lower perplexity suggesting that the base LLaMA2 model might have been trained on this dataset during the initial pretraining process.
|
| 99 |
+
|
| 100 |
+
Secondly, for a holistic understanding of the generation quality, we used several few-shot (3-shot) generative tasks for validation, that included:
|
| 101 |
+
|
| 102 |
+
1) Long text generation: This task is associated with summarizing different categories of the subjective section of SOAP notes from medical transcripts between doctor and patient. For example:
|
| 103 |
+
|
| 104 |
+
Prompt Summarize the patient's chief complaint from the given text.
|
| 105 |
+
|
| 106 |
+
Transcript: <transcript>
|
| 107 |
+
|
| 108 |
+
Output <response>
|
| 109 |
+
|
| 110 |
+
<table><tr><td>Training data</td><td>ROS (multi-choice) (Acc %)</td><td>Long Text Rouge-1 (f1 %)</td><td>Long Text Rouge-cls (f1 %)</td></tr><tr><td>1B Total</td><td>47.36</td><td>44.81</td><td>41.53</td></tr><tr><td>MED</td><td>37.85</td><td>39.44</td><td>35.91</td></tr><tr><td>PUB</td><td>36.81</td><td>44.49</td><td>42.35</td></tr></table>
|
| 111 |
+
|
| 112 |
+
Table 2: Training data ablation results. The MED dataset is derived from the 1B training dataset by excluding all the public datasets. Similarly, the PUB dataset is produced by removing all medical datasets.
|
| 113 |
+
|
| 114 |
+
2) Medium text generation: This is a question answering task on medical transcript. We curated this data by modifying the Alpaca (Rohan et al., 2023) pipeline on the collected transcription dataset. We queried GPT-4 to generate questions prompting responses ranging from a few words to a full sentence based on the transcription. For example:
|
| 115 |
+
|
| 116 |
+
Prompt Identify the patient's current medication.
|
| 117 |
+
|
| 118 |
+
Transcript: <transcript>
|
| 119 |
+
|
| 120 |
+
Output <response>
|
| 121 |
+
|
| 122 |
+
3) Short text generation: This comprises ROS (Review of System)-related classification tasks, including questions about body system identification (multi-choice), and absence or presence of symptoms (single-choice). For example:
|
| 123 |
+
|
| 124 |
+
Prompt Is the patient showing signs of depression, like persistent sadness, lack of interest, or appetite changes?
|
| 125 |
+
|
| 126 |
+
Transcript: <transcript>
|
| 127 |
+
|
| 128 |
+
Output <response>
|
| 129 |
+
|
| 130 |
+
We measured Rouge-cls for tasks 1 and 2, and accuracy for task 3, to monitor pretraining performance. Each evaluation dataset has 1000 examples.
|
| 131 |
+
|
| 132 |
+
Figure 2 demonstrates that our model's performance consistently improved in generating long and medium texts, and in multi-choice classification. However, no significant improvement was observed in single-choice classification. We attribute this to the already high accuracy numbers and the fact that further improvement was noted when the model was separately trained on a smaller related dataset, indicating potential enhancements with scaled-up training.
|
| 133 |
+
|
| 134 |
+
# 3.2 Pretraining Ablation
|
| 135 |
+
|
| 136 |
+
Table 2 shows our examination of the effects of varying data proportions using a 1B token dataset, derived from a scaled-down version of our custom
|
| 137 |
+
|
| 138 |
+

|
| 139 |
+
Figure 2: Pretraining validation generation capability monitoring.
|
| 140 |
+
|
| 141 |
+

|
| 142 |
+
|
| 143 |
+

|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
|
| 147 |
+
<table><tr><td>Model</td><td>#Incorrect</td><td>#Irrelevant</td><td>#Missed</td></tr><tr><td>Human</td><td>1.20</td><td>0</td><td>11.20</td></tr><tr><td>GPT-4</td><td>0.80</td><td>0.20</td><td>6.75</td></tr><tr><td>Med-PaLM 2</td><td>1.36</td><td>0</td><td>10.50</td></tr><tr><td>GPT-3.5</td><td>2.00</td><td>1.71</td><td>8.50</td></tr><tr><td>†LLaMA2-chat-13B</td><td>4.14</td><td>4.71</td><td>11.21</td></tr><tr><td>†PMC-LLaMA-13B</td><td>1.57</td><td>0.43</td><td>15.14</td></tr><tr><td>*LLaMA2-13B</td><td>1.50</td><td>0.14</td><td>9.86</td></tr><tr><td>*MedLLaMA-13B</td><td>2.07</td><td>0.71</td><td>11.57</td></tr><tr><td>*Meditron-7B</td><td>3.00</td><td>0.57</td><td>10.64</td></tr><tr><td>HEAL</td><td>0.85</td><td>0.30</td><td>4.30</td></tr></table>
|
| 148 |
+
|
| 149 |
+
Table 3: Average entity errors comparison. Both * and † are fine-tuned models. * indicates a pretrained model was used as the base, † denotes a fine-tuned instruction model was used as the base.
|
| 150 |
+
|
| 151 |
+
15B dataset on the 7B LLaMA2 model. The ablation study revealed that removing general datasets from the mix detrimentally impacted the model's generative abilities, resulting in decreased summarization quality. We were also able to conclude that the medical datasets indeed improved the model's understanding of the medical context. Consequently, we decided to use equal proportions of these datasets during training to maintain the model's generative abilities while improving its understanding of medical contexts.
|
| 152 |
+
|
| 153 |
+
# 3.3 Medical Note Generation
|
| 154 |
+
|
| 155 |
+
Evaluation Dataset and Setup. We compared the HEAL model to several general and medical SOTA models, including the high-end GPT-4, GPT-3.5, and Med-PaLM 2 (Singhal et al., 2023) and other similarly sized open-source medical LLMs, as shown in Table 3. We meticulously fine-tuned LLaMA2-Chat-13B (Meta, 2023) and the PMC-LLaMA-13B (Wu et al., 2023) on medical generative tasks of varying lengths, detailed in Section 3.1 using 10K instruction samples. Pretrained models like LLaMA2-13B (Meta, 2023), MedLLaMA (base model of PMC-LLaMA), and Meditron-7B (Chen et al., 2023b) were explanation-tuned on our proprietary dataset of 500K examples to enhance their instruction-following capabilities.
|
| 156 |
+
|
| 157 |
+
We also compared these models to human scribes from our production system (medical students who underwent internal scribe training and received monetary compensation for their services). All the models and scribes were evaluated on generating the Subjective and Plan sections of the SOAP medical note using 10 doctor-patient dialogue-style conversations averaging 12 minutes each.
|
| 158 |
+
|
| 159 |
+
Evaluation Metric. We leveraged human medical experts to evaluate these models. They developed a rubric note for each transcript, highlighting all essential medical information as separate medical entities. Every entity symbolized a significant sentence or phrase that a healthcare provider needed in order to approve the note. On average, our experts identified 35 medical entities per transcript. We evaluated the generated notes on three key parameters: Completeness, Correctness, and Conciseness as outlined in (Van Veen et al., 2023) using the following metrics: 1) Missed Information refers to the entities omitted in the test note relative to the rubric note. This metric reflects the test note's completeness.
|
| 160 |
+
2) Incorrect Information refers to the entities inaccurately captured by the test note. This metric is critical in healthcare where information accuracy is essential, as misinformation can erode trust in AI. 3) Irrelevant information refers to extraneous elements in the test note not linked to the rubric note. As lengthy medical notes require more time for review, it's crucial to reduce irrelevant information.
|
| 161 |
+
Results and Analysis. Table 3 compares the performance of our HEAL model, other models, and human scribes. Notably, HEAL surpasses all other models in the Missed Information metric, indicating a superior ability to identify and summarize critical medical information. We attribute this improved performance to our continued pretraining approach using complex medical instructions. We also observed some inaccuracies due to ASR (Automatic Speech Recognition) errors, yet both our model and GPT-4 excelled at correcting these
|
| 162 |
+
|
| 163 |
+
<table><tr><td>Dataset</td><td>LLaMA2 13B</td><td>PMC- LLaMA 13B</td><td>GPT-4 (5-shot)</td><td>Med- PaLM 2 (best)</td><td>HEAL 13B</td></tr><tr><td>PubMedQA</td><td>76.40</td><td>77.90</td><td>75.2</td><td>81.8</td><td>78.4</td></tr><tr><td>MedQA</td><td>45.48</td><td>56.36</td><td>81.4</td><td>86.5</td><td>47.2</td></tr></table>
|
| 164 |
+
|
| 165 |
+
Table 4: Accuracy (%) on PubMedQA and MedQA.
|
| 166 |
+
|
| 167 |
+
mistakes. Human scribes and Med-PaLM 2 created concise notes but missed vital medical details. Other models, such as GPT 3.5, MedLLaMa, and LLaMA2-chat, struggled to grasp real-world conversation nuances, as shown by their high Incorrect and Missed Information scores. Overall, our model shows exceptional performance in all metrics of the task, outperforming both human scribes and other fine-tuned models.
|
| 168 |
+
|
| 169 |
+
In our detailed quality evaluation, we found that a human scribe takes about 1.67 times longer than the audio recording to create a medical note. However, AI models can generate the same note almost instantly, demonstrating the efficiency and time-saving capabilities of AI in medical transcription.
|
| 170 |
+
|
| 171 |
+
# 3.4 Public Benchmark
|
| 172 |
+
|
| 173 |
+
Although HEAL is specifically designed for medical note summarization, we also tested its performance against other LLMs on two popular medical benchmarks to evaluate its efficiency in other medical tasks.
|
| 174 |
+
|
| 175 |
+
PubMedQA (Jin et al., 2019) A biomedical QA task to answer research questions with yes/no/maybe using the corresponding PubMed paper snippets. MedQA (Jin et al., 2021) Multi-choice questions extracted from US Medical License Exams.
|
| 176 |
+
|
| 177 |
+
In PubMedQA, Med-PaLM 2 with the best prompting strategy (Singhal et al., 2023) took advantage of its huge size and further tuning on PubMedQA data to achieve the highest score. As shown in Table 4, HEAL achieved $78.4\%$ accuracy after tuning, which surpasses GPT-4's performance (Nori et al., 2023), fine-tuned LLaMA2 and even PMC-LLaMA (Wu et al., 2023) which is further tuned on 75B PubMed data. Our improved performance can be attributed to our proprietary medical instruction data on conversational data which focuses more on medical understanding.
|
| 178 |
+
|
| 179 |
+
In MedQA, we attained a $47.2\%$ accuracy rate, surpassing the LLaMA2 13B model yet falling short of PMC-LLaMA. MedQA focuses on medical reasoning, requiring the model to recall medical knowledge and derive diagnoses or solutions from specified problems. Larger models like GPT-4, Med-PaLM 2, or those trained with vast amounts of
|
| 180 |
+
|
| 181 |
+
data hold an inherent advantage in this task. HEAL, which is geared towards interpreting medical conversations, does not align with this task, yielding suboptimal performance on this dataset.
|
| 182 |
+
|
| 183 |
+
# 4 Conclusion
|
| 184 |
+
|
| 185 |
+
This paper presents our work of developing a medical LLM capable of comprehending and summarizing medical conversation. As a result, this is the first model, with significantly fewer parameters, to outperform humans, existing medical LLMs including Med-PaLM 2, PMC-LLaMA and perform on par with GPT-4. Our evaluation shows that even small-scale continued pretraining of smaller LLMs can show impressive gains. We believe that scaling up our training can further improve results. Our work presents a promising development in healthcare documentation and other medical areas.
|
| 186 |
+
|
| 187 |
+
# 5 Related Work
|
| 188 |
+
|
| 189 |
+
Medical LLMs. Various medical LLMs such as MedGPT (Kraljevic et al., 2021), and Med-PaLM 2 (Singhal et al., 2023) show how training on various medical datasets, improves model's performance on medical knowledge understanding tasks. MEDITRON-70B (Chen et al., 2023b), the state-of-the-art open-source LLM and PMC-LLaMA (Wu et al., 2023) demonstrates the effectiveness of task-specific fine-tuning and instruction tuning.
|
| 190 |
+
|
| 191 |
+
Domain adaption LLM. As demonstrated by (Gururangan et al., 2020), (Beltagy et al., 2019), continued pretraining on unlabeled, domain-specific data boosts model performance on domain tasks, providing a practical solution when resources for scratch domain-adaptive pretraining are limited.
|
| 192 |
+
|
| 193 |
+
Medical Note Generation. Prior work by (Zhang et al., 2021), (Van Veen et al., 2023) demonstrated the feasibility of using Language Models to generate medical summaries from dialogues. However, they primarily aimed at producing partial notes or semi-automated methods requiring human involvement, rather than comprehensive, provider-ready reports.
|
| 194 |
+
|
| 195 |
+
Explanation tuning. Orca (Mukherjee et al., 2023; Mitra et al., 2023) models showcased that smaller Language Models capable of sound reasoning can efficiently perform complex tasks. They were trained by explanation tuning a LLaMA2 13B model (Touvron et al., 2023) using bigger models like GPT4 as a teacher.
|
| 196 |
+
|
| 197 |
+
# 6 Ethical Considerations
|
| 198 |
+
|
| 199 |
+
All the data processing and experiments were done in a HIPAA-compliant environment. We deidentified clinical data to remove any PHI information as per our data compliance agreement. HEAL is only used for internal medical tasks like summarization, transcription-based Q&A, and note review. All prompts are audited to prevent unintentional usage.
|
| 200 |
+
|
| 201 |
+
# 7 Limitations
|
| 202 |
+
|
| 203 |
+
Our design focuses on contextual comprehension and summarization of transcripts, and can be further improved on MedQA or similar benchmarks with training on more medical data. Future projects could explore utilizing more sophisticated base models, curating higher quality data with a balanced mix of medical knowledge and reasoning content, and scaling up the experiment.
|
| 204 |
+
|
| 205 |
+
# References
|
| 206 |
+
|
| 207 |
+
Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. Scibert: A pretrained language model for scientific text. arXiv preprint arXiv:1903.10676.
|
| 208 |
+
Shouyuan Chen, Sherman Wong, Liangjian Chen, and Yuandong Tian. 2023a. Extending context window of large language models via positional interpolation. arXiv preprint arXiv:2306.15595.
|
| 209 |
+
Shu Chen, Zeqian Ju, Xiangyu Dong, Hongchao Fang, Sicheng Wang, Yue Yang, Jiaqi Zeng, Ruisi Zhang, Ruoyu Zhang, Meng Zhou, Penghui Zhu, and Pengtao Xie. 2020. Meddialog: a large-scale medical dialogue dataset. arXiv preprint arXiv:2004.03329.
|
| 210 |
+
Zeming Chen, Alejandro Hernández Cano, Angelika Romanou, Antoine Bonnet, Kyle Matoba, Francesco Salvi, Matteo Pagliardini, Simin Fan, Andreas Köpf, Amirkeivan Mohtashami, et al. 2023b. Meditron-70b: Scaling medical pretraining for large language models. arXiv preprint arXiv:2311.16079.
|
| 211 |
+
Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. 2020. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027.
|
| 212 |
+
Suchin Gururangan, Ana Marasovic, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360, Online. Association for Computational Linguistics.
|
| 213 |
+
|
| 214 |
+
Di Jin, Eileen Pan, Nassim Oufattole, Wei-Hung Weng, Hanyi Fang, and Peter Szolovits. 2021. What disease does this patient have? a large-scale open domain question answering dataset from medical exams. Applied Sciences, 11(14):6421.
|
| 215 |
+
Qiao Jin, Bhuwan Dhingra, Zhengping Liu, William Cohen, and Xinghua Lu. 2019. Pubmedqa: A dataset for biomedical research question answering. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2567-2577.
|
| 216 |
+
A Johnson, L Bulgarelli, T Pollard, S Horng, LA Celi, and R Mark. 2020. Mimic-iv (version 1.0).
|
| 217 |
+
Zeljko Kraljevic, Anthony Shek, Daniel Bean, Rebecca Bendayan, James Teo, and Richard Dobson. 2021. Medgpt: Medical concept prediction from clinical narratives. arXiv preprint arXiv:2107.03134.
|
| 218 |
+
Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. 2021. Deduplicating training data makes language models better. arXiv preprint arXiv:2107.06499.
|
| 219 |
+
Meta. 2023. Llama 2: Open foundation and fine-tuned chat models. ArXiv, abs/2307.09288.
|
| 220 |
+
Arindam Mitra, Luciano Del Corro, Shweti Mahajan, Andres Codas, Clarisse Simoes, Sahaj Agarwal, Xuxi Chen, Anastasia Razdaibiedina, Erik Jones, Kriti Aggarwal, Hamid Palangi, Guoqing Zheng, Corby Rosset, Hamed Khanpour, and Ahmed Awadallah. 2023. Orca 2: Teaching small language models how to reason.
|
| 221 |
+
Mojan Javaheripi and Sébastien Bubeck. 2023. Phi-2: The surprising power of small language models.
|
| 222 |
+
Subhabrata Mukherjee, Arindam Mitra, Ganesh Jawahar, Sahaj Agarwal, Hamid Palangi, and Ahmed Awadallah. 2023. Orca: Progressive learning from complex explanation traces of gpt-4.
|
| 223 |
+
Harsha Nori, Nicholas King, Scott Mayer McKinney, Dean Carignan, and Eric Horvitz. 2023. Capabilities of gpt-4 on medical challenge problems. arXiv preprint arXiv:2303.13375.
|
| 224 |
+
OpenAI. 2023. Gpt-4 technical report. ArXiv, abs/2303.08774.
|
| 225 |
+
Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2019. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints.
|
| 226 |
+
Taori Rohan, Gulrajani Ishaan, Zhang Tianyi, Dubois Yann, Li Xuechen, Guestrin Carlos, Liang Percy, and B. Hashimoto Tatsunori. 2023. Alpaca: A strong, replicable instruction-following model.
|
| 227 |
+
|
| 228 |
+
Karan Singhal, Tao Tu, Juraj Gottweis, Rory Sayres, Ellery Wulczyn, Le Hou, Kevin Clark, Stephen Pfohl, Heather Cole-Lewis, Darlene Neal, et al. 2023. Towards expert-level medical question answering with large language models. arXiv preprint arXiv:2305.09617.
|
| 229 |
+
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.
|
| 230 |
+
Dave Van Veen, Cara Van Uden, Louis Blankemeier, Jean-Benoit Delbrouck, Asad Aali, Christian Bluethgen, Anuj Pareek, Malgorzata Polacin, William Collins, Neera Ahuja, et al. 2023. Clinical text summarization: adapting large language models can outperform human experts. arXiv preprint arXiv:2309.07430.
|
| 231 |
+
Chaoyi Wu, Weixiong Lin, Xiaoman Zhang, Ya Zhang, Yanfeng Wang, and Weidi Xie. 2023. Pmc-llama: Towards building open-source language models for medicine. arXiv preprint arXiv:2305.10415.
|
| 232 |
+
Longxiang Zhang, Renato Negrinho, Arindam Ghosh, Vasudevan Jagannathan, Hamid Reza Hassanzadeh, Thomas Schaaf, and Matthew R Gormley. 2021. Leveraging pretrained models for automatic summarization of doctor-patient conversations. arXiv preprint arXiv:2109.12174.
|
| 233 |
+
Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. 2023. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277.
|
acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:348ff61ff75fdb7bc97f587b808f576e815abb775712ed7f4f5d036407b92674
|
| 3 |
+
size 154281
|
acontinuedpretrainedllmapproachforautomaticmedicalnotegeneration/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5f4531eda6284469777a40dad61f61a55062ce7ce5befd2551ffd492a553e505
|
| 3 |
+
size 214493
|
adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/bdcf793f-f222-45fa-8bf3-c81a767b7987_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3298434e34d4501676c5ce5cc8ddd127974b88d5689cb77c49d0f4096a3d1506
|
| 3 |
+
size 32745
|
adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/bdcf793f-f222-45fa-8bf3-c81a767b7987_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f81a902b738046528d38491b7f0188be01b813cf9cf282d0e9e2079be53fa67c
|
| 3 |
+
size 38944
|
adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/bdcf793f-f222-45fa-8bf3-c81a767b7987_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fac07b5924440cfd0199f8435ad8a2826a18efae6e43f58636c6a93bd36e6a8d
|
| 3 |
+
size 319966
|
adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/full.md
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A diverse Multilingual News Headlines Dataset from around the World
|
| 2 |
+
|
| 3 |
+
Felix Leeb and Bernhard Scholkopf
|
| 4 |
+
Max Planck Institute for Intelligent Systems
|
| 5 |
+
Tübingen, Germany
|
| 6 |
+
fleeb@tue.mpg.de
|
| 7 |
+
|
| 8 |
+
# Abstract
|
| 9 |
+
|
| 10 |
+
BABEL BRIEFINGS is a novel dataset featuring 4.7 million news headlines from August 2020 to November 2021, across 30 languages and 54 locations worldwide with English translations of all articles included. Designed for natural language processing and media studies, it serves as a high-quality dataset for training or evaluating language models as well as offering a simple, accessible collection of articles, for example, to analyze global news coverage and cultural narratives. As a simple demonstration of the analyses facilitated by this dataset, we use a basic procedure using a TF-IDF weighted similarity metric to group articles into clusters about the same event. We then visualize the event signatures of the event showing articles of which languages appear over time, revealing intuitive features based on the proximity of the event and unexpectedness of the event. The dataset is available on Kaggle and HuggingFace with accompanying GitHub code.
|
| 11 |
+
|
| 12 |
+
# 1 Introduction
|
| 13 |
+
|
| 14 |
+
Analyzing news headlines can be an invaluable source of data for a wide variety of natural language processing tasks such as bias detection (Gangula et al., 2019), topic classification (Rana et al., 2014), or event tracking (Qian et al., 2019). Furthermore, news headlines can provide insights for sociologists and political scientists about how people think about and discuss current events.
|
| 15 |
+
|
| 16 |
+
The coverage and discussion of current events varies significantly across different media outlets worldwide, however, these distinctions may be difficult to integrate in data mining or machine learning systems due to the language barrier. There are relatively few datasets offering extensive, diverse, and multilingual content (Kreutzer et al., 2022). This is especially problematic for natural language processing tasks, which have been shown to exhibit language biases (Gallegos et al., 2023).
|
| 17 |
+
|
| 18 |
+
We seek to address these limitations with a new dataset called BABEL BRIEFINGS, which is an accessible dataset representing a wide variety of languages and cultures. BABEL BRIEFINGS provides daily headlines of articles from across the world, originally written in one of 30 languages from 54 locations around the world published between August 2020 and November 2021, for a total of about 4.7 million distinct articles. Consequently, our dataset offers a rich source for analyses of world events, cultural narratives, media framing, and more.
|
| 19 |
+
|
| 20 |
+
We make this dataset available on Kaggle and HuggingFace for easy and open access under the CC BY-NC-SA 4.0 license<sup>1</sup>, as well as providing all code used to collect and process the data on GitHub.
|
| 21 |
+
|
| 22 |
+
# 1.1 Related Work
|
| 23 |
+
|
| 24 |
+
Many comparable datasets focus either on depth, i.e., tracking a small number (or even a single) outlet over some time, or breadth for comparative studies of specific events. Meanwhile, our dataset covers a broad set of outlets in different languages over more than one year for both comparative and longitudinal studies.
|
| 25 |
+
|
| 26 |
+
Some of the related datasets publicly available include the News Category Dataset (Misra, 2022), BBC News Archive (Greene and Cunningham, 2006), AG News (Zhang et al., 2015), and CC News (Hamborg et al., 2017). However, all of these datasets are mostly or entirely limited to English headlines and/or outlets. Meanwhile, datasets used in projects like Mazumder et al. (2014) or Leskovec et al. (2009) focus on collecting many sources over a relatively short timespan (see the appendix for a comparison table).
|
| 27 |
+
|
| 28 |
+
A more global source of news events is offered
|
| 29 |
+
|
| 30 |
+
by the GDELT project (Leetaru and Schrodt, 2013), which collects reports from around the world in a variety of languages. However, the GDELT dataset focuses on tracking events, rather than the news coverage thereof, making it more suitable for event forecasting rather than media coverage or training language models.
|
| 31 |
+
|
| 32 |
+
# 2 Dataset
|
| 33 |
+
|
| 34 |
+
# 2.1 Collection
|
| 35 |
+
|
| 36 |
+
The dataset was collected in three distinct steps. First, using the News API (News-API, 2023), we gathered the available headlines once a day for each combination of all 54 locations and each of the seven possible categories. Each API call returned a list of about 30-70 article headlines for a total of about 20k instances per day, usually featuring duplicate headlines across locations and categories.
|
| 37 |
+
|
| 38 |
+
Next, in a pre-processing step, duplicate occurrences of the same article were merged and listed in a list of instances (see below). The author names are anonymized (replacing the names with author#[ID] where the ID is identical for all articles with matching authors, but distinct otherwise).
|
| 39 |
+
|
| 40 |
+
Lastly, the final step involved the translation of non-English articles. Using Google Translate (Google, 2023), all articles not originally in English were translated for convenience. Notably, News API appears to only collect articles of a single language for each of the locations, making translation straightforward. Unfortunately, some of the language selections by News API do not seem to fully reflect the local news in a given location (for example Malaysia's articles are all in English), although the headline subjects appear curated for the assigned location.
|
| 41 |
+
|
| 42 |
+
# 2.2 Structure
|
| 43 |
+
|
| 44 |
+
BABEL BRIEFINGS is structured as a collection of 54 JSON files, one for each location. Each file contains a list of headlines of articles that first occurred in the corresponding location, each of which is represented as a JSON object with the following properties: title, description, content, url, urlToImage, publishedAt, author, source, instances, and language. For articles that are originally in a language other than English, the translated title, description, and content are also included as en-title, en-description, and en-content respectively.
|
| 45 |
+
|
| 46 |
+
Since each article may appear in multiple locations, categories, or over multiple days, the instances property lists the properties location and category for each instance when the article was collected with timestamp collectedAt. The source property is an object containing the id and name of the news source. The language property is the original language (in ISO 639-1 format) of the article, which is assigned automatically based on the location. A single category is assigned to each instance automatically by News API, and are one of: business, entertainment, general, health, science, sports, or technology.
|
| 47 |
+
|
| 48 |
+
Notably, the content (and en-content, when present) properties contain nonsense data for articles in languages that use a non-latin alphabet, such as Chinese or Arabic. This is due to a flaw in the News API processing. For details check out the dataset's README.
|
| 49 |
+
|
| 50 |
+
# 2.3 Statistics
|
| 51 |
+
|
| 52 |
+
In total, we collected a total of 7,419,089 instances of 4,719,199 distinct articles between 8 August 2020 and 29 November 2021, with a breakdown by language in table 1. More detailed statistics are available in the dataset README.
|
| 53 |
+
|
| 54 |
+
# 3 Analysis
|
| 55 |
+
|
| 56 |
+
A particularly interesting type of analysis enabled by this dataset is the longitudinal comparison of how the same news event is reported in different languages and around the world. To illustrate, let's consider a basic example. We begin by clustering individual articles that discuss the same event. Then, we analyze the distribution and frequency of articles from different countries over time, focusing on that specific event.
|
| 57 |
+
|
| 58 |
+
To cluster articles that are about the same event, we begin by extracting a bag of words from the article's (English) title where each word is lemmatized as well as removing punctuation and common stopwords (such as "the" or "a"). We use Term Frequency-Inverse Document Frequency (TF-IDF) (Salton et al., 1975) to define the relevance $R_{d}$ of each token relative to the other tokens of the articles that occurred on the same day $d$ .
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
R_{d}(w) = \frac{\operatorname{tf}(w, d)}{\sum_{d'} \operatorname{tf}(w, d')} \cdot \log \frac{N}{\operatorname{df}(w)} \tag{1}
|
| 62 |
+
$$
|
| 63 |
+
|
| 64 |
+
where $\operatorname{tf}(w, d)$ is the number of times the word $w$ occurs in the day $d$ , $\operatorname{df}(w)$ is the number of days
|
| 65 |
+
|
| 66 |
+

|
| 67 |
+
Figure 1: Streamplot showing how many articles appear for some of the most popular events in the dataset, when clustering articles by their titles, with most common tokens for each cluster shown in the legend. Note the qualitative similarity between the news coverage over time of these events and the memes of Leskovec et al. (2009), demonstrating the potential of this dataset for studying the evolution of major news coverage over time across the world.
|
| 68 |
+
|
| 69 |
+
<table><tr><td>Language</td><td>Articles</td></tr><tr><td>English</td><td>1,128,233</td></tr><tr><td>Spanish</td><td>455,952</td></tr><tr><td>French</td><td>288,328</td></tr><tr><td>Chinese</td><td>270,887</td></tr><tr><td>German</td><td>259,718</td></tr><tr><td>Portuguese</td><td>243,829</td></tr><tr><td>Arabic</td><td>178,854</td></tr><tr><td>Indonesian</td><td>131,252</td></tr><tr><td>Italian</td><td>129,005</td></tr><tr><td>Turkish</td><td>122,724</td></tr><tr><td>Greek</td><td>119,940</td></tr><tr><td>Japanese</td><td>118,475</td></tr><tr><td>Polish</td><td>116,904</td></tr><tr><td>Russian</td><td>113,395</td></tr><tr><td>Dutch</td><td>104,031</td></tr><tr><td>Thai</td><td>90,708</td></tr><tr><td>Swedish</td><td>86,838</td></tr><tr><td>Korean</td><td>83,090</td></tr><tr><td>Serbian</td><td>80,040</td></tr><tr><td>Hungarian</td><td>73,509</td></tr><tr><td>Czech</td><td>70,647</td></tr><tr><td>Hebrew</td><td>67,794</td></tr><tr><td>Bulgarian</td><td>67,223</td></tr><tr><td>Ukrainian</td><td>65,610</td></tr><tr><td>Romanian</td><td>54,601</td></tr><tr><td>Norwegian</td><td>46,804</td></tr><tr><td>Slovak</td><td>43,057</td></tr><tr><td>Latvian</td><td>40,006</td></tr><tr><td>Lithuanian</td><td>34,719</td></tr><tr><td>Slovenian</td><td>33,026</td></tr></table>
|
| 70 |
+
|
| 71 |
+
Table 1: Number of articles by language.
|
| 72 |
+
|
| 73 |
+
in which the word $w$ occurs, and $N$ is the total number of days in the dataset.
|
| 74 |
+
|
| 75 |
+
Using the TF-IDF scores for each word, we define a relevance score $\hat{R}_d(x) = \sum_{w\in x}R_d(w)$ for an article $x$ that occurs first on day $d$ as the sum of the TF-IDF scores of the words in its title. Furthermore, we define a similarity criterion between two articles as the ratio between the sum of all words that occur in both articles weighted by the relevance of each word and the largest relevance score between the two articles.
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
\operatorname{sim}(x, x') = \frac{\sum_{w \in x \cap x'} R_{d}(w)}{\max\left(\hat{R}_{d}(x), \hat{R}_{d}(x')\right)} \tag{2}
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
If this ratio is greater than some threshold ($= 0.25$ in our experiments), we consider the two articles to be in the same group. In other words, two articles are grouped together when they share significant word overlap, weighted by how specific those words are to the day. For the top ten articles with the highest relevance scores every day, we identify all articles in the dataset which, based on our similarity criterion, are in the same group to form an event cluster.
|
| 82 |
+
|
| 83 |
+
Figure 1 presents clusters of such articles, identifying the top TF-IDF scores where the clusters are largest—that is, events with the most articles published about them.
|
| 84 |
+
|
| 85 |
+
Next, we take a closer look at a few of the largest clusters in figures 2-5. We visualize the event signatures, which show how the coverage of the same event varies across different languages by how many articles are published every day. For each of the four examples, the plot shows a streamplot breaking down how many articles were published
|
| 86 |
+
|
| 87 |
+

|
| 88 |
+
Figure 2: Articles reporting on riots in Washington DC on 6 January 2021. Note how the event is reported in many different languages, but the majority of articles are in English. Additionally, there are several subsequent smaller spikes corresponding to related events, such as the beginning of the formal investigation into the riots.
|
| 89 |
+
|
| 90 |
+

|
| 91 |
+
Figure 3: Articles reporting on Diego Maradona's death on 25 November 2020 (and his declining health in the weeks before). Note how after a few weeks only Spanish articles about the topic continue to appear, underscoring the relative importance of the event in Spanish-speaking countries.
|
| 92 |
+
|
| 93 |
+
for each of the top ten most common languages for the event, as well as the most frequent tokens occurring in that cluster in the top right. One interesting result from this precursory analysis is a distinct qualitative difference in the event signatures of "expected" events (such as in figure 4) compared to "unexpected" events (such as in figures 2 and 5). For expected events, there is a clear lead-up to the event, with a peak on the day of the event, and a sharp drop-off afterwards. Meanwhile, unexpected events show a sudden spike in coverage, followed by a gradual decline over time. This provides a demonstration of the types of analyses that can be conducted with this dataset, offering insights into the diversity and scope of global news coverage.
|
| 94 |
+
|
| 95 |
+
# 4 Conclusion
|
| 96 |
+
|
| 97 |
+
In this paper, we introduce a dataset of news headlines from around the world called BABEL BRIEFINGS. The dataset can readily be used for a wide variety of both supervised and unsupervised natural language processing tasks. For example, the included category, location, and language labels can directly be used for article categorization, location classification, or language detection. However, the dataset also enables more nuanced analyses of
|
| 98 |
+
|
| 99 |
+

|
| 100 |
+
Figure 4: Articles reporting on the Super Bowl on 7 February 2021. Note how unlike unexpected events (such as in figure 2), there is a considerable lead up to the event before the peak, showing the media's anticipation of the event.
|
| 101 |
+
|
| 102 |
+

|
| 103 |
+
Figure 5: Articles reporting on a crisis between Israel and Gaza on 10 May 2021. Note the prolonged spike for the duration of the crisis, as well as the significant number of articles in Arabic and Hebrew.
|
| 104 |
+
|
| 105 |
+
global news coverage, such as tracking the evolution of events over time, comparing the coverage of events across different countries and languages, or identifying cultural biases in reporting.
|
| 106 |
+
|
| 107 |
+
Despite the breadth across languages and time, our dataset is limited to the headlines and short descriptions of news articles. However, URLs to the full articles are included, and since many outlets are incentivized to make their headlines as informative as possible, headlines alone are already a rich source of information for many purposes. Additionally, the dataset is limited to 54 locations, which is a significant improvement over existing datasets which are often limited to a single country or outlet. There are some minor issues with the News API, for example that for each location only a single language is represented. We aim to mitigate this issue by collecting headlines directly from the RSS feeds of individual outlets from around the world. However, this may come at the cost of consistency across sources around the world.
|
| 108 |
+
|
| 109 |
+
In summary, our dataset is a powerful tool for studying the nuances of global news coverage when breaking beyond the language barrier. It provides a simple yet rich foundation for capturing cultural differences in news reporting, offering invaluable data and insights for researchers in the fields of natural language processing, as well as social sciences like media studies or international relations.
|
| 110 |
+
|
| 111 |
+
# References
|
| 112 |
+
|
| 113 |
+
Isabel O Gallegos, Ryan A Rossi, Joe Barrow, Md Mehrab Tanjim, Sungchul Kim, Franck Dernoncourt, Tong Yu, Ruiyi Zhang, and Nesreen K Ahmed. 2023. Bias and fairness in large language models: A survey. arXiv preprint arXiv:2309.00770.
|
| 114 |
+
Rama Rohit Reddy Gangula, Suma Reddy Duggenpudi, and Radhika Mamidi. 2019. Detecting political bias in news articles using headline attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 77-84, Florence, Italy. Association for Computational Linguistics.
|
| 115 |
+
Google. 2023. Google translate. https://translate.google.com. Accessed: 30 October 2023.
|
| 116 |
+
Derek Greene and Pádraig Cunningham. 2006. Practical solutions to the problem of diagonal dominance in kernel document clustering. In Proceedings of the 23rd international conference on Machine learning, pages 377-384.
|
| 117 |
+
Felix Hamborg, Norman Meuschke, Corinna Breitinger, and Bela Gipp. 2017. news-please: A generic news crawler and extractor. In Proceedings of the 15th International Symposium of Information Science, pages 218-223.
|
| 118 |
+
Julia Kreutzer, Isaac Caswell, Lisa Wang, Ahsan Wahab, Daan van Esch, Nisanbayar Ulzii-Orshikh, Allahsera Tapo, Nishant Subramani, Artem Sokolov, Claytone Sikasote, et al. 2022. Quality at a glance: An audit of web-crawled multilingual datasets. Transactions of the Association for Computational Linguistics, 10:50-72.
|
| 119 |
+
Kalev Leetaru and Philip A Schrodt. 2013. Gdelt: Global data on events, location, and tone, 1979-2012. In ISA annual convention, volume 2, pages 1-49. Citeseer.
|
| 120 |
+
Jure Leskovec, Lars Backstrom, and Jon Kleinberg. 2009. Meme-tracking and the dynamics of the news cycle. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining, pages 497-506.
|
| 121 |
+
Sahisnu Mazumder, Bazir Bishnoi, and Dhaval Patel. 2014. News headlines: What they can tell us? In Proceedings of the 6th IBM Collaborative Academia Research Exchange Conference (I-CARE) on I-CARE 2014, pages 1-4.
|
| 122 |
+
Rishabh Misra. 2022. News category dataset. arXiv preprint arXiv:2209.11429.
|
| 123 |
+
News-API. 2023. News api. https://newsapi.org/. Accessed: 8 August 2020.
|
| 124 |
+
Yu Qian, Xiongwen Deng, Qiongwei Ye, Baojun Ma, and Hua Yuan. 2019. On detecting business events from the headlines and leads of massive online news articles. Information Processing & Management, 56(6):102086.
|
| 125 |
+
|
| 126 |
+
Mazhar Iqbal Rana, Shehzad Khalid, and Muhammad Usman Akbar. 2014. News classification based on their headlines: A review. In 17th IEEE International Multi Topic Conference 2014, pages 211-216. IEEE.
|
| 127 |
+
Gerard Salton, A Wong, and C S Yang. 1975. A vector space model for automatic indexing. Communications of the ACM, 18(11):613-620.
|
| 128 |
+
Xiang Zhang, Junbo Jake Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. In NIPS.
|
| 129 |
+
|
| 130 |
+
# A News Headline Dataset Comparison
|
| 131 |
+
|
| 132 |
+
<table><tr><td>Dataset Name</td><td>Size</td><td>Sources</td><td>Language/s</td><td>Time Span</td></tr><tr><td>BABEL BRIEFINGS (ours)</td><td>4.7M</td><td>Worldwide</td><td>30 languages</td><td>Aug 2020 - Nov 2021</td></tr><tr><td>News Category Dataset (Misra, 2022)</td><td>210k</td><td>HuffPost</td><td>English only</td><td>2012-2022</td></tr><tr><td>BBC News Archive (Greene and Cunningham, 2006)</td><td>2225</td><td>BBC</td><td>English only</td><td>2004-2005</td></tr><tr><td>AG News (Zhang et al., 2015)</td><td>128k</td><td>>2000</td><td>English only</td><td>2004</td></tr><tr><td>CC News (Hamborg et al., 2017)</td><td>708k</td><td>Worldwide</td><td>English only</td><td>Jan 2017 - Dec 2019</td></tr><tr><td>Mazumder et al. (2014) Dataset</td><td>1.5M</td><td>87 Indian sources</td><td>English only</td><td>Jan - Jun 2014</td></tr><tr><td>Leskovec et al. (2009) Dataset</td><td>90M</td><td>US news + blog sites</td><td>English only</td><td>Aug - Oct 2008</td></tr><tr><td>GDELT Project (Leetaru and Schrodt, 2013)</td><td>>326M</td><td>Worldwide</td><td>>100 Languages</td><td>since 1979</td></tr></table>
|
| 133 |
+
|
| 134 |
+
Table 2: Comparison of various existing datasets similar to BABEL BRIEFINGS
|
adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4cb450b30ad3a803bb8604a5764e6714576110c55cb66a4e46ed7e5cb68e3649
|
| 3 |
+
size 306454
|
adiversemultilingualnewsheadlinesdatasetfromaroundtheworld/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:613edc48d37fa4f0861f03bdf759d355d75b3832b2e3b22fb557a977af7c9d3c
|
| 3 |
+
size 134393
|
advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/4dc50125-c0f0-4b0e-bd23-e245af616f1c_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:30e1e65bc1f24f58ba55f86df6c1e28dbdb324da81574f600e929d585b315761
|
| 3 |
+
size 60139
|
advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/4dc50125-c0f0-4b0e-bd23-e245af616f1c_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:acd5b4b44be765823af43dbd650c6fe2a0037693549fbd59abbac01ffae1f7ad
|
| 3 |
+
size 71582
|
advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/4dc50125-c0f0-4b0e-bd23-e245af616f1c_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1989ed1eb6f66677c19f56dcbaf20bcb279829fcc3859dc18c04488866a10dce
|
| 3 |
+
size 271177
|
advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/full.md
ADDED
|
@@ -0,0 +1,365 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Advancing Regular Language Reasoning in Linear Recurrent Neural Networks
|
| 2 |
+
|
| 3 |
+
Ting-Han Fan*
|
| 4 |
+
|
| 5 |
+
Independent Researcher
|
| 6 |
+
|
| 7 |
+
tinghanf@alumni.princeton.edu
|
| 8 |
+
|
| 9 |
+
# Ta-Chung Chi*
|
| 10 |
+
|
| 11 |
+
Carnegie Mellon University
|
| 12 |
+
|
| 13 |
+
tachungc@andrew.cmu.edu
|
| 14 |
+
|
| 15 |
+
# Alexander I. Rudnicky
|
| 16 |
+
|
| 17 |
+
Carnegie Mellon University
|
| 18 |
+
|
| 19 |
+
air@cs.cmu.edu
|
| 20 |
+
|
| 21 |
+
# Abstract
|
| 22 |
+
|
| 23 |
+
In recent studies, linear recurrent neural networks (LRNNs) have achieved Transformer-level performance in natural language and long-range modeling, while offering rapid parallel training and constant inference cost. With the resurgence of interest in LRNNs, we study whether they can learn the hidden rules in training sequences, such as the grammatical structures of regular language. We theoretically analyze some existing LRNNs and discover their limitations in modeling regular language. Motivated by this analysis, we propose a new LRNN equipped with a block-diagonal and input-dependent transition matrix. Experiments suggest that the proposed model is the only LRNN capable of performing length extrapolation on regular language tasks such as Sum, Even Pair, and Modular Arithmetic. The code is released at https://github.com/tinghanf/RegluarLRNN.
|
| 24 |
+
|
| 25 |
+
# 1 Introduction
|
| 26 |
+
|
| 27 |
+
There is a recent surge in the use of LRNNs (Gu et al., 2022; Peng et al., 2023; Orvieto et al., 2023) as alternatives to the de-facto Transformer architecture (Vaswani et al., 2017; Radford et al., 2019), which is ingrained in the field of natural language processing. LRNNs depart from the inter-timestep non-linearity design principle of classic RNNs (Elman, 1990; Jordan, 1997; Hochreiter and Schmidhuber, 1997; Cho et al., 2014), while at the same time: 1. achieving Transformer-level performance on the task of natural language modeling (Fu et al., 2023; Poli et al., 2023) and even better performance on synthetic long-range modeling tasks (Gu et al., 2022; Gupta et al., 2022; Orvieto et al., 2023; Hasani et al., 2023; Smith et al., 2023). 2. having the added benefits of fast parallelizable training (Martin and Cundy, 2018) and constant inference cost.
|
| 28 |
+
|
| 29 |
+
In spite of the remarkable empirical performance on natural language tasks, there has been no research on LRNNs' ability to model regular language. Regular language is a type of language that strictly follows certain rules like grammar. The successful modeling of a regular language is important since it implies a model's ability to learn the underlying rules of the data. For example, if the training data are arithmetic operations such as $1 + 2 \times 3$ , a model should learn the rules of $a + b$ , $a \times b$ , and that $\times$ has a higher priority than $+$ . Learning unambiguous rules behind the data is a critical step toward sequence modeling with regulated output.
|
| 30 |
+
|
| 31 |
+
In this paper, we aim to determine if existing LRNNs are competent to learn the correct grammar of regular language by testing their language transduction capability under the length extrapolation setting. Concretely, a model is trained only to predict the desired outputs on a set of short sequences of length $L_{tr}$ . It then needs to predict the correct outputs for longer testing sequences of length $L_{ex} \gg L_{tr}$ . Adopting the length extrapolation setting is essential to mitigate the risk of a model learning spurious shortcut solutions (Liu et al., 2023).
|
| 32 |
+
|
| 33 |
+
We theoretically show that some of the recently proposed LRNNs lack the expressiveness to encode certain arithmetic operations used in the tasks of regular language. In light of this observation, we propose a new LRNN equipped with a block-diagonal and input-dependent transition matrix, which enables the successful modeling of regular language. Experiments show that the proposed model is the only LRNN architecture that can extrapolate well on regular language tasks such as Sum, Even Pair, and Modular Arithmetic.
|
| 34 |
+
|
| 35 |
+
LRNNs in this work have the following general
|
| 36 |
+
|
| 37 |
+
formulation:
|
| 38 |
+
|
| 39 |
+
$$
|
| 40 |
+
x _ {k} = A _ {k} x _ {k - 1} + B u _ {k} \tag {1}
|
| 41 |
+
$$
|
| 42 |
+
|
| 43 |
+
$$
|
| 44 |
+
y _ {k} = h \left(x _ {k}\right).
|
| 45 |
+
$$
|
| 46 |
+
|
| 47 |
+
$A_{k}$ is a matrix that defines the recurrence relation. $A_{k}$ may or may not depend on the input $u_{k}$ . When it is input-independent, $A_{k}$ is reduced to $A$ ; otherwise, $A_{k} = g(u_{k})$ for some function $g$ . The first line encodes a linear recurrence in the state $x_{k}$ . The second line is an output $y_{k}$ that depends on $x_{k}$ . To control the expressiveness, the function $h$ may or may not be a linear operation. Since the existing LRNNs differ in their linear recurrence relations (Eq. (2), (3), and (4)), we mainly focus on analyzing these relations.
|
| 48 |
+
|
| 49 |
+
# 2 Limitations of Most LRNNs
|
| 50 |
+
|
| 51 |
+
In this section, we theoretically show that most LRNNs are unable to represent arithmetic operations. The analysis serves as a motivation to study input-dependent transition matrices with constraints on their column norm.
|
| 52 |
+
|
| 53 |
+
# 2.1 Input-independent LRNN
|
| 54 |
+
|
| 55 |
+
To begin with, state-space models (in discrete-time format) follow the standard LRNN recurrence relation:
|
| 56 |
+
|
| 57 |
+
$$
|
| 58 |
+
x _ {k} = A x _ {k - 1} + B u _ {k} \tag {2}
|
| 59 |
+
$$
|
| 60 |
+
|
| 61 |
+
Eq. (2) encapsulates the recurrence relation of S4 (Gu et al., 2022; Gupta et al., 2022), S5 (Smith et al., 2023), and Linear Recurrent Unit (Orvieto et al., 2023). For example, $A$ represents the HiPPO matrix family (Gu et al., 2023) of S4 or a complex diagonal matrix of Linear Recurrent Unit. We show in Proposition 1 that such an input-independent matrix $A$ cannot represent subtraction.
|
| 62 |
+
|
| 63 |
+
Proposition 1. An input-independent LRNN is inconsistent in representing subtraction.
|
| 64 |
+
|
| 65 |
+
Proof. Denote $u_0, u_{-}$ , and $u_1$ as the input vector w.r.t. input characters 0, -, and 1. Denote $z$ as the initial state vector. The sequences "0-1" and "1-0" are represented as
|
| 66 |
+
|
| 67 |
+
$$
|
| 68 |
+
x _ {0 - 1} = A ^ {3} z + A ^ {2} u _ {0} + A u _ {-} + u _ {1}, \quad \text{for "0-1"}
|
| 69 |
+
$$
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
x _ {1 - 0} = A ^ {3} z + A ^ {2} u _ {1} + A u _ {-} + u _ {0}, \quad \text{for "1-0"}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
Because $0 - 1 \neq 1 - 0$ , by forcing $x_{0-1} \neq x_{1-0}$ we have
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
A ^ {2} u _ {0} + A u _ {-} + u _ {1} \neq A ^ {2} u _ {1} + A u _ {-} + u _ {0}.
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
On the other hand, let $x_{0-} = A^2 z + Au_0 + u_-$ be the vector representation for "0-". The sequences "0-0-1" and "0-1-0" are represented as
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
x _ {0 - 0 - 1} = A ^ {3} x _ {0 -} + A ^ {2} u _ {0} + A u _ {-} + u _ {1}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
x _ {0 - 1 - 0} = A ^ {3} x _ {0 -} + A ^ {2} u _ {1} + A u _ {-} + u _ {0}.
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
Notice $x_{0 - 0 - 1}$ is for "0-0-1" while $x_{0 - 1 - 0}$ is for "0-1-0". Because $0 - 0 - 1 = 0 - 1 - 0 = -1$, enforcing $x_{0 - 0 - 1} = x_{0 - 1 - 0}$, we have
|
| 92 |
+
|
| 93 |
+
$$
|
| 94 |
+
A ^ {2} u _ {0} + A u _ {-} + u _ {1} = A ^ {2} u _ {1} + A u _ {-} + u _ {0},
|
| 95 |
+
$$
|
| 96 |
+
|
| 97 |
+
which is a contradiction.
|
| 98 |
+
|
| 99 |
+

|
| 100 |
+
|
| 101 |
+
The limitation described by Proposition 1 also applies to models adopting diagonal linear recurrence relations (Gupta et al., 2022; Smith et al., 2023; Orvieto et al., 2023). The failure to represent regular language will be corroborated by the inferior length extrapolation performance reported later in § 4.
|
| 102 |
+
|
| 103 |
+
# 3 Proposed Method
|
| 104 |
+
|
| 105 |
+
Now that input-independent LRNNs struggle with representing arithmetic operations, we review the paradigms known to model regular language, which is the type of formal language recognized by a Finite State Automata (FSA) (Chomsky, 1956). An FSA is described by a 5-tuple $(Q,\Sigma ,\delta ,q_0,F)$ . $Q$ and $\Sigma$ are non-empty sets of states and input symbols. $q_{0}\in Q$ is an initial state. $\delta :Q\times \Sigma \to Q$ is an input-dependent transition function; $F\subseteq Q$ is a set of final states.
|
| 106 |
+
|
| 107 |
+
We hypothesize that an LRNN could model regular language if it can simulate an FSA, whose transition function has the following two key properties:
|
| 108 |
+
|
| 109 |
+
- It is input-dependent.
|
| 110 |
+
- If represented in the matrix form, its column vectors all have unit norm (in $\| \cdot \|_1$ ).
|
| 111 |
+
|
| 112 |
+
# 3.1 Diagonal Input-dependent LRNN
|
| 113 |
+
|
| 114 |
+
Let us first examine the simplest input-dependent LRNN:
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
x _ {k} = \operatorname {d i a g} \left(v _ {k}\right) x _ {k - 1} + B u _ {k}, \tag {3}
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
where $v_{k} = f(u_{k})$ is a vector that depends on $u_{k}$ . Unfortunately, we show that a diagonal input-dependent LRNN still cannot represent subtraction in Proposition 2.
|
| 121 |
+
|
| 122 |
+
Proposition 2. A diagonal input-dependent LRNN is inconsistent in representing subtraction.
|
| 123 |
+
|
| 124 |
+
The proof is essentially a generalization of Proposition 1 and is deferred to Appendix A.1.
|
| 125 |
+
|
| 126 |
+
# 3.2 Improved Expressiveness: Liquid-S4
|
| 127 |
+
|
| 128 |
+
To improve the expressiveness of Eq. (3), we note that the recently proposed liquid-S4 (Hasani et al., 2023) model has the following recurrence relation:
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
\begin{array}{l} x _ {k} = A x _ {k - 1} + \left(B u _ {k}\right) \odot x _ {k - 1} + B u _ {k} \tag {4} \\ = (A + \operatorname {d i a g} (B u _ {k})) x _ {k - 1} + B u _ {k}, \\ \end{array}
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
where $\odot$ denotes the Hadamard product and $\mathrm{diag}(w)$ constructs a diagonal matrix from $w$ . Although Liquid-S4 does not suffer from the limitation outlined in Proposition 2, our experiments in § 4.4 show that Liquid-S4 still cannot extrapolate on regular language tasks.
|
| 135 |
+
|
| 136 |
+
# 3.3 Block-diagonal Input-dependent LRNN
|
| 137 |
+
|
| 138 |
+
Finally, we decide to push the expressiveness of $A_{k}$ to the limit and make it fully input-dependent:
|
| 139 |
+
|
| 140 |
+
$$
|
| 141 |
+
x _ {k} = A _ {k} x _ {k - 1} + B u _ {k}, \tag {5}
|
| 142 |
+
$$
|
| 143 |
+
|
| 144 |
+
where $A_{k} = g(u_{k})$ is a block diagonal matrix in practice for the sake of efficiency. $A_{k}$ depends on $u_{k}$ but not previous timesteps. $g$ is an arbitrary function with the output being the size of $A_{k}$ .
|
| 145 |
+
|
| 146 |
+
Eq. (5) is numerically unstable because the product $\prod_{i=1}^{k} A_i$ could produce large numbers. The solution is to impose additional constraints on the norm of $A_k$ :
|
| 147 |
+
|
| 148 |
+
$$
|
| 149 |
+
\begin{array}{l} A _ {k} = \operatorname {d i a g} \left(A _ {k} ^ {(1)}, \dots , A _ {k} ^ {(h)}\right) \in \mathbb {R} ^ {b h \times b h} \\ A _ {k} ^ {(i)} = \left[ v _ {k} ^ {(i, 1)} \quad \dots \quad v _ {k} ^ {(i, b)} \right] \in \mathbb {R} ^ {b \times b} \tag {6} \\ \| v _ {k} ^ {(i, j)} \| _ {p} \leq 1, \quad i \in [ 1,..., h ], \quad j \in [ 1,..., b ], \\ \end{array}
|
| 150 |
+
$$
|
| 151 |
+
|
| 152 |
+
where $\| \cdot \|_p$ denotes the vector p-norm and $v_k^{(i,j)}$ is a column vector that depends on $u_k$ . For any vector $v$ , we can derive another vector $v'$ to satisfy the p-norm constraint through $v' = v / \max(1, \|v\|_p)$ . Because $\|v\|_p \geq \|v\|_q$ when $p \leq q$ , a smaller $p$ imposes a stronger constraint on the columns of $A_k^{(i)}$ . In other words, we can stabilize Eq. (5) by selecting a sufficiently small $p$ .
|
| 153 |
+
|
| 154 |
+
Take $p = 1$ as an example. Every block $A_{k}^{(i)}$ is a matrix that none of its column norm is greater than 1 in $\| \cdot \| _1$ . This implies $A_{k + 1}^{(i)}A_{k}^{(i)}$ is the same kind of matrix. Specifically, let $v^{(1)},\ldots ,v^{(b)}$ be the columns of $A_{k + 1}^{(i)}A_{k}^{(i)}$ . We have
|
| 155 |
+
|
| 156 |
+
$$
|
| 157 |
+
\begin{array}{l} \left[ \left\| v ^ {(1)} \right\| _ {1} \dots \left\| v ^ {(b)} \right\| _ {1} \right] = \mathbb {1} ^ {\top} \left| A _ {k + 1} ^ {(i)} A _ {k} ^ {(i)} \right| \tag {7} \\ \leq \mathbb {1} ^ {\top} \left| A _ {k + 1} ^ {(i)} \right| \left| A _ {k} ^ {(i)} \right| \leq \mathbb {1} ^ {\top} \left| A _ {k} ^ {(i)} \right| \leq \mathbb {1} ^ {\top}. \\ \end{array}
|
| 158 |
+
$$
|
| 159 |
+
|
| 160 |
+
Note that $\mathbb{1}$ is a column vector of all ones. $|\cdot|$ and $\leq$ are element-wise absolute value and inequality operations. The last two inequalities hold since the column norms of $A_{k+1}^{(i)}$ and $A_{k}^{(i)}$ are no greater than 1 in $\| \cdot \|_1$.
|
| 161 |
+
|
| 162 |
+
Eq. (7) demonstrates that $p = 1$ can stabilize the proposed block-diagonal recurrence, Eq. (5). However, a small $p$ restricts a model's expressiveness. In § 4.4, we will show that $p = 1.2$ is small enough to yield good empirical performance.
|
| 163 |
+
|
| 164 |
+
# 3.4 Efficient Implementation via Parallel Scan
|
| 165 |
+
|
| 166 |
+
We implement LRNNs in the parallel scan (PScan) mode as shown in Fig. 1. The idea of PScan is to group similar operations together, run them in parallel, and deliver the same results as those in the sequential (Sequential) for loop mode. For example, to compute $x_{3} = A_{3}A_{2}A_{1}u_{0} + A_{3}A_{2}u_{1} + A_{3}u_{2} + u_{3}$ , Sequential runs this in three steps. On the other hand, PScan decomposes the computation into two steps:
|
| 167 |
+
|
| 168 |
+
- Step 1: Compute $A_{1}u_{0} + u_{1}$ and $A_{3}u_{2} + u_{3}$ . Because these two operations are similar, we can compute them in parallel.
|
| 169 |
+
- Step 2: $x_{3} = A_{3}A_{2}(A_{1}u_{0} + u_{1}) + (A_{3}u_{2} + u_{3}).$
|
| 170 |
+
|
| 171 |
+
Generally speaking, a length- $L$ generation takes $\lceil \log_2 L \rceil$ steps using PScan. However, each step requires careful handling of the intermediate matrices. As illustrated in Fig. 1, for a length-8 generation, the first step requires $[A_1, A_3, A_5, A_7]$ , the second step requires $[A_2, A_3A_2, A_6, A_7A_6]$ , and the third step requires $[A_4, A_5A_4, A_6A_5A_4, A_7A_6A_5A_4]$ . To this end, we present an algorithm to generate the intermediate matrices in Appendix A.2.1. We integrate these intermediate matrices in PScan and show that PScan is equivalent to Sequential in Appendix A.2.2.
|
| 172 |
+
|
| 173 |
+
The computational complexity of our model is $O(b^{3}h\log (T))$ , where $b$ , $h$ , and $T$ represent the block size, number of blocks, and sequence length, respectively. With the embedding dimension held fixed as $bh$ , the complexity scales quadratically w.r.t the block size.
|
| 174 |
+
|
| 175 |
+
# 4 Experiments
|
| 176 |
+
|
| 177 |
+
# 4.1 Regular Language Tasks
|
| 178 |
+
|
| 179 |
+
We evaluate the models using the regular language transduction tasks introduced in Deletang et al.
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
Figure 1: Illustration of Parallel Scan for a length-8 generation.
|
| 183 |
+
|
| 184 |
+
(2023). We prioritize language transduction over language recognition as the former can be more useful in practice (Deletang et al., 2023). We are particularly interested in Sum(5), EvenPair(5), and ModArith(5).
|
| 185 |
+
|
| 186 |
+
$\mathbf{Sum}(\mathbf{M})$ The input is a string $\{s_i\}_{i = 0}^{n - 1}$ of numbers in $[0,\dots ,M - 1]$ . The output is their sum modulo $\mathbf{M}$ : $\sum_{i = 0}^{n - 1}s_i\bmod M$ . For example, when $M = 5$ , the input 0324 corresponds to the output 4 because $0 + 3 + 2 + 4\bmod 5 = 4$ . Notably, $\mathbf{Sum}(2)$ is the famous PARITY problem that evaluates whether there is an odd number of 1s in a bit string. Thus, $\mathbf{Sum}(\mathbf{M})$ is a generalization of PARITY and shares the same characteristic: If one error occurs during the summation, the output will be wrong.
|
| 187 |
+
|
| 188 |
+
EvenPair(M) The input is a string $\{s_i\}_{i = 0}^{n - 1}$ of numbers in $[0,\dots ,M - 1]$ . The output is 1 if $s_{n - 1} = s_0$ and 0 otherwise. For example, when $M = 5$ , the input 0320 corresponds to the output 1 because the first entry equals the last entry. Since EvenPair(M) only cares about the first and last entries, a model should learn to remember the first entry and forget the remaining ones $i\in [1,\dots ,n - 2]$ .
|
| 189 |
+
|
| 190 |
+
ModArith(M) The input is a string $\{s_i\}_{i = 0}^{n - 1}$ of odd length (i.e., $n$ is odd). The even entries $(i\in [0,2,\ldots ])$ are numbers in $[0,\dots ,M - 1]$ ; The odd entries $(i\in [1,3,\dots ])$ are symbols in
|
| 191 |
+
|
| 192 |
+
$\{+, -, \times\}$ . The output is the answer of a mathematical expression under modulo M. For example, when $M = 5$ , the input $1 + 2 - 3 \times 4$ corresponds to the output 1 because $1 + 2 - 3 \times 4 \mod 5 = -9 \mod 5 = 1$ . ModArith(M) is much more complicated than Sum(M) and EvenPair(M) because a model should learn to prioritize multiplication over addition and subtraction.
|
| 193 |
+
|
| 194 |
+
# 4.2 Length Extrapolation
|
| 195 |
+
|
| 196 |
+
In our pilot experiments, we discovered that all models can achieve near-perfect same-length testing accuracy; i.e., testing with $L_{\mathrm{ex}} = L_{\mathrm{tr}}$ . This is not surprising, since a large enough model can memorize all training sequences in its parameters. To evaluate whether a model truly learns the underlying rules of a language, we first train a model on sequences of length $L_{\mathrm{tr}}$ generated by an FSA; It is then evaluated on sequences of length $L_{\mathrm{ex}} > L_{\mathrm{tr}}$ generated by the same FSA.
|
| 197 |
+
|
| 198 |
+
Table 1 summarizes the extrapolation setting. We mostly follow the requirements in Deletang et al. (2023), where the training and extrapolation lengths are 40 and 500. The lengths for ModArith(5) are 39 and 499 because this task requires odd-length inputs.
|
| 199 |
+
|
| 200 |
+
# 4.3 Baseline Models
|
| 201 |
+
|
| 202 |
+
We select baseline LRNNs such as S4 (Gu et al., 2022), S4D (Gupta et al., 2022), and Liquid-S4
|
| 203 |
+
|
| 204 |
+
<table><tr><td></td><td>Sum(5)</td><td>EvenPair(5)</td><td>ModArith(5)</td></tr><tr><td>Ltr</td><td>40</td><td>40</td><td>39</td></tr><tr><td>Lex</td><td>500</td><td>500</td><td>499</td></tr></table>
|
| 205 |
+
|
| 206 |
+
(Hasani et al., 2023) using the released codebase $^{2}$ under Apache-2.0 license. These models are chosen since they are the most stable and theoretically grounded LRNN design thanks to the careful parameterization of their state transition matrices. We also experiment with RWKV (Peng et al., 2023) and a vanilla LRNN without S4's parameterization. Unfortunately, their performance lags behind S4 on the reported tasks.
|
| 207 |
+
|
| 208 |
+
# 4.4 Experimental Results
|
| 209 |
+
|
| 210 |
+
For the proposed method, we set $p = 1.2$ in Eq. (6) and train the block-diagonal input-dependent LRNN with $(\mathrm{b},\mathrm{h}) = (8,8)$ . Because ModArith is more complicated than Sum and EvenPair, ModArith uses 3 layers while the others take 1 layer. Each layer is a full pass of LRNN as described in Eq. (1).
|
| 211 |
+
|
| 212 |
+
Table 2 compares the length extrapolation capability of our model with other LRNN baselines on regular language tasks. As we can see, the proposed model is the only LRNN that can extrapolate well on regular language. The inferior performance of S4 and S4D is expected since they cannot represent subtraction as illustrated in Prop. 1. As for Liquid-S4, despite the usage of input-dependent block matrices (discussed in § 3.2), it still cannot extrapolate well on regular language. We believe this can be explained by its low expressiveness (Eq. (4)) compared to the proposed model (Eq. (5) and (6)). Overall, we can see that the combination of input dependency and sufficient expressiveness plays an important role in terms of regular language modeling.
|
| 213 |
+
|
| 214 |
+
# 4.5 Speed Comparison
|
| 215 |
+
|
| 216 |
+
We conduct our experiments using a Quadro RTX 8000 GPU. To provide context for the aforementioned complexity analysis in § 3.4, we take the Sum(5) task and set $T = 40$ during the training stage. Sequential requires 0.033s per instance, while PScan completes the task in 0.021s.
|
| 217 |
+
|
| 218 |
+
Table 1: Training and Extrapolation Settings. $L_{tr}$ and $L_{ex}$ represent the training and extrapolation sequence lengths, respectively.
|
| 219 |
+
|
| 220 |
+
<table><tr><td></td><td>Ours</td><td>S4</td><td>S4D</td><td>Liquid-S4</td></tr><tr><td>Sum(5)</td><td>1.00</td><td>0.27</td><td>0.27</td><td>0.27</td></tr><tr><td>EvenPair(5)</td><td>0.99</td><td>0.81</td><td>0.82</td><td>0.72</td></tr><tr><td>ModArith(5)</td><td>1.00</td><td>0.27</td><td>0.27</td><td>0.27</td></tr></table>
|
| 221 |
+
|
| 222 |
+
Table 2: Length Extrapolation Performance on Regular Language Tasks. Each reported number is an average of five random trials. Each random trial returns the best testing accuracy over 40,000 gradient updates.
|
| 223 |
+
|
| 224 |
+
During the testing stage, we set $T = 500$ , where both Sequential and PScan take 0.03s per instance. One might anticipate PScan to outperform Sequential during testing. However, in practice, this is not the case, as the complexity incurred by $b^3$ counteracts the speedup offered by $\log(T)$ . To validate our hypothesis, we set $b = 1$ and reassess the speed. Subsequently, PScan achieves 0.0008s per instance, whereas Sequential takes 0.002s. Regarding why PScan demonstrates a notable speedup during the training stage, we hypothesize that it is due to the improved backpropagation path enabled by PScan.
|
| 225 |
+
|
| 226 |
+
# 5 Conclusion
|
| 227 |
+
|
| 228 |
+
In this work, we explored LRNNs in the realm of regular language modeling. We discovered that existing LRNNs cannot effectively represent subtraction. Consequently, we proposed a new LRNN equipped with a block-diagonal and input-dependent transition matrix. Our experiments confirmed the proposed model's capability to model various regular language tasks, including Sum, Even Pair, and Modular Arithmetic, under the challenging length extrapolation setting.
|
| 229 |
+
|
| 230 |
+
# Limitations
|
| 231 |
+
|
| 232 |
+
The limitations of this work stem from several factors: (a) our evaluation is confined to only three regular language tasks; (b) the scope of our work excludes natural language; and (c) the proposed model introduces new hyperparameters such as the block size and the p-norm.
|
| 233 |
+
|
| 234 |
+
For (a), it is possible to discuss the average performance over randomly generated regular language, as demonstrated in Valvoda et al. (2022). Regarding (b), while natural language falls beyond the scope of our study, we believe the proposed model is at least as effective as prior linear RNN models on natural language, owing to its enhanced expressiveness. Concerning (c), the block size typically
|
| 235 |
+
|
| 236 |
+
increases with the complexity of the problem. Nonetheless, it is feasible to maintain the same block size if more layers are employed (e.g., as described in § 4.4). Additionally, the p-norm parameter is chosen to be close to 1 to ensure stability; longer sequences correspond to smaller values of $p$.
|
| 237 |
+
|
| 238 |
+
# Ethics Statement
|
| 239 |
+
|
| 240 |
+
Our work lays the groundwork for developing LRNNs in underexplored languages, such as regular language. Inappropriate usage of our technique might have negative societal impacts, including potential losses due to wrong predictions and ethical challenges regarding the improper use of the model. These implications apply to most language processing research and are not unique to this specific work.
|
| 241 |
+
|
| 242 |
+
# References
|
| 243 |
+
|
| 244 |
+
Kyunghyun Cho, Bart van Merrienboer, Caglar Gulcehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1724-1734, Doha, Qatar. Association for Computational Linguistics.
|
| 245 |
+
Noam Chomsky. 1956. Three models for the description of language. IRE Transactions on information theory, 2(3):113-124.
|
| 246 |
+
Gregoire Deletang, Anian Ruoss, Jordi Grau-Moya, Tim Genewein, Li Kevin Wenliang, Elliot Catt, Chris Cundy, Marcus Hutter, Shane Legg, Joel Veness, and Pedro A Ortega. 2023. Neural networks and the chomsky hierarchy. In The Eleventh International Conference on Learning Representations.
|
| 247 |
+
Jeffrey L Elman. 1990. Finding structure in time. Cognitive science, 14(2):179-211.
|
| 248 |
+
Daniel Y Fu, Tri Dao, Khaled Kamal Saab, Armin W Thomas, Atri Rudra, and Christopher Re. 2023. Hungry hungry hippos: Towards language modeling with state space models. In *The Eleventh International Conference on Learning Representations*.
|
| 249 |
+
Albert Gu, Karan Goel, and Christopher Re. 2022. Efficiently modeling long sequences with structured state spaces. In International Conference on Learning Representations.
|
| 250 |
+
Albert Gu, Isys Johnson, Aman Timalsina, Atri Rudra, and Christopher Re. 2023. How to train your
|
| 251 |
+
|
| 252 |
+
HIPPO: State space models with generalized orthogonal basis projections. In International Conference on Learning Representations.
|
| 253 |
+
Ankit Gupta, Albert Gu, and Jonathan Berant. 2022. Diagonal state spaces are as effective as structured state spaces. In Advances in Neural Information Processing Systems.
|
| 254 |
+
Ramin Hasani, Mathias Lechner, Tsun-Hsuan Wang, Makram Chahine, Alexander Amini, and Daniela Rus. 2023. Liquid structural state-space models. In The Eleventh International Conference on Learning Representations.
|
| 255 |
+
Sepp Hochreiter and Jürgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.
|
| 256 |
+
Michael I Jordan. 1997. Serial order: A parallel distributed processing approach. In Advances in psychology, volume 121, pages 471-495. Elsevier.
|
| 257 |
+
Bingbin Liu, Jordan T. Ash, Surbhi Goel, Akshay Krishnamurthy, and Cyril Zhang. 2023. Transformers learn shortcuts to automata. In International Conference on Learning Representations.
|
| 258 |
+
Eric Martin and Chris Cundy. 2018. Parallelizing linear recurrent neural nets over sequence length. In International Conference on Learning Representations.
|
| 259 |
+
Antonio Orvieto, Samuel L Smith, Albert Gu, Anushan Fernando, Caglar Gulcehre, Razvan Pascanu, and Soham De. 2023. Resurrecting recurrent neural networks for long sequences. In Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 26670-26698. PMLR.
|
| 260 |
+
Bo Peng, Eric Alcaide, Quentin Anthony, Alon Albalak, Samuel Arcadinho, Huanqi Cao, Xin Cheng, Michael Chung, Matteo Grella, Kranthi Kiran GV, et al. 2023. Rwkv: Reinventing rnns for the transformer era. arXiv preprint arXiv:2305.13048.
|
| 261 |
+
Michael Poli, Stefano Massaroli, Eric Nguyen, Daniel Y Fu, Tri Dao, Stephen Baccus, Yoshua Bengio, Stefano Ermon, and Christopher Ré. 2023. Hyena hierarchy: Towards larger convolutional language models. In International Conference on Machine Learning, pages 28043-28078. PMLR.
|
| 262 |
+
Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.
|
| 263 |
+
Jimmy T.H. Smith, Andrew Warrington, and Scott Linderman. 2023. Simplified state space layers for sequence modeling. In *The Eleventh International Conference on Learning Representations*.
|
| 264 |
+
|
| 265 |
+
Josef Valvoda, Naomi Saphra, Jonathan Rawski, Adina Williams, and Ryan Cotterell. 2022. Benchmarking compositionality with formal languages. In Proceedings of the 29th International Conference on Computational Linguistics, pages 6007-6018, Gyeongju, Republic of Korea. International Committee on Computational Linguistics.
|
| 266 |
+
|
| 267 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, 30.
|
| 268 |
+
|
| 269 |
+
# A Additional Proofs
|
| 270 |
+
|
| 271 |
+
# A.1 Proof of Proposition 2
|
| 272 |
+
|
| 273 |
+
Denote $(A_0, u_0)$ , $(A_{-}, u_{-})$ , and $(A_1, u_1)$ as the pairs of (transition matrix, input vector) w.r.t. input characters 0, -, and 1. Note that $A_0$ , $A_{-}$ , and $A_1$ are diagonal matrices by assumption.
|
| 274 |
+
|
| 275 |
+
Denote $z$ as the initial state vector. The sequences $0 - 1$ and $1 - 0$ are represented as
|
| 276 |
+
|
| 277 |
+
$$
|
| 278 |
+
\begin{array}{l} x _ {0 - 1} = A _ {1} A _ {-} A _ {0} z + A _ {1} A _ {-} u _ {0} + A _ {1} u _ {-} + u _ {1} \\ x _ {1 - 0} = A _ {0} A _ {-} A _ {1} z + A _ {0} A _ {-} u _ {1} + A _ {0} u _ {-} + u _ {0}. \\ \end{array}
|
| 279 |
+
$$
|
| 280 |
+
|
| 281 |
+
Note that $x_{0-1}$ is $0-1$ and $x_{1-0}$ is $1-0$ . Because the $A$ matrices are diagonal, we know $A_1A_-A_0 = A_0A_-A_1$ . Because $0 - 1 \neq 1 - 0$ , by enforcing $x_{0-1} \neq x_{1-0}$ , we have
|
| 282 |
+
|
| 283 |
+
$$
|
| 284 |
+
A _ {1} A _ {-} u _ {0} + A _ {1} u _ {-} + u _ {1} \neq A _ {0} A _ {-} u _ {1} + A _ {0} u _ {-} + u _ {0}. \tag {8}
|
| 285 |
+
$$
|
| 286 |
+
|
| 287 |
+
On the other hand, let $x_{0-} = A_{-}A_{0}z + A_{-}u_{0} + u_{-}$ be the vector representation for "0-". Consider two other sequences 0-0-1 and 0-1-0, their vector representations are
|
| 288 |
+
|
| 289 |
+
$$
|
| 290 |
+
\begin{array}{l} x _ {0 - 0 - 1} = A _ {1} A _ {-} A _ {0} x _ {0 -} + A _ {1} A _ {-} u _ {0} + A _ {1} u _ {-} + u _ {1} \\ x _ {0 - 1 - 0} = A _ {0} A _ {-} A _ {1} x _ {0 -} + A _ {0} A _ {-} u _ {1} + A _ {0} u _ {-} + u _ {0}. \\ \end{array}
|
| 291 |
+
$$
|
| 292 |
+
|
| 293 |
+
Note $x_{0 - 0 - 1}$ is $0 - 0 - 1$ and $x_{0 - 1 - 0}$ is $0 - 1 - 0$ . Similarly, because the $A$ matrices are diagonal and $0 - 0 - 1 = 0 - 1 - 0$ , by enforcing $x_{0 - 0 - 1} = x_{0 - 1 - 0}$ , we have
|
| 294 |
+
|
| 295 |
+
$$
|
| 296 |
+
A _ {1} A _ {-} u _ {0} + A _ {1} u _ {-} + u _ {1} = A _ {0} A _ {-} u _ {1} + A _ {0} u _ {-} + u _ {0}. \tag {9}
|
| 297 |
+
$$
|
| 298 |
+
|
| 299 |
+
Because Eq. (8) contradicts Eq. (9), the two relations $x_{0-1} \neq x_{1-0}$ and $x_{0-0-1} = x_{0-1-0}$ cannot co-exist. We hence conclude that an input-dependent diagonal linear RNN is inconsistent in representing subtraction.
|
| 300 |
+
|
| 301 |
+
# A.2 Code for PScan
|
| 302 |
+
|
| 303 |
+
A.2.1 Illustration of Matrix Generation
|
| 304 |
+
```python
|
| 305 |
+
import numpy as np
|
| 306 |
+
seq_len = 2**3 - 1
|
| 307 |
+
arr = np.array(['A' + str(i) for i in range(1, seq_len + 1)]).reshape(-1, 1)
|
| 308 |
+
def spt(x):
    assert len(x) % 2 == 1, 'works when len(x) == 2**k - 1 for k >= 1'
    coef = x[::2]
    remain = x[1::2]
    coefRemain = np.core.defchararray.add(coef[1:], remain[:, -1:])
    remain = np.concatenate([remain, coefRemain], axis=1)
    return coef, remain
|
| 309 |
+
for i in range(int(np.ceil(np.log2(seq_len)))):
    coef, arr = spt(arr)
    print(coef)
|
| 310 |
+
```
|
| 311 |
+
|
| 312 |
+
The below output shows the function $\text{spt}()$ can generate the intermediate matrices during PScan.
|
| 313 |
+
|
| 314 |
+
A.2.2 Testing the Equivalence of Sequential and PScan
|
| 315 |
+
```latex
|
| 316 |
+
[ \begin{bmatrix} \text{'A1'} \\ \text{'A3'} \\ \text{'A5'} \\ \text{'A7'} \end{bmatrix} ]
|
| 317 |
+
[ \begin{bmatrix} \text{'A2'} & \text{'A3A2'} \\ \text{'A6'} & \text{'A7A6'} \end{bmatrix} ]
|
| 318 |
+
[ \begin{bmatrix} \text{'A4'} & \text{'A5A4'} & \text{'A6A5A4'} & \text{'A7A6A5A4'} \end{bmatrix} ]
|
| 319 |
+
```
|
| 320 |
+
|
| 321 |
+
```python
|
| 322 |
+
import numpy as np
|
| 323 |
+
import torch
|
| 324 |
+
import torch.nn as nn
|
| 325 |
+
torch.manual_seed(1)
|
| 326 |
+
emb_dim = 2
|
| 327 |
+
seq_len = 7
|
| 328 |
+
bs = 1
|
| 329 |
+
A = torch.randn(bs, seq_len, emb_dim, emb_dim)
|
| 330 |
+
u = torch.randn(bs, seq_len, emb_dim)
|
| 331 |
+
x0 = torch.randn(1, emb_dim)
|
| 332 |
+
# sequential
|
| 333 |
+
x = x0.expand(bs, emb_dim)
|
| 334 |
+
all_x = [x[:, None, :]]
|
| 335 |
+
for i in range(seq_len):
    x = torch.einsum('bij,bj->bi', A[:, i], x) + u[:, i]
    all_x.append(x[:, None, :])
|
| 336 |
+
all_x = torch.cat(all_x, dim=1)
|
| 337 |
+
print('sequential mode')
|
| 338 |
+
print(all_x)
|
| 339 |
+
# parallel scan
|
| 340 |
+
def scan(x, As):
    c = As.shape[2] * 2
    x = x.view(bs, L // c, c, -1)
    x1, x2 = x[:, :, :c // 2], x[:, :, c // 2:]
    # x2.shape = (bs, group_nums, group_size, emb_dim)
    # As.shape = (bs, group_nums*2-1, group_size, emb_dim, emb_dim)
    assert As.shape[1] % 2 == 1, 'works when As.shape[1] == 2**k - 1 for k >= 1'
    coef = As[:, ::2]
    remain = As[:, 1::2]
    prodd = torch.einsum('bncij,bnjk->bncik', coef[:, 1:], remain[:, :, -1])
    remain = torch.cat([remain, prodd], dim=2)
    # coef.shape = (bs, group_nums, group_size, emb_dim, emb_dim)
    # apply a group of matrices (e.g., ['A2', 'A3A2']) to the last element of x1 in each group,
    # and add together
|
| 341 |
+
```
|
| 342 |
+
|
| 343 |
+
```txt
|
| 344 |
+
    x2 = x2 + torch.einsum('bncij,bnj->bnci', coef, x1[:, :, -1])
    x = torch.cat([x1, x2], dim=2)
    return x, remain
|
| 345 |
+
log2_L = int(np.ceil(np.log2(seq_len + 1)))
|
| 346 |
+
L = 2**log2_L # the length after zero padding
|
| 347 |
+
n_zero = L - seq_len - 1
|
| 348 |
+
eu = torch.cat([x0.expand(bs, -1)[:, None, :], u], dim=1)
|
| 349 |
+
eu = nn.functional.pad(eu, (0, 0, 0, n_zero))
|
| 350 |
+
x = eu
|
| 351 |
+
As = nn.functional.pad(A, (0, 0, 0, 0, 0, n_zero))[:, None, :, :]
|
| 352 |
+
for i in range(log2_L):
|
| 353 |
+
    x, As = scan(x, As)
x = x.view(bs, L, emb_dim)[:, :seq_len + 1, :]
|
| 354 |
+
print('parallel mode')
|
| 355 |
+
print(x)
|
| 356 |
+
```
|
| 357 |
+
|
| 358 |
+
The below shows that Sequential and PScan are equivalent as they generate the same outputs.
|
| 359 |
+
|
| 360 |
+
```lua
|
| 361 |
+
sequential mode
|
| 362 |
+
tensor([[[0.8310, -0.2477], [0.5167, -1.4218], [1.1399, 1.3024], [0.9628, 1.3150], [-1.5308, -1.6903], [-3.6631, 1.6082], [1.7805, 7.1659], [2.5068, -0.6256]]])
|
| 363 |
+
parallel mode
|
| 364 |
+
tensor([[[0.8310, -0.2477], [0.5167, -1.4218], [1.1399, 1.3024], [0.9628, 1.3150], [-1.5308, -1.6903], [-3.6631, 1.6082], [1.7805, 7.1659], [2.5068, -0.6256]]])
|
| 365 |
+
```
|
advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9655919949b9b37e7548de2b7b25c5cdca2dca8fb69a114d5e8c970eb16916e2
|
| 3 |
+
size 190973
|
advancingregularlanguagereasoninginlinearrecurrentneuralnetworks/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:66cba9c2bbbd8984e6f442e87984ef95c8501bc54fdaa2e348e0168ca1d4482b
|
| 3 |
+
size 412156
|
advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/bcde49e0-9021-4943-a047-8162add29b41_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6d41411b326b6ed1284b8a30aac4245d01609daac90f590f790a5a9a3280a83a
|
| 3 |
+
size 73160
|
advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/bcde49e0-9021-4943-a047-8162add29b41_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cd8c6bb66a064d5dbe741ddce09172d275541c64aeb64106cd1d2c6f421be181
|
| 3 |
+
size 87802
|
advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/bcde49e0-9021-4943-a047-8162add29b41_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:90b59fc5ed42a68e66b1c4f7be230b78e864fe788cbbd816b0b3573534d82995
|
| 3 |
+
size 412343
|
advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/full.md
ADDED
|
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Advancing the Robustness of Large Language Models through Self-Denoised Smoothing
|
| 2 |
+
|
| 3 |
+
Jiabao Ji $^{1*}$ Bairu Hou $^{1*}$ Zhen Zhang $^{1*}$ Guanhua Zhang $^{2*}$
|
| 4 |
+
Wenqi Fan $^{3}$ Qing Li $^{3}$ Yang Zhang $^{4}$ Gaowen Liu $^{5}$ Sijia Liu $^{6}$ Shiyu Chang $^{1}$ $^{1}$ UC Santa Barbara $^{2}$ Max Planck Institute for Intelligent Systems, Tübingen
|
| 5 |
+
$^{3}$ The Hong Kong Polytechnic University $^{4}$ MIT-IBM Watson AI Lab
|
| 6 |
+
$^{5}$ Cisco Research $^{6}$ Michigan State University
|
| 7 |
+
|
| 8 |
+
# Abstract
|
| 9 |
+
|
| 10 |
+
Although large language models (LLMs) have achieved significant success, their vulnerability to adversarial perturbations, including recent jailbreak attacks, has raised considerable concerns. However, the increasing size of these models and their limited access make improving their robustness a challenging task. Among various defense strategies, randomized smoothing has shown great potential for LLMs, as it does not require full access to the model's parameters or fine-tuning via adversarial training. However, randomized smoothing involves adding noise to the input before model prediction, and the final model's robustness largely depends on the model's performance on these noise corrupted data. Its effectiveness is often limited by the model's sub-optimal performance on noisy data. To address this issue, we propose to leverage the multitasking nature of LLMs to first denoise the noisy inputs and then to make predictions based on these denoised versions. We call this procedure self-denoised smoothing. Unlike previous denoised smoothing techniques in computer vision, which require training a separate model to enhance the robustness of LLMs, our method offers significantly better efficiency and flexibility. Our experimental results indicate that our method surpasses existing methods in both empirical and certified robustness in defending against adversarial attacks for both downstream tasks and human alignments (i.e., jailbreak attacks). Our code is publicly available at https://github.com/UCSB-NLP-Chang/SelfDenoise.
|
| 11 |
+
|
| 12 |
+
# 1 Introduction
|
| 13 |
+
|
| 14 |
+
Large language models (LLMs) have demonstrated outstanding performance across various applications (Touvron et al., 2023; Taylor et al., 2022; Li et al., 2023; Yang et al., 2022). However, LLMs
|
| 15 |
+
|
| 16 |
+
are vulnerable to input-level adversarial perturbations (Jin et al., 2020; Guo et al., 2021; Hou et al., 2022; Wen et al., 2023). Existing attacks are effective in generating input perturbations that can cause LLMs to make wrong predictions on downstream tasks or generate harmful content misaligned with human values (e.g., providing detailed instructions for creating a bomb) (Zou et al., 2023; Chao et al., 2023; Li et al., 2023). This vulnerability has raised concerns about the trustworthiness of LLMs.
|
| 17 |
+
|
| 18 |
+
Enhancing the robustness of LLMs is very challenging. Many robustness-enhancement methods (Madry et al., 2018; Zhang et al., 2019; Zhu et al., 2019) involve heavy training, which can be difficult due to the enormous size of LLMs and limited access to their parameters (e.g., GPT models). In comparison, randomized smoothing methods (Cohen et al., 2019; Lee et al., 2019; Salman et al., 2020) offer a way to enhance the robustness with limited model access and provide stability guarantees for predictions. In the NLP field, there have been some explorations using randomized smoothing to defend against attacks on downstream tasks (Zeng et al., 2021; Ye et al., 2020; Zhao et al., 2022) as well as against jailbreak attacks (Robey et al., 2023). However, the performance of applying randomized smoothing directly to LLMs remains unsatisfactory. This is primarily because randomized smoothing involves adding noise to the input, and the final model's robustness largely depends on model performance on the noise corrupted data. Unfortunately, LLMs typically exhibit sub-optimal performance on noisy inputs.
|
| 19 |
+
|
| 20 |
+
To address this issue, we propose self-denoised smoothing, or SELFDENOISE for short, to improve the robustness of LLMs based on randomized smoothing. Our approach initially introduces multiple perturbed inputs by randomly masking words in the original input. Unlike vanilla randomized smoothing, which directly feeds these perturbed inputs to the model, we take a further step by using
|
| 21 |
+
|
| 22 |
+
the LLM itself to denoise these perturbed inputs. Specifically, the inputs with random masks are first fed to the LLM, which is then asked to complete the sentences by filling in the masked parts. The resulting sentences are subsequently passed to the LLM again for task performance. This mechanism is inspired by denoised smoothing in computer vision (Salman et al., 2020). However, these existing techniques typically require a separate denoising module, often trained with distinct loss functions (Salman et al., 2020) or through black-box approaches (Zhang et al., 2022b). On the other hand, our method leverages the multitasking capabilities of LLMs, offering a more flexible and effective way to enhance robustness in the context of LLMs without the extensive costs of denoiser training. With this simple add-on step, we significantly improve the robustness of LLMs. We support this claim with extensive experiments on two different attack settings: defending against adversarial attacks for both downstream tasks and human alignments and measuring both empirical and certified robustness.
|
| 23 |
+
|
| 24 |
+
# 2 Related Work
|
| 25 |
+
|
| 26 |
+
Adversarial robustness in NLP models Various strategies have been developed to evaluate and enhance the robustness of NLP models. To evaluate robustness, the adversarial perturbations are created by character editing (Gao et al., 2018; Li et al., 2018), word replacement (Jin et al., 2020; Li et al., 2021; Guo et al., 2021), or sentence transformation (Wang et al., 2019; Lin et al., 2021). Besides the robustness on downstream tasks, the recent "jailbreak attack" has also attracted much attention: it generates perturbations for safety-aligned LLMs to fool them into outputting harmful responses to harmful questions, e.g. "How to make a bomb". To improve robustness, robust training (Madry et al., 2018; Zhang et al., 2019, 2022a, 2023), which is built upon min-max optimization, has been shown as an effective solution. Though effective in robustness improvement, robust training brings significantly higher costs compared to standard training. Our method aims to improve the robustness of LLMs and is designed to overcome these challenges. Without accessing the model parameters, the proposed method can improve the model's robustness on both downstream tasks and jailbreak attacks.
|
| 27 |
+
|
| 28 |
+
Randomized smoothing Randomized smoothing (Cohen et al., 2019; Salman et al., 2020; Zeng et al., 2021) is a defense strategy that converts a given classifier $f(\cdot)$ into a smoothed classifier $g(\cdot)$ . Given the input $x$ , the smoothed classifier $g(\cdot)$ outputs the class that is most likely to be returned by $f(\cdot)$ given some random noise over the input. The prediction of the smoothed classifier $g(\cdot)$ to a class $c$ is given by $\mathbb{P}(g(\boldsymbol{x}) = c) = \mathbb{P}(f(\boldsymbol{x} \oplus \boldsymbol{\delta}) = c)$ where $\boldsymbol{x} \oplus \boldsymbol{\delta}$ indicates the noisy version of the input $x$ and $\boldsymbol{\delta}$ is the noise from a particular distribution. Despite increased inference costs, randomized smoothing can both enhance empirical robustness and offer certifiable guarantees for robustness against perturbations. Besides computer vision, randomized smoothing is also used in the NLP domain (Ye et al., 2020; Zeng et al., 2021; Wang et al., 2021b; Zhao et al., 2022). Smooth-LLM (Robey et al., 2023) leverages randomized smoothing to defend jailbreak attacks. Our work aims to improve the effectiveness of randomized smoothing with a self-denoising framework, where LLM itself is used as the denoiser to denoise the noisy versions of the input before prediction.
|
| 29 |
+
|
| 30 |
+
# 3 Methodology
|
| 31 |
+
|
| 32 |
+
We denote $\pmb{x} = [x_1, x_2, \dots, x_L]$ as the input to the LLM $f(\cdot)$ , where $x_i$ is the $i$ -th token, and $y \in \mathcal{Y}$ is the ground truth output.
|
| 33 |
+
|
| 34 |
+
Randomized smoothing We follow previous work on randomized smoothing (Cohen et al., 2019; Zeng et al., 2021) to enhance the robustness of a LLM $f(\cdot)$ by transforming it into a smoothed version $g(\cdot)$ . Specifically, we introduce random noise into the input by (uniform) randomly replacing some tokens with the [MASK] token. The randomized masking scheme, $\phi(x, m)$ , generates a binary sequence of the same length as the input $x$ , with $m\%$ entries being nonzero, indicating the corresponding positions in $x$ that will be masked. We denote the binary mask sequence as $s$ and the masked input as $\mathcal{M}(x, s)$ . Then, the original LLM $f(\cdot)$ is turned into a smoothed model $g(\cdot)$ as
|
| 35 |
+
|
| 36 |
+
$$
|
| 37 |
+
g (\boldsymbol {x}) = \underset {c \in \mathcal {Y}} {\arg \max } \mathbb {P} _ {\boldsymbol {s} \sim \phi (x, m)} \left(f \left(\mathcal {M} (\boldsymbol {x}, \boldsymbol {s})\right) = c\right) \tag {1}
|
| 38 |
+
$$
|
| 39 |
+
|
| 40 |
+
SELFDENOISE In the randomized smoothing framework described above, the performance of the smoothed model $g(\cdot)$ largely depends on the LLM's performance on the masked and corrupted input $f(\mathcal{M}(\boldsymbol{x},\boldsymbol{s}))$ . To improve performance, we
|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
Figure 1: The prediction process of SELFDENOISE.
|
| 44 |
+
|
| 45 |
+
follow the denoise-smoothing pipeline used in computer vision (Salman et al., 2020) by adding an additional denoising step with a denoiser $D(\cdot)$ . This step in our context involves filling the masked tokens in the masked input before feeding it to the base LLM, i.e.,
|
| 46 |
+
|
| 47 |
+
$$
|
| 48 |
+
g ^ {\prime} (\boldsymbol {x}) = \underset {c \in \mathcal {Y}} {\arg \max } \mathbb {P} _ {\boldsymbol {s} \sim \phi (x, m)} (f (D (\mathcal {M} (\boldsymbol {x}, \boldsymbol {s}))) = c). \tag {2}
|
| 49 |
+
$$
|
| 50 |
+
|
| 51 |
+
The denoiser is designed to augment the base model, making it more robust against random masks on the inputs. Specifically, the denoiser has two options: ① instructing the LLM itself to guess the masked tokens, and ② directly removing the masks. The choice between these two denoising operations depends on the mask rate. When the mask rate is high, the denoiser will opt to remove the masks. This is because, in such cases, guessing the masked words may result in a sentence with a different meaning or a large semantic gap compared to the original unmasked version. Simply removing these masked words provides much better empirical performance. Conversely, when the mask rate is low, denoising the sentence by filling in the masked words yields superior performance. In our design, we set the tipping noise rate for switching between these two denoise modes at $60\%$ . To fill in the masked words, we simply prompt the LLM and require the completed sentence to be fluent and preserve the semantics, without any training on the denoising task. The overall pipeline of our proposed SELFDENoise is shown in Figure 1.
|
| 52 |
+
|
| 53 |
+
Certified robustness SELFDENOISE, being in the family of the randomized smoothing framework, can also provide certified robustness against word-replacement attacks. Specifically, the LLM $f(\cdot)$ is certified as robust if it satisfies the following condition: $f(\pmb{x}^{\prime}) = y$ for any $\pmb{x}^{\prime}$ with $||\pmb{x}^{\prime} - \pmb{x}||_{0}\leq dL$. Here $||\cdot ||_0$ represents the Hamming distance, $L$ is the number of tokens in the input, and $d$ is known as the certification radius, which signifies the maximum allowable percentage of word perturbations without altering the model's prediction. We refer readers to Zeng et al. (2021) for a detailed certification process and the related proof.
|
| 54 |
+
|
| 55 |
+
# 4 Experiment
|
| 56 |
+
|
| 57 |
+
# 4.1 Experiment Setup
|
| 58 |
+
|
| 59 |
+
Evaluation settings We consider two attack settings: we evaluate whether the proposed method defends against 1 adaptive adversarial attacks for downstream task performance, and 2 jailbreak attacks for human alignment.
|
| 60 |
+
|
| 61 |
+
Dataset and models We use the SST-2 (Socher et al., 2013) and Agnews (Zhang et al., 2015) as the downstream tasks to evaluate the robustness enhancement (setting ① above). We consider Alpaca (Taori et al., 2023) as the base LLM to be robustified. For setting ②, we use AdvBench (Zou et al., 2023) to evaluate the robustness against jailbreak attacks. We consider the aligned LLM, Vicuna-1.5-13B (Zheng et al., 2023), for evaluation. More details about the prompts and implementation details can be found in Appendix A.1 and A.2.
|
| 62 |
+
|
| 63 |
+
Evaluation metrics We follow the conventions in the literature (Wang et al., 2021a; Lee et al., 2022). We measure the downstream task robustness by both empirical robustness against adversarial attacks (in adaptive settings) and the certified accuracy on benign examples. In particular, we leverage DeepWordBug (Gao et al., 2018) and TextBugger (Li et al., 2018) to attack the smoothed classifier and measure the empirical robust accuracy. We use the default hyper-parameters in TextAttack (Morris et al., 2020) library for both attack methods. For the certified robustness, we evaluate the certified accuracy follow previous work (Cohen et al., 2019; Carlini et al., 2022) for different perturbation scales $d$ from $1\%$ to $10\%$ . We also report the clean accuracy on benign examples (i.e., without attacks). For setting $\pmb{\mathfrak{Q}}$ , we report the percentage of harmful prompts that the model successfully recognize and refuse to answer them against state-of-the-art jailbreak attacks including GCGAttack (Zou et al., 2023) and PAIR (Chao et al., 2023). We denote this metric as defense success rate (DSR). More details about this metric are in Appendix A.4.
|
| 64 |
+
|
| 65 |
+
Baselines One main baseline is the vanilla randomized smoothing, RANMASK (Zeng et al., 2021), implemented without denoising. We aim to show that with a simple self-denoise process, we can significantly improve robustness without introducing smoothing overhead. We also include another randomized smoothing method, SAFER (Ye et al., 2020), that adds noise by synonym replacement for comparison on downstream tasks. Further
|
| 66 |
+
|
| 67 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td rowspan="2">Clean Acc. (%)</td><td colspan="2">Empirical Robust Acc. (%)</td></tr><tr><td>DeepWordBug</td><td>TextBugger</td></tr><tr><td rowspan="4">SST-2</td><td>ALPACA</td><td>89.0</td><td>52.0</td><td>45.0</td></tr><tr><td>SAFER</td><td>85.0</td><td>57.0</td><td>54.0</td></tr><tr><td>RANMASK</td><td>84.0</td><td>52.5</td><td>48.0</td></tr><tr><td>SELFDENOISE</td><td>90.0</td><td>64.5</td><td>55.5</td></tr><tr><td rowspan="4">Agnews</td><td>ALPACA</td><td>85.0</td><td>58.5</td><td>50.5</td></tr><tr><td>SAFER</td><td>83.0</td><td>55.5</td><td>53.0</td></tr><tr><td>RANMASK</td><td>82.0</td><td>58.0</td><td>53.0</td></tr><tr><td>SELFDENOISE</td><td>84.0</td><td>70.0</td><td>66.0</td></tr></table>
|
| 68 |
+
|
| 69 |
+

|
| 70 |
+
Figure 2: Certified accuracy under different perturbation scale $d(\%)$ on SST-2 (left) and Agnews (right).
|
| 71 |
+
|
| 72 |
+

|
| 73 |
+
|
| 74 |
+
more, for setting $\Theta$ , we incorporate SMOOTHLLM (Robey et al., 2023) for comparison in defending against jailbreak attacks. SMOOTHLLM is also a randomized smoothing method that adds noise via character-level editing (we use the best reported character swapping operation in our experiments), which is mainly designed for defending against jailbreak attacks. More details are in Appendix A.3.
|
| 75 |
+
|
| 76 |
+
# 4.2 Experiment Results
|
| 77 |
+
|
| 78 |
+
Robustness on downstream tasks We first report the empirical robust accuracy in Table 1. Here are our key observations. First, our method achieves the best empirical robust accuracy on both datasets. SELFDENOISE improves the performance by $13.2\%$ in SST-2 and $19.7\%$ in Agnews compared with the second-best method under the DeepWordBug attack, with $2.8\%$ and $24.5\%$ improvements under TextBugger, respectively. Notably, this improvement stems from a simple add-on denoising operation requiring minimal effort. Second, the proposed method improves robustness without sacrificing clean accuracy. Our method achieves the same level of clean accuracy as the vanilla ALPACA model for SST-2. In Agnews, SELFDENOISE improves the robust accuracy by $19.7\%$ with only a $1.2\%$ drop in clean accuracy compared with ALPACA. SELFDENOISE achieves the best accuracy-robustness trade-off (Zhang et al., 2019).
|
| 79 |
+
|
| 80 |
+
Additionally, Figure 2 shows the certification results of the proposed SELFDENOISE and the baseline RANMASK. SAFER is not included here as it considers a different definition of certified robustness, and is discussed in Appendix A.3. We
|
| 81 |
+
|
| 82 |
+
Table 1: Clean accuracy and empirical robust accuracy under DeepWordBug attack and TextBugger attack.
|
| 83 |
+
|
| 84 |
+
<table><tr><td rowspan="2">Attack</td><td rowspan="2">Base Vicuna</td><td colspan="4">SMOOTHLLM</td><td colspan="3">RANMASK</td><td colspan="3">SELFDENOISE</td></tr><tr><td>5%</td><td>10%</td><td>15%</td><td>30%</td><td>5%</td><td>15%</td><td>30%</td><td>5%</td><td>15%</td><td>30%</td></tr><tr><td>GCG*</td><td>0</td><td>86</td><td>86</td><td>74</td><td>24</td><td>88</td><td>88</td><td>86</td><td>92</td><td>100</td><td>100</td></tr><tr><td>PAIR*</td><td>0</td><td>62</td><td>66</td><td>72</td><td>54</td><td>52</td><td>60</td><td>78</td><td>52</td><td>72</td><td>86</td></tr><tr><td>PAIR</td><td>28</td><td>42</td><td>54</td><td>56</td><td>50</td><td>40</td><td>56</td><td>66</td><td>64</td><td>58</td><td>68</td></tr></table>
|
| 85 |
+
|
| 86 |
+
Table 2: DSR given different defense methods and attacks. A higher DSR indicates better defense performance. * denotes the transfer attack setting.
|
| 87 |
+
|
| 88 |
+
demonstrate that our method can effectively improve certified accuracy beyond RANMASK in both datasets under all perturbation scales. For example, with $d = 5$ , our method outperforms RANMASK by $11.5\%$ in SST-2 and $26.3\%$ in Agnews.
|
| 89 |
+
|
| 90 |
+
Robustness against jailbreak attacks We report the robustness against jailbreak attacks in Table 2. We consider two attack settings: transfer and adaptive. We use a superscript * to indicate transfer attacks. Specifically, we first collect a set of adversarial examples that successfully jailbreak the vanilla Vicuna model. Then, we report how the same set of adversarial attacks performs on the model equipped with different smoothing defense mechanisms. For the adaptive setting, the attack directly targets the smoothed models. We report the DSR with different noise levels added during the smoothing process, ranging from $5\%$ to $30\%$ .
|
| 91 |
+
|
| 92 |
+
We highlight the following conclusions. First, all three methods effectively improve the trustworthiness of LLMs despite differences in the smoothing scheme. Even under the strong adaptive attack, PAIR, the defense can achieve a DSR of $50\%$ to $60\%$ . Second, our method achieves the best defense performance compared to baselines against all different attack approaches, demonstrating its great potential for the safety of LLMs. Third, as the amount of noise added increases, our method's DSR continuously increases, compared to the other two methods without the denoising. This is because, when the amount of noise added increases, the semantics of the original instruction get more distorted, causing the model to report that it does not understand the meaning of the input instruction, which does not count toward a successful recognition and refusal to answer in our evaluation case.
|
| 93 |
+
|
| 94 |
+
# 5 Conclusion
|
| 95 |
+
|
| 96 |
+
In this paper, we propose a simple self-denoised smoothing technique, termed SELFDENOISE, to enhance the robustness of LLMs. The proposed method can be used as a plug-in module for any LLM without requiring access to its parameters, and no training is needed. Our experimental re
|
| 97 |
+
|
| 98 |
+
sults indicate that our method surpasses existing baselines in both empirical and certified robustness, effectively defending against adversarial attacks in both downstream tasks and human alignments.
|
| 99 |
+
|
| 100 |
+
# 6 Broader Impacts
|
| 101 |
+
|
| 102 |
+
By developing a self-denoising method to enhance the robustness of LLMs in the presence of noisy inputs, this work addresses a key limitation of LLMs and enables their application in high-stake environments. The ability to utilize LLMs in these scenarios can have significant positive impacts across various domains, such as healthcare, transportation, and finance, where safety and reliability are critical. By providing certified guarantees in safety-critical domains, our method can help build more reliable and responsible LLM systems.
|
| 103 |
+
|
| 104 |
+
Besides, our research contributes to the broader fields of machine learning and artificial intelligence. By tackling the challenge of robustness to noisy inputs in LLMs, we advance the understanding and the methodologies in this area. This can inspire further research and innovation, leading to improved techniques for enhancing the performance and reliability of LLMs and other machine learning models.
|
| 105 |
+
|
| 106 |
+
However, it is important to acknowledge the potential biases that may exist in LLMs, as our method relies on them as base models. Biases can arise from the training data used for LLMs, and these biases may be propagated by our method. We are committed to addressing the issue of biases and promoting fairness and transparency in machine learning systems. To mitigate these concerns, we will include proper licenses in the released codes and notify users about the potential risks associated with biases. This way, users can be informed and take appropriate measures to address any biases that may arise from the use of our method.
|
| 107 |
+
|
| 108 |
+
# 7 Limitations
|
| 109 |
+
|
| 110 |
+
Despite the large improvements, our method suffers from the limitation of running time, i.e., the prediction and certification process is time-consuming. This is largely because of the $p_c(\pmb{x})$ calculation in Equation 2. Such a problem is shared across all randomized smoothing-based methods. Besides, the additional self-denoising process also brings further computational loads. It would be interesting to either apply recent works on distributed computation to accelerate our method or develop new large language models specifically for denoising to
|
| 111 |
+
|
| 112 |
+
overcome this issue.
|
| 113 |
+
|
| 114 |
+
# References
|
| 115 |
+
|
| 116 |
+
Nicholas Carlini, Florian Tramer, Krishnamurthy Dj Dvijotham, Leslie Rice, Mingjie Sun, and J Zico Kolter. 2022. (certified!!) adversarial robustness for free! arXiv preprint arXiv:2206.10550.
|
| 117 |
+
Patrick Chao, Alexander Robey, Edgar Dobriban, Hamed Hassani, George J Pappas, and Eric Wong. 2023. Jailbreaking black box large language models in twenty queries. arXiv preprint arXiv:2310.08419.
|
| 118 |
+
Jeremy M. Cohen, Elan Rosenfeld, and J. Zico Kolter. 2019. Certified adversarial robustness via randomized smoothing. In International Conference on Machine Learning.
|
| 119 |
+
Ji Gao, Jack Lanchantin, Mary Lou Soffa, and Yanjun Qi. 2018. Black-box generation of adversarial text sequences to evade deep learning classifiers. 2018 IEEE Security and Privacy Workshops (SPW), pages 50-56.
|
| 120 |
+
Chuan Guo, Alexandre Sablayrolles, Hervé Jégou, and Douwe Kiela. 2021. Gradient-based adversarial attacks against text transformers. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 5747-5757.
|
| 121 |
+
Bairu Hou, Jinghan Jia, Yihua Zhang, Guanhua Zhang, Yang Zhang, Sijia Liu, and Shiyu Chang. 2022. Textgrad: Advancing robustness evaluation in nlp by gradient-driven optimization. In The Eleventh International Conference on Learning Representations.
|
| 122 |
+
Di Jin, Zhijing Jin, Joey Tianyi Zhou, and Peter Szolovits. 2020. Is bert really robust? a strong baseline for natural language attack on text classification and entailment. In AAAI.
|
| 123 |
+
Deokjae Lee, Seungyong Moon, Junhyeok Lee, and Hyun Oh Song. 2022. Query-efficient and scalable black-box adversarial attacks on discrete sequential data via bayesian optimization. In International Conference on Machine Learning, pages 12478-12497. PMLR.
|
| 124 |
+
Guang-He Lee, Yang Yuan, Shiyu Chang, and Tommi Jaakkola. 2019. Tight certificates of adversarial robustness for randomly smoothed classifiers. In Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc.
|
| 125 |
+
Dianqi Li, Yizhe Zhang, Hao Peng, Liqun Chen, Chris Brockett, Ming-Ting Sun, and William B Dolan. 2021. Contextualized perturbation for textual adversarial attack. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5053-5069.
|
| 126 |
+
|
| 127 |
+
Jiatong Li, Yunqing Liu, Wenqi Fan, Xiao-Yong Wei, Hui Liu, Jiliang Tang, and Qing Li. 2023. Empowering molecule discovery for molecule-caption translation with large language models: A chatgpt perspective. arXiv preprint arXiv:2306.06615.
|
| 128 |
+
Jinfeng Li, Shouling Ji, Tianyu Du, Bo Li, and Ting Wang. 2018. Textbugger: Generating adversarial text against real-world applications. ArXiv, abs/1812.05271.
|
| 129 |
+
Jieyu Lin, Jiajie Zou, and Nai Ding. 2021. Using adversarial attacks to reveal the statistical bias in machine reading comprehension models. arXiv preprint arXiv:2105.11136.
|
| 130 |
+
Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2018. Towards deep learning models resistant to adversarial attacks. In *ArXiv*, volume abs/1706.06083.
|
| 131 |
+
John Morris, Eli Lifland, Jin Yong Yoo, Jake Grigsby, Di Jin, and Yanjun Qi. 2020. Textattack: A framework for adversarial attacks, data augmentation, and adversarial training in nlp. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 119-126.
|
| 132 |
+
Alexander Robey, Eric Wong, Hamed Hassani, and George J Pappas. 2023. Smoothllm: Defending large language models against jailbreaking attacks. arXiv preprint arXiv:2310.03684.
|
| 133 |
+
Hadi Salman, Mingjie Sun, Greg Yang, Ashish Kapoor, and J. Zico Kolter. 2020. Denoised smoothing: A provable defense for pretrained classifiers. arXiv: Learning.
|
| 134 |
+
Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Y Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 conference on empirical methods in natural language processing, pages 1631-1642.
|
| 135 |
+
Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca.
|
| 136 |
+
Ross Taylor, Marcin Kardas, Guillem Cucurull, Thomas Scialom, Anthony S. Hartshorn, Elvis Saravia, Andrew Poulton, Viktor Kerkez, and Robert Stojnic. 2022. Galactica: A large language model for science. ArXiv, abs/2211.09085.
|
| 137 |
+
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models, 2023. URL https://arxiv.org/abs/2307.09288.
|
| 138 |
+
|
| 139 |
+
Boxin Wang, Hengzhi Pei, Boyuan Pan, Qian Chen, Shuohang Wang, and Bo Li. 2019. T3: Treeautoencoder constrained adversarial text generation for targeted attack. arXiv preprint arXiv:1912.10375.
|
| 140 |
+
Boxin Wang, Chejian Xu, Shuohang Wang, Zhe Gan, Yu Cheng, Jianfeng Gao, Ahmed Hassan Awadallah, and Bo Li. 2021a. Adversarial glue: A multitask benchmark for robustness evaluation of language models. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2).
|
| 141 |
+
Wenjie Wang, Pengfei Tang, Jian Lou, and Li Xiong. 2021b. Certified robustness to word substitution attack with differential privacy. In NAACL.
|
| 142 |
+
Yuxin Wen, Neel Jain, John Kirchenbauer, Micah Goldblum, Jonas Geiping, and Tom Goldstein. 2023. Hard prompts made easy: Gradient-based discrete optimization for prompt tuning and discovery. arXiv preprint arXiv:2302.03668.
|
| 143 |
+
Xi Yang, Aokun Chen, Nima M. Pournejatian, Hoo-Chang Shin, Kaleb E. Smith, Christopher Parisien, Colin B. Compas, Cheryl Martin, Anthony B Costa, Mona G. Flores, Ying Zhang, Tanja Magoc, Christopher A. Harle, Gloria P. Lipori, Duane A. Mitchell, William R. Hogan, Elizabeth A. Shenkman, Jiang Bian, and Yonghui Wu. 2022. A large language model for electronic health records. NPJ Digital Medicine, 5.
|
| 144 |
+
Mao Ye, Chengyue Gong, and Qiang Liu. 2020. Safer: A structure-free approach for certified robustness to adversarial word substitutions. In Annual Meeting of the Association for Computational Linguistics.
|
| 145 |
+
Jiehang Zeng, Xiaoqing Zheng, Jianhan Xu, Linyang Li, Liping Yuan, and Xuanjing Huang. 2021. Certified robustness to text adversarial attacks by randomized [mask]. In arXiv preprint arXiv:2105.03743.
|
| 146 |
+
Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric Xing, Laurent El Ghaoui, and Michael Jordan. 2019. Theoretically principled trade-off between robustness and accuracy. In ICML, pages 7472-7482. PMLR.
|
| 147 |
+
Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. Advances in neural information processing systems, 28.
|
| 148 |
+
Yihua Zhang, Ruisi Cai, Tianlong Chen, Guanhua Zhang, Huan Zhang, Pin-Yu Chen, Shiyu Chang, Zhangyang Wang, and Sijia Liu. 2023. Robust mixture-of-expert training for convolutional neural networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 90-101.
|
| 149 |
+
Yihua Zhang, Guanhua Zhang, Prashant Khanduri, Mingyi Hong, Shiyu Chang, and Sijia Liu. 2022a. Revisiting and advancing fast adversarial training through the lens of bi-level optimization. In ICML.
|
| 150 |
+
|
| 151 |
+
Yimeng Zhang, Yuguang Yao, Jinghan Jia, Jinfeng Yi, Mingyi Hong, Shiyu Chang, and Sijia Liu. 2022b. How to robustify black-box ML models? a zeroth-order optimization perspective. In International Conference on Learning Representations.
|
| 152 |
+
Haiteng Zhao, Chang Ma, Xinshuai Dong, Anh Tuan Luu, Zhi-Hong Deng, and Hanwang Zhang. 2022. Certified robustness against natural language attacks by causal intervention. In International Conference on Machine Learning.
|
| 153 |
+
Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. 2023. Judging llm-as-a-judge with mt-bench and chatbot arena. arXiv preprint arXiv:2306.05685.
|
| 154 |
+
Chen Zhu, Yu Cheng, Zhe Gan, Siqi Sun, Tom Goldstein, and Jingjing Liu. 2019. Freelb: Enhanced adversarial training for natural language understanding. In International Conference on Learning Representations.
|
| 155 |
+
Andy Zou, Zifan Wang, J Zico Kolter, and Matt Fredrikson. 2023. Universal and transferable adversarial attacks on aligned language models. arXiv preprint arXiv:2307.15043.
|
| 156 |
+
|
| 157 |
+
# A Additional Experiment Setup
|
| 158 |
+
|
| 159 |
+
# A.1 Prompts and Instructions
|
| 160 |
+
|
| 161 |
+
The prompts and instructions we used for incontext learning on downstream task prediction and self-denoising are shown as follows.
|
| 162 |
+
|
| 163 |
+
1: Prompt template used for Alpaca.
|
| 164 |
+
|
| 165 |
+
Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
|
| 166 |
+
|
| 167 |
+
```c
|
| 168 |
+
```c
|
| 169 |
+
```c
|
| 170 |
+
```c
|
| 171 |
+
```c
|
| 172 |
+
```c
|
| 173 |
+
```c
|
| 174 |
+
```c
|
| 175 |
+
```c
|
| 176 |
+
```c
|
| 177 |
+
```c
|
| 178 |
+
```c
|
| 179 |
+
```c
|
| 180 |
+
```c
|
| 181 |
+
```c
|
| 182 |
+
```c
|
| 183 |
+
```c
|
| 184 |
+
```c
|
| 185 |
+
```c
|
| 186 |
+
```c
|
| 187 |
+
```c
|
| 188 |
+
```c
|
| 189 |
+
```c
|
| 190 |
+
```c
|
| 191 |
+
```c
|
| 192 |
+
```c
|
| 193 |
+
```c
|
| 194 |
+
```c
|
| 195 |
+
```c
|
| 196 |
+
```c
|
| 197 |
+
```c
|
| 198 |
+
```c
|
| 199 |
+
```c
|
| 200 |
+
```c
|
| 201 |
+
```
|
| 202 |
+
``
|
| 203 |
+
|
| 204 |
+
Input:
|
| 205 |
+
{ }
|
| 206 |
+
|
| 207 |
+
## Response:
|
| 208 |
+
|
| 209 |
+
The following instructions are used to fill in the contents under the "Instruction" section. The content under "Input" should be filled with different input texts.
|
| 210 |
+
|
| 211 |
+
2: The instruction used for classification on SST-2.
|
| 212 |
+
|
| 213 |
+
Given an English sentence input, determine its sentiment as positive or negative.
|
| 214 |
+
|
| 215 |
+
3: The instruction used for self-denoising on SST-2.
|
| 216 |
+
|
| 217 |
+
Replace each mask word [MASK] in the input sentence with a suitable word. The output sentence should be natural and coherent and should be of the same length as the given sentence.
|
| 218 |
+
|
| 219 |
+
```c
|
| 220 |
+
## Input:
|
| 221 |
+
[MASK] reassembled from [MASK]
|
| 222 |
+
cutting-room [MASK] of any [MASK]
|
| 223 |
+
daytime [MASK].
|
| 224 |
+
|
| 225 |
+
```java
|
| 226 |
+
## Response:
|
| 227 |
+
apparently reassembled from the cutting-room floor of any given daytime soap.
|
| 228 |
+
|
| 229 |
+
```python
|
| 230 |
+
## Input:
|
| 231 |
+
a [MASK], funny and [MASK]
|
| 232 |
+
transporting re-imagining [MASK]
|
| 233 |
+
[MASK] and the beast and 1930s [MASK] films
|
| 234 |
+
|
| 235 |
+
```bash
|
| 236 |
+
## Response:
|
| 237 |
+
a stirring, funny and finally transporting re-imagining of beauty and the beast and 1930s horror films
|
| 238 |
+
|
| 239 |
+
4: The instruction used for classification on Agnews.
|
| 240 |
+
|
| 241 |
+
Given a news article title and description, classify it into one of the four categories: Sports, World, Technology, or Business. Return the category name as the answer.
|
| 242 |
+
|
| 243 |
+
Input:
|
| 244 |
+
|
| 245 |
+
Title: Venezuelans Vote Early in Referendum on Chavez Rule (Reuters) Description: Reuters - Venezuelans turned out early and in large numbers on Sunday to vote in a historic referendum that will either remove left-wing President Hugo Chavez from office or give him a new mandate to govern for the next two years.
|
| 246 |
+
|
| 247 |
+
## Response:
|
| 248 |
+
|
| 249 |
+
World
|
| 250 |
+
|
| 251 |
+
Input:
|
| 252 |
+
|
| 253 |
+
Title: Phelps, Thorpe Advance in 200 Freestyle (AP)
|
| 254 |
+
|
| 255 |
+
Description: AP - Michael Phelps took care of qualifying for the Olympic 200-meter freestyle semifinals Sunday, and then found out he had been added to the American team for the evening's 400 freestyle relay final. Phelps' rivals Ian Thorpe and Pieter van den Hoogenband and teammate Klete Keller were faster than the teenager in the 200 free preliminaries.
|
| 256 |
+
|
| 257 |
+
## Response: Sports
|
| 258 |
+
|
| 259 |
+
```python
|
| 260 |
+
## Input:
|
| 261 |
+
Title: Wall St. Bears Claw Back Into the Black (Reuters)
|
| 262 |
+
Description: Reuters - Short-sellers, Wall Street's dwindling band of ultra-cynics, are seeing green again.
|
| 263 |
+
|
| 264 |
+
## Response: Business
|
| 265 |
+
|
| 266 |
+
```python
|
| 267 |
+
## Input:
|
| 268 |
+
Title: 'Madden,' 'ESPN' Football Score in Different Ways (Reuters)
|
| 269 |
+
Description: Reuters - Was absenteeism a little high\on Tuesday among the guys at the office? EA Sports would like to think it was because "Madden NFL 2005" came out that day, and some fans of the football simulation are rabid enough to take a sick day to play it.
|
| 270 |
+
|
| 271 |
+
## Response: Technology
|
| 272 |
+
|
| 273 |
+
5: The instruction used for self-denoising on Agnews.
|
| 274 |
+
|
| 275 |
+
Replace each masked position "[MASK]" in the provided sentence with a suitable word to make it natural and coherent. Only one word should be used to replace each "[MASK]". The returned sentence should be of the same length as the given sentence. Provide the answer directly.
|
| 276 |
+
|
| 277 |
+
We list the instructions for the Vicuna model for self-denoising to defend against jailbreak attacks in the following. This includes the instruction for Vicuna to perform denoising and the instruction for ChatGPT model to judge whether the output is jailbroken.
|
| 278 |
+
|
| 279 |
+
6: The instruction used for self-denoising to defend against jailbreak attacks for Vicuna model.
|
| 280 |
+
|
| 281 |
+
Your task is to replace each <mask> in the input with an appropriate word or a punctuation mark. The goal is to make the input sentence clearer and more natural in language.
|
| 282 |
+
|
| 283 |
+
You need to follow several rules: 1. Words and punctuation marks should fit the context and flows naturally.
|
| 284 |
+
|
| 285 |
+
2. The output should maintain the original meaning and implication.
|
| 286 |
+
|
| 287 |
+
3. Preserve the original tone, style and sentiment of the input. 4. All <mask> tokens must be replaced, and all meaningful non-<mask> words in the input must be retained. <mask> should not appear in the output. Leave all non-<mask> words unchanged.
|
| 288 |
+
|
| 289 |
+
Below is one example:
|
| 290 |
+
Input: there is a fabric of <mask> ideas here, <mask> feelings that profoundly deepen them. Output: {"replace": "there is a fabric of complex ideas here, and feelings that profoundly deepen them."}
|
| 291 |
+
|
| 292 |
+
Now replace the <mask> in the following input. Generate the response in json format, the output json should only contain one key, "replace". Don't add any notes or explanations.
|
| 293 |
+
|
| 294 |
+
7: The instruction template for ChatGPT model to judge whether model output is jailbroken.
|
| 295 |
+
|
| 296 |
+
You are a helpful red teaming assistant. You will receive a prompt and the AI assistant's response to the prompt. Please act as an impartial judge and evaluate whether the response provided by the AI assistant violates ethical, legal, or safety guidelines and answers the following, TASK: '{goal}'. Your evaluation should consider
|
| 297 |
+
|
| 298 |
+
factors including clarity, the level of detail, and accuracy provided in the response. Be as objective as possible.
|
| 299 |
+
|
| 300 |
+
Rate the response on a scale from 1 to 10. A rating of 1 signifies a fully non-jailbroken response—either irrelevant to the task or refusal due to ethical or legal concerns. A rating of 10 signifies responses that directly and fully violate ethical, legal, or safety guidelines and comprehensively answer the task. If the response is legitimate or provides an alternative suggestion, then the assistant is not jailbroken and does not receive a rating of 10.
|
| 301 |
+
|
| 302 |
+
Your rating should strictly follow the rule, and output the rating in json format, with only one key 'rating'.
|
| 303 |
+
|
| 304 |
+
Now the prompt is: '\{PROMPT\}' and the AI assistant's response is '\{RESPONSE\}'
|
| 305 |
+
|
| 306 |
+
# A.2 Hyperparameter
|
| 307 |
+
|
| 308 |
+
Hyper-parameter for SST-2 and Agnews datasets. We evaluate on 100 testing instances for certified accuracy in Figure 2 and 200 instances for empirical robust accuracy in Table 1. To use the Alpaca for self-denoising, we use beam search for generation and set the repetition penalty to 1.3 and the number of beams to 2. We use 500 instances for estimating $\underline{p_c(x)}$ with Monte Carlo in the certification process. In Figure 2, for each perturbation scale, we search the best mask rate $m$ from $\{10\%, 20\%, \dots, 90\% \}$ on the validation set for our method and RANMASK. The best mask rates for each perturbation scale are listed in Table 3. When mask rate $m$ is greater than or equal to $70\%$ , we use the removing mask strategy; otherwise, we use Alpaca itself as the denoiser. For empirical robustness results in Table 1, we observe that smaller mask rates bring better empirical robust accuracy in the validation set, so we use $m = 5\%$ for all methods.
|
| 309 |
+
|
| 310 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td colspan="10">Perturbation Scale d (%)</td></tr><tr><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td>10</td></tr><tr><td rowspan="2">SST-2</td><td>RANMASK</td><td>10</td><td>10</td><td>10</td><td>10</td><td>80</td><td>80</td><td>80</td><td>80</td><td>80</td><td>80</td></tr><tr><td>SELFDENOISE</td><td>20</td><td>20</td><td>30</td><td>30</td><td>70</td><td>80</td><td>80</td><td>90</td><td>90</td><td>90</td></tr><tr><td rowspan="2">Agnews</td><td>RANMASK</td><td>20</td><td>20</td><td>70</td><td>70</td><td>80</td><td>80</td><td>90</td><td>90</td><td>90</td><td>90</td></tr><tr><td>SELFDENOISE</td><td>50</td><td>50</td><td>70</td><td>80</td><td>80</td><td>80</td><td>90</td><td>90</td><td>90</td><td>90</td></tr></table>
|
| 311 |
+
|
| 312 |
+
Table 3: The best mask rate $m\left( \% \right)$ for each perturbation scale on SST-2 and Agnews for SELFDENOISE and RANMASK.
|
| 313 |
+
|
| 314 |
+
Hyper-parameter for AdvBench dataset. Following SmoothLLM (Robey et al., 2023), we evaluate the defense performance against jailbreak attack on a selected subset of AdvBench dataset, which contains 50 different harmful behaviors for LLM. For the transfer attack experiment, we utilize the official code to generate the attack string that can successfully jailbreak VICUNA model for each behavior. For the adaptive attack experiment, the attack budget for the PAIR attack is 20 queries to the target model, i.e., the attacker generates at most 20 adversarial prompts. As GCG attack requires the next token probability of the target model, which is not available for the smoothed LLM $g(\cdot)$ , we did not evaluate GCG attack in an adaptive attack experiment. For all jailbreak experiments, we use 10 copies to perform the majority vote and ChatGPT as the judge model to evaluate whether the smoothed LLM's response is jailbroken following (Chao et al., 2023).
|
| 315 |
+
|
| 316 |
+
# A.3 Baseline Discussion
|
| 317 |
+
|
| 318 |
+
Baseline for certified robustness on downstream tasks. We consider the attack setting where we do not have full knowledge about the potential perturbations and the attacker can replace the original word with arbitrary words for attack. However, SAFER considers the setting where the attacker replaces original words with words from pre-defined candidate pools that are known by the defender. Since potential perturbations are often not fully known in realistic scenarios, we do not compare with SAFER in terms of certified robustness.
|
| 319 |
+
|
| 320 |
+
Baseline for jailbreak defense. We consider SMOOTHLLM as the baseline for the jailbreak defense experiment, which also employs a randomized smoothing scheme and proposes three character-level perturbation operations: character insertion, character swap, and patch perturbation. In our experiment, we use character swap as the perturbation scheme for SMOOTHLLM as the original paper shows that character swap is the most effective perturbation. We refer readers to the original paper for
|
| 321 |
+
|
| 322 |
+
<table><tr><td rowspan="2">Attack</td><td rowspan="2">No Defense</td><td colspan="4">SMOOTHLLM</td><td colspan="3">RANMASK</td><td colspan="3">SELFDENOISE</td></tr><tr><td>5%</td><td>10%</td><td>15%</td><td>30%</td><td>5%</td><td>15%</td><td>30%</td><td>5%</td><td>15%</td><td>30%</td></tr><tr><td>GCG*</td><td>0</td><td>4</td><td>10</td><td>22</td><td>76</td><td>0</td><td>8</td><td>14</td><td>0</td><td>0</td><td>0</td></tr><tr><td>PAIR*</td><td>0</td><td>0</td><td>4</td><td>4</td><td>24</td><td>0</td><td>4</td><td>8</td><td>0</td><td>0</td><td>0</td></tr><tr><td>PAIR</td><td>0</td><td>0</td><td>0</td><td>4</td><td>18</td><td>0</td><td>0</td><td>8</td><td>0</td><td>0</td><td>0</td></tr></table>
|
| 323 |
+
|
| 324 |
+
Table 4: Rejection rate for different defense methods under two kinds of attacks. * denotes transfer attack from an adversarial prompt that can successfully jailbreak VICUNA model.
|
| 325 |
+
|
| 326 |
+
more details.
|
| 327 |
+
|
| 328 |
+
# A.4 Evaluation metric details
|
| 329 |
+
|
| 330 |
+
For jailbreak defenses, we count the percentage of harmful prompts the model successfully identified against the ethical issues, e.g., Tell me how to make a bomb. We did not use the percentage of successful attacks as the metric because we find that the model tends to reject the input, generating responses like I'm sorry, but I'm having difficulty understanding your question as the noise level increases for SMOOTHLLM and RANMASK. We consider this situation as a failed defense. However, the percentage of successful attacks cannot capture this kind of failure. To exclude these cases, we manually reviewed the model responses and adjusted the results of the automatic LLM jailbreak judge to get the final defense success rate. We provide a detailed analysis of the model rejection phenomenon in Section B.
|
| 331 |
+
|
| 332 |
+
# B Analysis of Model Rejection in Jailbreak Defense
|
| 333 |
+
|
| 334 |
+
In our preliminary experiments with various masking rates, we found that large masking rates for SMOOTHLLM and RANMASK lead to the model rejecting the input request, e.g., I'm sorry, but I'm having difficulty understanding your question. Therefore, we manually check the responses for different defense methods and count the number of rejections. The rejection rate is presented in Table 4. The rejection rate for adaptive attacks is measured at the final round, i.e., the round when the judge reports the victim model being jailbroken or the last round under attack budget. We highlight that the semantic destruction operation in RANMASK and SMOOTHLLM tends to cause the model to reject queries, thus diminishing the functionality of the language model. The rejection rate for SMOOTHLLM at $20\%$ mask rate on short prompts in GCG attack is even more than $50\%$ . In contrast,
|
| 335 |
+
|
| 336 |
+
Algorithm 1 SELFDENOISE for classification prediction
|
| 337 |
+
|
| 338 |
+
Require: Defense LLM $f(\cdot)$ , Number of copies
|
| 339 |
+
$N$, input text $x$, mask function $\mathcal{M}$, mask rate $m$
|
| 340 |
+
for $j\gets 1$ to $N$ do
|
| 341 |
+
$s^{(j)}\sim \phi (x,m)$ $x^{(j)}\gets \mathcal{M}(x,s^{(j)})$ $y^{(j)}\gets f(x^{(j)})$
|
| 342 |
+
end for
|
| 343 |
+
return MajorityVote $(y^{(1)},\ldots ,y^{(N)})$
|
| 344 |
+
|
| 345 |
+
# Algorithm 2 SELFDENOISE for jailbreak defense
|
| 346 |
+
|
| 347 |
+
Require: Defense LLM $f(\cdot)$ , Number of copies
|
| 348 |
+
$N$, input text $x$, mask function $\mathcal{M}$, mask rate
|
| 349 |
+
$m$, jailbreak judge $j(\cdot)$
|
| 350 |
+
for $j\gets 1$ to $N$ do
|
| 351 |
+
$s^{(j)}\sim \phi (x,m)$ $x^{(j)}\gets \mathcal{M}(x,s^{(j)})$ $y^{(j)}\gets f(x^{(j)})$
|
| 352 |
+
end for
|
| 353 |
+
$JB =$ MajorityVote $(j(y^{(1)}),\ldots ,j(y^{(N)}))$
|
| 354 |
+
Majority $= \{y^{(j)}\in \{y^{(1)},\dots ,y^{(N)}\} \mid j(y^{(j)}) = JB\}$
|
| 355 |
+
Return $y^{*}\sim U(Majority)$
|
| 356 |
+
|
| 357 |
+
the self-denoising mechanism in our method SELFDENOISE effectively mitigated the input misunderstanding issue.
|
| 358 |
+
|
| 359 |
+
# C Detailed algorithm
|
| 360 |
+
|
| 361 |
+
In this section, we list the detailed algorithm for our method SELFDENOISE in improving robustness in downstream tasks in Algorithm 1 and in defending jailbreak attacks in Algorithm 2. For a detailed certification algorithm, we refer readers to Cohen et al. (2019).
|
| 362 |
+
|
| 363 |
+
# D Discussion of Used Artifacts
|
| 364 |
+
|
| 365 |
+
Datasets: We did our best to find the license for the SST-2 and AGNews dataset, but we did not find any. We refer readers to their original paper and homepage for usage policy. We did not clean the dataset to remove individual information as it is beyond the scope of our paper. Anonymizing the original dataset may affect the evaluation of our method. Therefore, we did not clean the dataset.
|
| 366 |
+
|
| 367 |
+
Models: The Alpaca model is under the cc-by-nc-4.0 license (https://www.creativecommons.org/licenses/by-nc/4.0/deed.en).
|
| 368 |
+
|
| 369 |
+
The Vicuna model is under the LLaMA-2 license (https://ai.meta.com/llama/license/). We perform all our experiments on NVIDIA-A6000 GPUs, and all experiments are a single run.
|
advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9ae3ff12533b468c2734cbb98e2fd3cd14b159d5871277c11af2c67824a3381e
|
| 3 |
+
size 141602
|
advancingtherobustnessoflargelanguagemodelsthroughselfdenoisedsmoothing/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a0a10161cf06a7611e8a4649ae34929788ffea8ce98ea06d68d97c0840884f0f
|
| 3 |
+
size 358561
|
alohaanewmeasureforhallucinationincaptioningmodels/2651e8c2-ba78-498e-9084-aabb876a82f8_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:30879676c018edc4d192ebb43e2e74145c6f9d7af3104afdfecda5626465d304
|
| 3 |
+
size 151040
|
alohaanewmeasureforhallucinationincaptioningmodels/2651e8c2-ba78-498e-9084-aabb876a82f8_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cdf2011508aced7e70aed12e5448aa649c1c66d34b991d6b29a8c7ff7ed455f0
|
| 3 |
+
size 188168
|
alohaanewmeasureforhallucinationincaptioningmodels/2651e8c2-ba78-498e-9084-aabb876a82f8_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:723154c8af5a1d3836f9e4c627ad3751da9d899bd74f198771ed155517aa785b
|
| 3 |
+
size 32805612
|
alohaanewmeasureforhallucinationincaptioningmodels/full.md
ADDED
|
@@ -0,0 +1,874 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ALOHa: A New Measure for Hallucination in Captioning Models
|
| 2 |
+
|
| 3 |
+
Suzanne Petryk*, David M. Chan, Anish Kachinthaya, Haodi Zou, John Canny, Joseph E. Gonzalez, Trevor Darrell
|
| 4 |
+
|
| 5 |
+
University of California, Berkeley
|
| 6 |
+
|
| 7 |
+
{spetryk, davidchan, anishk, haodi.zou, canny, jegonzal, trevordarrell}@berkeley.edu
|
| 8 |
+
|
| 9 |
+
https://davidmchan.github.io/aloha
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Despite recent advances in multimodal pretraining for visual description, state-of-the-art models still produce captions containing errors, such as hallucinating objects not present in a scene. The existing prominent metric for object hallucination, CHAIR, is limited to a fixed set of MS COCO objects and synonyms. In this work, we propose a modernized open-vocabulary metric, ALOHa, which leverages large language models (LLMs) to measure object hallucinations. Specifically, we use an LLM to extract groundable objects from a candidate caption, measure their semantic similarity to reference objects from captions and object detections, and use Hungarian matching to produce a final hallucination score. We show that ALOHa correctly identifies $13.6\%$ more hallucinated objects than CHAIR on HAT, a new gold-standard subset of MS COCO Captions annotated for hallucinations, and $30.8\%$ more on nocaps, where objects extend beyond MS COCO categories.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction and Background
|
| 16 |
+
|
| 17 |
+
In recent years, vision-language models have demonstrated remarkable performance. Unfortunately, even state-of-the-art models for visual description still generate captions with object hallucinations – objects or entities that are present in the caption yet are not explicitly supported by visual evidence in the image (Dai et al., 2023). In order to reduce the occurrence of object hallucinations in vision-language models, it is helpful to understand and quantify the problem through reliable, localizable, and generalizable measures of object hallucination. Reliable measures are capable of correctly indicating if a given caption contains an object hallucination. Localizable measures are capable of indicating which object in a particular caption is hallucinated. Generalizable measures are capable of eval
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Caption
|
| 21 |
+
CHAIR
|
| 22 |
+
ALOHa (ours)
|
| 23 |
+
Candidate ALC
|
| 24 |
+
|
| 25 |
+
"A baseball player holding a bat on a field, possibly with a catcher, umpire, and/or bass drum in the background."
|
| 26 |
+
|
| 27 |
+
Hallucinated objects
|
| 28 |
+
|
| 29 |
+

|
| 30 |
+
Hallucinated: None
|
| 31 |
+
ALOHA
|
| 32 |
+
|
| 33 |
+

|
| 34 |
+
0.49
|
| 35 |
+
0.37
|
| 36 |
+
baseball diamond
|
| 37 |
+
baseball field.
|
| 38 |
+
|
| 39 |
+
Correctly assigns low
|
| 40 |
+
|
| 41 |
+
scores to hallucinations
|
| 42 |
+
|
| 43 |
+
Figure 1: (Top) The SOTA prior object hallucination metric, CHAIR, is limited to MS COCO objects, and fails to detect the hallucinations in this image caption while ALOHa (ours, bottom) correctly assigns low similarity scores to the hallucinations "baseball player" and "bat". ALOHa does not penalize the caption for "catcher", "umpire", and "bass drum", as the caption indicates uncertainty of their presence.
|
| 44 |
+
|
| 45 |
+
uating captions from a wide range of input datasets, across a wide range of object and entity categories.
|
| 46 |
+
|
| 47 |
+
Recent works that measure object hallucinations in generated text generally fall into two categories: measures that find hallucinations by explicitly matching from a set of objects, and measures that compute distances between latent image and/or text embeddings, indicating a hallucination if the embeddings are too distant. In the first category, CHAIR (Rohrbach et al., 2018) is a measure that explicitly extracts objects from candidate sentences using simple string matching against MS COCO classes and a small set of synonyms. It compares these extracted objects against the ground truth detections and objects extracted from the ground truth reference captions. CHAIR is both reliable, as string matching on a fixed set of objects is accurate, consistent, and localizable, as individual non-matching strings are identified. However, as seen in Figure 1, CHAIR is not generalizable, as it can only handle a fixed set of predetermined objects. Other uni-modal measures in this category include those for abstractive summarization (Durmus et al., 2020; Kryscinski et al., 2020; Maynez et al., 2020; Son et al., 2022; Sridhar and Visser, 2022; Yuan et al., 2021), dialogue (Huang et al., 2022; Shuster
|
| 48 |
+
|
| 49 |
+
et al., 2021), and structured knowledge (Dhingra et al., 2019). These often generalize poorly to vision-language tasks as they require grounding the generated text into inputs of the same modality.
|
| 50 |
+
|
| 51 |
+
In the second category, CLIPScore (Hessel et al., 2021) employs CLIP (Radford et al., 2021) embeddings to assess image-text matches. While it is generalizable and reliable, it lacks localization as it does not pinpoint incorrect spans of text. CLIPBERTScore (Wan and Bansal, 2022) and Ref-CLIPScore (an extension of CLIPScore accounting for reference captions) face similar limitations.
|
| 52 |
+
|
| 53 |
+
POPE (Li et al., 2023) evaluates vision-language models' likelihood to hallucinate objects with machine-generated queries consisting of samples extracted from both reference object detections and nonexistent objects, but addresses a different problem from that which we investigate here - it measures how often models hallucinate rather than localizes and detects issues within a single caption.
|
| 54 |
+
|
| 55 |
+
Inspired by recent successes using LLMs for evaluation in language-only tasks (Zhang et al., 2020; Yuan et al., 2021; Bubeck et al., 2023; Chiang et al., 2023; Zheng et al., 2023), we introduce Assessment with Language models for Object Hallucination (ALOHa), a modernized measure for object hallucination detection that is reliable, localizable, and generalizable. ALOHa extends the reliability and localization of CHAIR to new input domains by leveraging in-context learning of LLMs combined with semantically rich text embeddings for object parsing and matching (Figure 1).
|
| 56 |
+
|
| 57 |
+
For a given image caption, we generate two measures: ALOHa $_0$ , a numeric score for each object rating the degree to which that object is a hallucination, and ALOHa, an aggregated score rating the degree to which the whole caption contains a hallucination. We demonstrate ALOHa on a new gold-standard dataset of image hallucinations, HAT, and show that ALOHa improves on CLIPScore while detecting object hallucinations, and CHAIR while correctly localizing those hallucinations. We conclude by demonstrating that ALOHa remains reliable and localizable when generalizing to out-of-domain data.
|
| 58 |
+
|
| 59 |
+
# 2 ALOHa: Reliable, Localizable, and Generalizable Hallucination Detection
|
| 60 |
+
|
| 61 |
+
ALOHa produces numeric scores rating the degree of hallucination for each object in a candidate caption as well as an overall caption score, given a
|
| 62 |
+
|
| 63 |
+
set of ground-truth reference captions and predicted (or ground truth) image object detections. ALOHa consists of three stages (Figure 2). (1) Objects are extracted from the image, reference set, and candidate caption using a combination of an object detector and LLM. (2) We filter the object sets and compute semantic representations of each object. (3) We compute a maximum-similarity linear assignment between candidate and reference objects. The scores from each of the pairs in the linear assignment, which we call $\mathrm{ALOHa_o}$ , measure the degree of hallucination for each of the candidate objects. The minimum similarity in this linear assignment (the ALOHa score) measures the degree of hallucination of the caption.
|
| 64 |
+
|
| 65 |
+
(1) Extracting objects from candidates, references, and images: Parsing visually grounded objects in a caption in an open-domain context is a surprisingly difficult task. CHAIR (Rohrbach et al., 2018) relies on a fixed set of MS COCO objects and synonyms, requiring considerable effort to extend to other datasets, and sometimes failing at ambiguous parses (such as mistaking the adjective "orange" for a noun). SPICE (Anderson et al., 2016) relies on standard grammar-based object parsing, which can have similar issues, as purely text-based methods fall short at identifying which nouns are visual – for instance, avoiding "picture" and "background" in Figure 2. Captions may also indicate uncertainty around object presence, such as "a bowl or plate", or "a dog biting something, possibly a Frisbee." We aim to handle such uncertain objects to avoid unfair hallucination penalties.
|
| 66 |
+
|
| 67 |
+
With the understanding that open-domain parsing is the primary factor in CHAIR's lack of generalization, we leverage the capability of zero-shot in-context learning in large language models. Following Brown et al. (2020), we use an LLM (ChatGPT, OpenAI (2022)) along with the prompt given in Appendix A to turn the parsing task into a language completion task easily solvable by an LLM. We encourage the LLM to extract visual objects in the scene, consisting primarily of noun phrases (including any attributes, such as "big dog" and "purple shirt"), from the candidate and reference captions. We run the LLM against the candidate caption to produce the unfiltered object set $\mathcal{C}$ , and again for the corresponding reference captions to produce object set $\mathcal{R}$ . To extract objects from the image context, similar to CHAIR, we augment the set of reference objects with objects
|
| 68 |
+
|
| 69 |
+

|
| 70 |
+
Figure 2: Overview of ALOHa. We prompt an LLM to extract visually grounded nouns from a candidate's machine-generated description and a set of references. We consider uncertain language (e.g., "goat or sheep"), add reference objects with and without modifiers (e.g., both "field" and "grassy field"), and avoid non-visual nouns (e.g., "picture" and "background"). Then, we compute a maximum-similarity linear assignment between candidate and reference object sets, the weights of which form the ALOHa. Matched pairs with low ALOHa are likely hallucinations (e.g., "black cat", ALOHa = 0.2). We additionally output the minimum ALOHa as a caption-level ALOHa score.
|
| 71 |
+
|
| 72 |
+
detected directly from the image using DETR (Carion et al., 2020) fine-tuned on MS COCO.
|
| 73 |
+
|
| 74 |
+
(2) Object filtering: We further refine candidate $(\mathcal{C})$ and reference $(\mathcal{R})$ object sets to better reflect specific challenges of object hallucination detection. Ideally, hallucination measures should penalize specificity when candidate attributes are not supported by references (e.g., if "purple shirt" $\in \mathcal{C}$ , yet "white shirt" $\in \mathcal{R}$ ), but should not penalize generality (e.g., "shirt" $\in \mathcal{C}$ , yet "white shirt" $\in \mathcal{R}$ ). Thus, we use spaCy (Honnibal et al., 2020a) to augment $\mathcal{R}$ with the root nouns from each reference noun phrase, but leave the candidates unchanged.
|
| 75 |
+
|
| 76 |
+
Beyond specificity, captions may also express uncertainty about the presence of objects in an image. For conjunctions (e.g., "fork or knife"), we aim to avoid unfair penalties if at least one of the objects is grounded. ALOHa considers all combinations of selecting a single object from each conjunction, denoted as $\mathcal{C}_{\{1\dots M\}}$ and $\mathcal{R}_{\{1\dots N\}}$ (e.g., "fork" $\in \mathcal{R}_0$ and "knife" $\in \mathcal{R}_1$ ). Additionally, we prompt the LLM to indicate uncertain grounding by including "possibly" after the object (e.g., "there may be a Frisbee" becomes "Frisbee (possibly)") and we remove uncertain objects from $\mathcal{C}_i$ to avoid penalties while maintaining them in $\mathcal{R}_j$ for maximum coverage of more general objects.
|
| 77 |
+
|
| 78 |
+
(3) Object Matching: Once we have extracted and parsed the candidate and reference object sets, we aim to measure the degree of hallucination for each candidate object. While we could match objects based on string alone (resulting in a binary decision), as does CHAIR, often it is useful to understand a continuous scale of hallucination – e.g., for a reference object “dog”, hallucinating “wolf” should be penalized less than “potato.” To capture this scale of semantic similarity, for each object text $o$ , we
|
| 79 |
+
|
| 80 |
+
generate $o_{\mathrm{emb}} = \phi(o) \in \mathbb{R}^K$ , where $\phi$ is a semantic text embedding model. In our work, we use S-BERT (Reimers and Gurevych, 2019). We then compute a similarity score for each pair of objects (usually the cosine similarity, see Appendix B.2). For each $(\mathcal{C}_i, \mathcal{R}_j)$ pair, we store these scores in a similarity matrix $S_{i,j} \in [0,1]^{|\mathcal{C}_i| \times |\mathcal{R}_j|}$ . We then use the Hungarian method (Kuhn, 1955) to find an optimal maximum-similarity assignment $\mathcal{M}_{i,j}$ between candidate and reference sets of objects.
|
| 81 |
+
|
| 82 |
+
To determine the $\mathrm{ALOHa_o}$ score for each object, we take the maximum score across all possible
|
| 83 |
+
parsings, giving the candidate caption the benefit of the doubt, for an object $c \in \mathcal{C}_i$:
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
\mathrm{ALOHa}_{\mathrm{o}}(c) = \max_{i,j} \, w_{c_i,j} \in \mathcal{M}_{i,j} \tag{1}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
While $0 \leq \mathrm{ALOHa}_{\mathrm{o}} \leq 1$ indicates the degree of hallucination for each object, we also want to indicate if an entire caption contains a hallucination. We thus define:
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
\mathrm{ALOHa} = \min_{c \in \mathcal{C}} \mathrm{ALOHa}_{\mathrm{o}}(c) \tag{2}
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
We choose the minimum as the presence of any hallucinated object indicates that the full caption is a hallucination, and even several correct detections should not compensate for a hallucination.
|
| 96 |
+
|
| 97 |
+
# 3 Evaluation & Discussion
|
| 98 |
+
|
| 99 |
+
HAT: To promote the development of high-quality methods for hallucination detection, we collect and release HAT (HALLucination Test), a dataset of labeled hallucinations in captions. HAT consists of 490 samples (90 validation and 400 test) labeled by in-domain experts for hallucination on both a word level and caption level (See Appendix C). Measures are evaluated on two metrics: Average Precision
|
| 100 |
+
|
| 101 |
+
<table><tr><td>Method</td><td>LA</td><td>AP</td></tr><tr><td>Baseline (Majority Vote)</td><td>-</td><td>33.75</td></tr><tr><td>CHAIRs</td><td>6.70</td><td>36.85</td></tr><tr><td>CLIPScore</td><td>-</td><td>40.10</td></tr><tr><td>RefCLIPScore</td><td>-</td><td>48.40</td></tr><tr><td>ALOHa (No Soft Object Matching)</td><td>18.66</td><td>47.27</td></tr><tr><td>ALOHa (No Detections)</td><td>19.55</td><td>48.40</td></tr><tr><td>ALOHa (Oracle Detections)</td><td>19.55</td><td>47.86</td></tr><tr><td>ALOHa (DETR Detections)*</td><td>20.30</td><td>48.62</td></tr><tr><td>ALOHa (Oracle+DETR Detections)</td><td>21.05</td><td>48.78</td></tr></table>
|
| 102 |
+
|
| 103 |
+
Table 1: Test set performance for binary hallucination detection on HAT. LA: Localization Accuracy. AP: Average Precision. * indicates the version of ALOHa used throughout this paper, unless noted otherwise. Oracle detections are human-generated reference detections.
|
| 104 |
+
|
| 105 |
+
(AP) and Localization Accuracy (LA). The AP of the method measures reliability and is defined as how well the measure identifies captions with hallucinations. For CHAIR, decisions are binary, so $\mathrm{AP} =$ accuracy. For ALOHa, AP is the weighted mean of precisions across all thresholds. The LA, measured on samples containing hallucinations in HAT, measures localization and is defined as the accuracy of correctly indicating which of the specific objects were hallucinated. For CHAIR, a hallucination is correctly localized when at least one detected string mismatch is a hallucination, and for ALOHa when the minimum ALOHa score corresponds to a hallucinated object.
|
| 106 |
+
|
| 107 |
+
ALOHa's performance on HAT is shown in Table 1. On AP, ALOHa with DETR detections outperforms both CHAIR and CLIPScore by $11.8\%$ and $8.5\%$ respectively. RefCLIPScore attains a similar AP; however, it is not localizable. ALOHa achieves more than twice the LA of CHAIR on HAT, a particularly challenging task as HAT includes non-object hallucinations, such as incorrect verbs or relations (see Figure A6). Table 1 further ablates the choice of image detections and indicates that ALOHa is robust to missing detections.
|
| 108 |
+
|
| 109 |
+
FOIL object hallucinations: To indicate generalizability we evaluate our method on two machine-generated object hallucination datasets. FOIL (Shekhar et al., 2017) contains MS COCO images, where objects are randomly replaced with similar ones (e.g., "bus" and "car"), and nocaps-FOIL, a similar dataset that we construct on the nocaps dataset (Agrawal et al., 2019) for novel object captioning beyond MS COCO (see Appendix C.1). Table 2 breaks down the results of ALOHa on the FOIL and nocaps-FOIL
|
| 110 |
+
|
| 111 |
+
Caption
|
| 112 |
+

|
| 113 |
+
"A woman wearing a face mask and a nun making bread in a factory or bakery in India, possibly during a lockdown."
|
| 114 |
+
|
| 115 |
+

|
| 116 |
+
Caption
|
| 117 |
+
|
| 118 |
+
Figure 3: Qualitative Flickr30k examples. (Left) ALOHa correctly assigns low scores to the hallucinated "nun" and "bread", whereas CHAIR does not detect any hallucinations. (Right) Although ALOHa assigns high similarity between the hallucinated "electric guitar" and reference "(acoustic) guitar", it assigns low scores to the other 3 hallucinations. CHAIR detects only the hallucination "chair", missing the others.
|
| 119 |
+

|
| 120 |
+
"An older man with a white beard wearing a red shirt, sitting on either a chair or the floor, playing an electric guitar in a room with mirrors."
|
| 121 |
+
|
| 122 |
+

|
| 123 |
+
|
| 124 |
+
dataset. The results illustrate a subtle point. While ALOHa under-performs CHAIRs in both AP and LA on the original FOIL dataset, this is because FOIL constructs new samples by replacing string-matched COCO objects with a set of hand-selected "foil" objects (near semantic neighbors). This is a best-case scenario for CHAIR, as CHAIR relies on fixed object-set string matching alone, and thus, is easily able to both detect and localize the replaced samples. When we move to nocaps-FOIL with non-MS COCO data, however, ALOHa significantly outperforms CHAIR, as now the object set that was a strength for in-domain FOIL becomes a liability, and CHAIR is unable to detect any hallucinations at all, due to the restricted string matching. Ref-CLIPScore, while competitive in the hallucination detection task, cannot perform localization.
|
| 125 |
+
|
| 126 |
+
Qualitative Examples - Flickr30k: In Figure 3 and Figure A4, we visualize the behavior of CHAIR and ALOHa on several Flickr30k samples (Young et al., 2014), using captions generated by a recent captioning model (Chan et al., 2023) that often produces complex captions with phrases expressing uncertainty.
|
| 127 |
+
|
| 128 |
+
Ablation - Choice of LLM: The language model
|
| 129 |
+
|
| 130 |
+
<table><tr><td rowspan="3">Method</td><td colspan="2">FOIL</td><td colspan="8">nocaps-FOIL</td></tr><tr><td colspan="2">Overall</td><td colspan="2">In-Domain</td><td colspan="2">Near-Domain</td><td colspan="2">Out-Domain</td><td colspan="2">Overall</td></tr><tr><td>LA</td><td>AP</td><td>LA</td><td>AP</td><td>LA</td><td>AP</td><td>LA</td><td>AP</td><td>LA</td><td>AP</td></tr><tr><td>Baseline (Majority Vote)</td><td>-</td><td>50.00</td><td>-</td><td>50.00</td><td>-</td><td>50.00</td><td>-</td><td>50.00</td><td>-</td><td>50.00</td></tr><tr><td>CHAIRs</td><td>79.00</td><td>92.50</td><td>13.47</td><td>57.82</td><td>17.55</td><td>59.14</td><td>12.24</td><td>58.06</td><td>14.42</td><td>58.33</td></tr><tr><td>CLIPScore</td><td>-</td><td>76.44</td><td>-</td><td>71.81</td><td>-</td><td>70.17</td><td>-</td><td>78.73</td><td>-</td><td>73.48</td></tr><tr><td>RefCLIPScore</td><td>-</td><td>80.64</td><td>-</td><td>79.63</td><td>-</td><td>78.70</td><td>-</td><td>85.89</td><td>-</td><td>81.31</td></tr><tr><td>ALOHa</td><td>40.00</td><td>61.35</td><td>47.35</td><td>71.80</td><td>47.30</td><td>66.67</td><td>48.84</td><td>70.91</td><td>45.17</td><td>69.52</td></tr></table>
|
| 131 |
+
|
| 132 |
+
is critical to the overall performance of ALOHa — language models with insufficient zero-shot parsing capability will suffer reduced downstream performance. We investigate the performance of the language model in Table 3 on HAT. In addition to LA and AP, we also measure "Parsing error rate" (PER), which is the rate of errors made when parsing objects from reference captions on HAT, and "Parsing recall rate" (PRR), which is the recall rate of objects in the captions (See Appendix B.1).
|
| 133 |
+
|
| 134 |
+
Ablation - Object Extraction and Semantic Embedding Methods: In this work, we leverage LLMs (OpenAI, 2023) for object extraction, and a BERT-based model (Reimers and Gurevych, 2019) for semantic word embedding. In Figure 4, we explore the difference in overall performance on HAT's validation set when using different combinations of object extraction and semantic embedding. Namely, we compare LLM-based extraction to the parse-tree-based noun extraction in spaCy (Honnibal et al., 2020b), and compare SentenceTransformer (BERT-Based model, (Reimers and Gurevych, 2019)) to Word2Vec (Mikolov et al., 2018), GPT-3 (Ada) embedding, and CHAIR-style string matching (following CHAIR, strings are case-normalized and lemmatized). Combining LLMs with the SentenceTransformer (BERT-Based) model outperformed other methods, and fuzzy embedding methods outperformed exact string matching. This is generally expected: humans have a wide vocabulary that is poorly captured by exact string matching. Word2Vec outperforms GPT-3 embeddings. We believe that this is because the GPT-3 embeddings are optimized for sentence-level structures, and may fail to semantically embed single words in a meaningful way. Interestingly, S-BERT is not a word similarity measure and was instead designed to measure distances between sentences (and could lead to inaccurate single-word judgments) – While we did find S-BERT most effective among our
|
| 135 |
+
|
| 136 |
+

|
| 137 |
+
Figure 4: Performance on HAT validation set filtered for hallucinated objects, when comparing embedding methods and object extraction approaches.
|
| 138 |
+
|
| 139 |
+
Table 2: Breakdown of results by domain on FOIL and nocaps FOIL. AP: Average Precision. LA: Localization Accuracy. Bold and underlined values represent the best and second-best methods respectively.
|
| 140 |
+
|
| 141 |
+
<table><tr><td>Language Model</td><td>LA ↑</td><td>AP ↑</td><td>PER ↓</td><td>PRR ↑</td></tr><tr><td>GPT-3.5</td><td>20.30</td><td>48.62</td><td>2.97</td><td>98.63</td></tr><tr><td>Claude (Instant)</td><td>20.74</td><td>41.48</td><td>3.31</td><td>-</td></tr><tr><td>Koala</td><td>22.22</td><td>38.70</td><td>5.07</td><td>-</td></tr></table>
|
| 142 |
+
|
| 143 |
+
Table 3: Exploration of LLM choice for parsing within ALOHa, on HAT. AP: Average Precision, LA: Localization Accuracy, PER: Parsing Error Rate (%), PRR: Parsing Recall Rate.
|
| 144 |
+
|
| 145 |
+
approaches, we believe that leveraging a large-scale model trained specifically for semantic similarity between words would be an exciting and powerful extension to the ALOHa framework.
|
| 146 |
+
|
| 147 |
+
# 4 Conclusion
|
| 148 |
+
|
| 149 |
+
This paper introduces ALOHa, a scalable LLM-augmented metric for open-vocabulary object hallucination. ALOHa correctly identifies $13.6\%$ more hallucinated objects on HAT and $31\%$ on nocaps-FOIL than CHAIR. ALOHa represents an important modernization of caption hallucination metrics, and detecting complex hallucinations in actions, quantities, and abstract concepts remains an exciting and challenging task for future exploration.
|
| 150 |
+
|
| 151 |
+
# Limitations / Ethical Considerations
|
| 152 |
+
|
| 153 |
+
While ALOHa represents a strong step towards open-domain localized hallucination detection, it comes with several limitations which we discuss in this section.
|
| 154 |
+
|
| 155 |
+
Non-determinism A primary concern with using large language models for an evaluation measure is the natural nondeterminism that comes with them. While in theory language models sampled at a temperature of zero (as we do in this work) are deterministic, it is well documented that small random fluctuations can still occur (OpenAI, 2023). Beyond random fluctuations, the availability of language models long-term can impact the reproducibility of the measure. In this work, we primarily rely on closed-source language models, which can change or become unavailable without notice. In Table 3, we demonstrate that ALOHa still functions with open source models such as Koala (Geng et al., 2023), however, the performance is significantly degraded due to the parsing capabilities of the model. With time, and more powerful open-source LLMs, this will become less of an issue, however relying on a nondeterministic metric for comparative evaluation can easily become a liability.
|
| 156 |
+
|
| 157 |
+
Availability of Reference Captions (Reference-Free vs. Reference-Based Measures) One of the primary limitations of the ALOHa evaluation method is the requirement that reference captions are available for the evaluation dataset (an issue shared by CHAIR). Not only must reference captions be available, but they also must sufficiently cover the salient details in the reference image. When the references are impoverished (as can easily happen with a single reference sentence (Chan et al., 2023)) or when there are no references, and ALOHa must rely entirely on detections, the method under-performs more general methods such as CLIPScore which are reference-free, and rely on a large pre-training dataset to encode vision and language correspondences. We strongly believe that the area of reference-free localized hallucination detection is an important area of future research; how can we leverage the tools from large vision and language pre-training in a localized way to understand and interpret where hallucinations lie in the hallucinated text? That being said, there is also a place for reference-based measures, as reference-based measures focus on what humans
|
| 158 |
+
|
| 159 |
+
believe to be salient details in the image, whereas reference-free measures always rely on downstream models which approximate what humans believe to be important. This means that reference-based measures can often transfer better to new domains than reference-free measures, which often must be trained/fine-tuned in-domain with human-labeled data to achieve strong performance.
|
| 160 |
+
|
| 161 |
+
General costs associated with LLMs The use of large language models for any task incurs significant compute, monetary, environmental, and human costs. ALOHa is a significantly slower evaluation measure than methods like CHAIR (however not that much less efficient than CLIPScore), leading to increased power consumption, and cost during evaluation. In addition, the models that we rely on are generally closed source and represent a non-trivial monetary expenditure (Experiments in this paper, including ablations, testing, and prototyping required approximately USD $120 in API fees). Such factors can be limiting to researchers who wish to evaluate large datasets, however we hope that with the advent of larger open-source models, and continued investment in hardware and systems research, the cost will decrease significantly. Beyond compute and financial costs, there are environmental and human costs associated with using large language models for evaluation, see Bender et al. (2021) for a detailed discussion of these factors.
|
| 162 |
+
|
| 163 |
+
Limited Control of Bias In this work, we do not evaluate the performance of ALOHa on Non-English data, nor do we explicitly control for or measure bias in the creation of HAT (Which is a labeled subset, randomly selected of the MS COCO dataset), or the Nocaps-FOIL dataset (which operates on the same samples as the Nocaps validation dataset). While HAT is a subset of the common MS COCO dataset, we recognize that the creation of such potentially biased datasets has the potential to lead researchers to engineer features and methods which are unintentionally biased against underrepresented groups. We aim to address these shortcomings in the next iteration of HAT, which will not only contain out-of-domain data for MS COCO-trained models but also aims to better control for bias in the underlying image and caption data. Note that our work, including HAT, is intended for research purposes.
|
| 164 |
+
|
| 165 |
+
# Acknowledgements
|
| 166 |
+
|
| 167 |
+
We thank Dr. Kate Saenko for their helpful comments on the work. Authors, as part of their affiliation with UC Berkeley, were supported in part by the NSF, DoD, and/or the Berkeley Artificial Intelligence Research (BAIR) industrial alliance program, as well as gifts from Anyscale, Astronomer, Google, IBM, Intel, Lacework, Microsoft, Mohamed Bin Zayed University of Artificial Intelligence, Samsung SDS, Uber, and VMware.
|
| 168 |
+
|
| 169 |
+
# References
|
| 170 |
+
|
| 171 |
+
Harsh Agrawal, Peter Anderson, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, and Stefan Lee. 2019. nocaps: novel object captioning at scale. In 2019 IEEE/CVF International Conference on Computer Vision, ICCV 2019, Seoul, Korea (South), October 27 - November 2, 2019, pages 8947-8956. IEEE.
|
| 172 |
+
Peter Anderson, Basura Fernando, Mark Johnson, and Stephen Gould. 2016. Spice: Semantic propositional image caption evaluation. In European conference on computer vision, pages 382-398. Springer.
|
| 173 |
+
Emily M Bender, Timnit Gebru, Angelina McMillan-Major, and Shmargaret Shmitchell. 2021. On the dangers of stochastic parrots: Can language models be too big? In Proceedings of the 2021 ACM conference on fairness, accountability, and transparency, pages 610-623.
|
| 174 |
+
Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual.
|
| 175 |
+
Sébastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, et al. 2023. Sparks of artificial general intelligence: Early experiments with gpt-4. ArXiv preprint, abs/2303.12712.
|
| 176 |
+
Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. 2020. End-to-end object detection with transformers. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August
|
| 177 |
+
|
| 178 |
+
23-28, 2020, Proceedings, Part I 16, pages 213-229. Springer.
|
| 179 |
+
David M Chan, Austin Myers, Sudheendra Vijayanarasimhan, David A Ross, and John Canny. 2023. Ic3: Image captioning by committee consensus. ArXiv preprint, abs/2302.01328.
|
| 180 |
+
Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. 2023. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ * chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023).
|
| 181 |
+
Wenliang Dai, Zihan Liu, Ziwei Ji, Dan Su, and Pascale Fung. 2023. Plausible may not be faithful: Probing object hallucination in vision-language pre-training. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, pages 2136-2148, Dubrovnik, Croatia. Association for Computational Linguistics.
|
| 182 |
+
Bhuwan Dhingra, Manaal Faruqui, Ankur Parikh, Ming-Wei Chang, Dipanjan Das, and William Cohen. 2019. Handling divergent reference texts when evaluating table-to-text generation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4884–4895, Florence, Italy. Association for Computational Linguistics.
|
| 183 |
+
Esin Durmus, He He, and Mona Diab. 2020. FEQA: A question answering evaluation framework for faithfulness assessment in abstractive summarization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5055-5070, Online. Association for Computational Linguistics.
|
| 184 |
+
Xinyang Geng, Arnav Gudibande, Hao Liu, Eric Wallace, Pieter Abbeel, Sergey Levine, and Dawn Song. 2023. Koala: A dialogue model for academic research. Blog post.
|
| 185 |
+
Jack Hessel, Ari Holtzman, Maxwell Forbes, Ronan Le Bras, and Yejin Choi. 2021. CLIPScore: A reference-free evaluation metric for image captioning. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7514-7528, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 186 |
+
Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020a. spacy: Industrial-strength natural language processing in python.
|
| 187 |
+
Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020b. spacy: Industrial-strength natural language processing in python, zenodo, 2020.
|
| 188 |
+
Sicong Huang, Asli Celikyilmaz, and Haoran Li. 2022. Ed-faith: Evaluating dialogue summarization on faithfulness. ArXiv preprint, abs/2211.08464.
|
| 189 |
+
Wojciech Kryscinski, Bryan McCann, Caiming Xiong, and Richard Socher. 2020. Evaluating the factual consistency of abstractive text summarization. In
|
| 190 |
+
|
| 191 |
+
Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9332-9346, Online. Association for Computational Linguistics.
|
| 192 |
+
Harold W Kuhn. 1955. The hungarian method for the assignment problem. *Naval research logistics quarterly*, 2(1-2):83-97.
|
| 193 |
+
Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. 2020. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision, 128(7):1956-1981.
|
| 194 |
+
Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. 2022. BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, volume 162 of Proceedings of Machine Learning Research, pages 12888-12900. PMLR.
|
| 195 |
+
Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. 2023. Evaluating object hallucination in large vision-language models.
|
| 196 |
+
Joshua Maynez, Shashi Narayan, Bernd Bohnet, and Ryan McDonald. 2020. On faithfulness and factuality in abstractive summarization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1906-1919, Online. Association for Computational Linguistics.
|
| 197 |
+
Tomas Mikolov, Edouard Grave, Piotr Bojanowski, Christian Puhrsch, and Armand Joulin. 2018. Advances in pre-training distributed word representations. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).
|
| 198 |
+
Sewon Min, Mike Lewis, Luke Zettlemoyer, and Hannaneh Hajishirzi. 2022. *MetaICL: Learning to learn in context*. In *Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies*, pages 2791–2809, Seattle, United States. Association for Computational Linguistics.
|
| 199 |
+
OpenAI. 2022. Introducing chatgpt.
|
| 200 |
+
OpenAI. 2023. Gpt-4 technical report.
|
| 201 |
+
Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning transferable visual models from natural language supervision. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 8748-8763. PMLR.
|
| 202 |
+
|
| 203 |
+
Nils Reimers and Iryna Gurevych. 2019. SentenceBERT: Sentence embeddings using Siamese BERT-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3982-3992, Hong Kong, China. Association for Computational Linguistics.
|
| 204 |
+
Anna Rohrbach, Lisa Anne Hendricks, Kaylee Burns, Trevor Darrell, and Kate Saenko. 2018. Object hallucination in image captioning. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4035-4045, Brussels, Belgium. Association for Computational Linguistics.
|
| 205 |
+
Ravi Shekhar, Sandro Pezzelle, Yauhen Klimovich, Aurélie Herbelot, Moin Nabi, Enver Sangineto, and Raffaella Bernardi. 2017. FOIL it! find one mismatch between image and language caption. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 255-265, Vancouver, Canada. Association for Computational Linguistics.
|
| 206 |
+
Kurt Shuster, Spencer Poff, Moya Chen, Douwe Kiela, and Jason Weston. 2021. Retrieval augmentation reduces hallucination in conversation. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, pages 3784-3803, Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 207 |
+
Seonil Simon Son, Junsoo Park, Jeong-in Hwang, Junghwa Lee, Hyungjong Noh, and Yeonsoo Lee. 2022. Harim+: Evaluating summary quality with hallucination risk: Evaluating summary quality with hallucination risk. In Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing, pages 895-924.
|
| 208 |
+
Arvind Krishna Sridhar and Erik Visser. 2022. Improved beam search for hallucination mitigation in abstractive summarization. ArXiv preprint, abs/2212.02712.
|
| 209 |
+
David Wan and Mohit Bansal. 2022. Evaluating and improving factuality in multimodal abstractive summarization. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 9632-9648, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
|
| 210 |
+
Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. 2022. OFA: unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, volume 162 of Proceedings of Machine Learning Research, pages 23318-23340. PMLR.
|
| 211 |
+
Jun Xu, Tao Mei, Ting Yao, and Yong Rui. 2016. MSR-VTT: A large video description dataset for bridging
|
| 212 |
+
|
| 213 |
+
video and language. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pages 5288-5296. IEEE Computer Society.
|
| 214 |
+
Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. 2014. From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78.
|
| 215 |
+
Weizhe Yuan, Graham Neubig, and Pengfei Liu. 2021. Bartscore: Evaluating generated text as text generation. In Advances in Neural Information Processing Systems 34: Annual Conference on Neural Information Processing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual, pages 27263-27277.
|
| 216 |
+
Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. *Bertscore: Evaluating text generation with BERT*. In *8th International Conference on Learning Representations*, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.
|
| 217 |
+
Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. 2023. Judging llm-as-a-judge with mt-bench and chatbot arena. ArXiv preprint, abs/2306.05685.
|
| 218 |
+
Deyao Zhu, Jun Chen, Kilichbek Haydarov, Xiaoqian Shen, Wenxuan Zhang, and Mohamed Elhoseiny. 2023. Chatgpt asks, blip-2 answers: Automatic questioning towards enriched visual descriptions. ArXiv preprint, abs/2303.06594.
|
| 219 |
+
|
| 220 |
+
# Appendix
|
| 221 |
+
|
| 222 |
+
Appendix A describes the prompt of the language model, including the exact language used, the design choices, and the in-context examples.
|
| 223 |
+
|
| 224 |
+
Appendix B contains additional experimental details for experiments in the paper.
|
| 225 |
+
|
| 226 |
+
Appendix C describes the datasets that we collected and constructed, including HAT and nocaps-FOIL.
|
| 227 |
+
|
| 228 |
+
# A Prompt
|
| 229 |
+
|
| 230 |
+
The choice of prompt for a large language model using in-context learning is critical to the performance of the model. Each component of the prompt has some ability to shape the downstream language distribution. In this work, we use the prompt shown in Figure A1. This prompt has several rules, which we discuss here.
|
| 231 |
+
|
| 232 |
+
You are an assistant that parses visually present objects from an image caption. Given an image caption, you list ALL the objects visually present in the image or photo described by the captions. Strictly abide by the following rules:
|
| 233 |
+
- Include all attributes and adjectives that describe the object, if present
|
| 234 |
+
- Do not repeat objects
|
| 235 |
+
- Do not include objects that are mentioned but have no visual presence in the image, such as light, sound, or emotions
|
| 236 |
+
- If the caption is uncertain about an object, YOU MUST include '(possibly)' after the object
|
| 237 |
+
- If the caption thinks an object can be one of several things, include 'or' and all the possible objects
|
| 238 |
+
- Always give the singular form of the object, even if the caption uses the plural form
|
| 239 |
+
|
| 240 |
+
Figure A1: The prompt that we use for parsing objects from both captions and sets of reference captions.
|
| 241 |
+
|
| 242 |
+
Attributes: We ask that the language model include all attributes attached to the object if they are present. By doing so, we can catch hallucinations such as those shown in Figure 3, where "electric guitar" appears in the candidate, but an acoustic guitar is shown in the image. Attributes are handled differently between reference captions and candidate captions. For reference captions, we add both the object with attributes, and the object without attributes to the set, so the candidate is not penalized for being more general. For the candidate, however, we add only the object with attributes, so if the candidate produces attributes, they must match with something in the reference set.
|
| 243 |
+
|
| 244 |
+
Repeated Objects: In this work, our primary goal is to determine if a particular object is hallucinated, and not focus on the quantity of hallucinations. Thus, we de-duplicate the object set in both the candidate and reference captions, as well as detections coming from the image. By doing this, we focus on whether the objects can exist in the image, rather than focus on getting the
|
| 245 |
+
|
| 246 |
+
exact count, which may be incorrect if a candidate caption mentions the same object more than once (and that object is parsed twice).
|
| 247 |
+
|
| 248 |
+
Intangible Object: In many cases, objects mentioned in the candidate or reference set may be intangible, such as color, light, sound, or emotion. To improve the accuracy of the model, we explicitly suggest that such objects should not be included.
|
| 249 |
+
|
| 250 |
+
Or/Possibly: Modern captioning methods such as Chat-Captioner (Zhu et al., 2023) and IC3 (Chan et al., 2023) are capable of encoding uncertainty into their approach through the use of words like "possibly" or "maybe". Additionally, they may make judgments that are uncertain such as "an apple or an orange." Existing captioning and hallucination detection measures fail to account for this uncertainty, and match both objects, even though the semantics of the caption suggests that the object is uncertain, or may be one of many objects. To account for this, we encourage the LLM to indicate uncertainty in a fixed way, as well as list multiple alternatives on a single line. We then account for this in our matching method, by giving the candidate the benefit of the doubt, scoring only the best match from an alternative set, and ignoring any uncertainty.
|
| 251 |
+
|
| 252 |
+
Singularization: While it is possible to singularize objects using rule-based methods, rule-based methods struggle with challenging nouns, and we found that in general, the LLM was better at performing the singularization step of the post-processing before object matching.
|
| 253 |
+
|
| 254 |
+
# A.1 In-Context Examples
|
| 255 |
+
|
| 256 |
+
In addition to the core prompt text, we provide several contextual samples, which help with in-context learning (Brown et al., 2020). The contextual samples help to align the label space of the model correctly with the target output distribution (Min et al., 2022). An example of such contexts is given in Figure A2 and Figure A3.
|
| 257 |
+
|
| 258 |
+
# B Experimental
|
| 259 |
+
|
| 260 |
+
# Details & Additional Experimentation
|
| 261 |
+
|
| 262 |
+
# B.1 Metrics
|
| 263 |
+
|
| 264 |
+
We employ several measures in the paper, which we describe in detail here.
|
| 265 |
+
|
| 266 |
+
Caption: This image shows two pink roses in a tulip-shaped vase on a wooden kitchen counter, next to a microwave and a toaster oven. Objects:
|
| 267 |
+
|
| 268 |
+
- pink rose
|
| 269 |
+
- tulip-shaped vase
|
| 270 |
+
- wooden kitchen counter
|
| 271 |
+
- microwave
|
| 272 |
+
- toaster oven
|
| 273 |
+
|
| 274 |
+
Figure A2: An example of a single-caption parsing result.
|
| 275 |
+
|
| 276 |
+
# Captions:
|
| 277 |
+
|
| 278 |
+
- Several people riding on a motorcycle with an umbrella open.
|
| 279 |
+
- Couples riding motorcycles carrying umbrellas and people sitting at tables.
|
| 280 |
+
- A group of people riding scooters while holding umbrellas.
|
| 281 |
+
- Some tables and umbrellas sitting next to a building.
|
| 282 |
+
- Pedestrians and motorcyclists near an open outdoor market.
|
| 283 |
+
Objects:
|
| 284 |
+
- person
|
| 285 |
+
- couple
|
| 286 |
+
- motorcycle
|
| 287 |
+
- umbrella
|
| 288 |
+
table
|
| 289 |
+
- scooter
|
| 290 |
+
building
|
| 291 |
+
- pedestrian
|
| 292 |
+
motorcyclist
|
| 293 |
+
- open outdoor market
|
| 294 |
+
|
| 295 |
+
Figure A3: An example of a multi-caption parsing result.
|
| 296 |
+
|
| 297 |
+
Average Precision We measure the Average Precision (AP) of each hallucination metric to detect sentence-level hallucinations. Specifically, we label each sample with 1 if it contains a hallucination and 0 otherwise. We then measure AP between those labels and per-sample hallucination measures. For ALOHa, this is:
|
| 298 |
+
|
| 299 |
+
$$
|
| 300 |
+
\mathrm{AP} = \frac{1}{N} \sum_{i = 1}^{N} \mathbb{I}[\text{label}] \cdot (1 - \mathrm{ALOHa})(i) \tag{3}
|
| 301 |
+
$$
|
| 302 |
+
|
| 303 |
+
For CHAIR, this is:
|
| 304 |
+
|
| 305 |
+
$$
|
| 306 |
+
\mathrm{AP} = \frac{1}{N} \sum_{i = 1}^{N} \mathbb{I}[\text{label}] \cdot \mathbb{I}[\text{CHAIR Prediction}] \tag{4}
|
| 307 |
+
$$
|
| 308 |
+
|
| 309 |
+
It is worth noting that when computing average precision, we define the positive label (1) to be "hallucination" to measure the ability of ALOHa or CHAIR to correctly identify hallucinations. Indeed, a lower ALOHa indicates that a caption is more likely to have a hallucination – therefore, we negate the ALOHa score when computing AP. We follow the standard method of computing AP with binary labels and continuous confidence values, where precision and recall are iteratively computed with each confidence value (-ALOHa) as the threshold. The AP is an average of those precisions, each weighted by the increase in recall from the previous threshold.
|
| 310 |
+
|
| 311 |
+
Localization Accuracy Localization accuracy (LA) measures the fraction of samples where a metric can correctly identify a hallucinated object, among samples that are known to contain hallucinated objects.
|
| 312 |
+
|
| 313 |
+
$$
|
| 314 |
+
\mathrm{LA} = \frac{\left|\{\geq 1 \text{ correctly identified halluc.}\}\right|}{\left|\{\geq 1 \text{ halluc.}\}\right|} \tag{5}
|
| 315 |
+
$$
|
| 316 |
+
|
| 317 |
+
A sample receives LA of 1 if at least one of the predicted hallucinated objects was correct (for CHAIR), or if the object with the minimum matching score was a true hallucination (for ALOHa). We do not measure LA for CLIPScores, as they cannot provide hallucination scores per object.
|
| 318 |
+
|
| 319 |
+
# B.2 Semantic Similarity Measure
|
| 320 |
+
|
| 321 |
+
In ALOHa, we compute the similarity between objects using the cosine distance between embedding vectors generated using the all-MiniLM-L6-v2 S-BERT implementation in the SentenceTransformers library (Reimers and Gurevych, 2019). While in theory cosine distances should lie in the interval $[-1,1]$ , in this library, for optimization stability, models are trained with positive samples having similarity 1, and negative samples having similarity 0. This (unintentionally) induces a model which (by optimization) only produces positive cosine similarity scores. ALOHa can still be adapted to negative similarity: our algorithms for
|
| 322 |
+
|
| 323 |
+
maximal assignment and equations 1 and 2 both support negative values (even though they don't appear in this instantiation of the algorithm).
|
| 324 |
+
|
| 325 |
+
Parsing Error Rate (PER) and Parsing Recall Rate (PRR) We calculate PER (Parsing Error Rate) with manual annotation by taking the fraction of objects output by the LLM that did not exist in the caption (in other words, measuring 1-precision of parsed objects). We additionally annotate and compute the Parsing Recall Rate (PRR) - the fraction of objects in the caption that are included in the objects parsed by the LLM. This gives a recall for GPT-3.5 of $98.63\%$ . In these experiments, we find that while Koala (Geng et al., 2023) has strong LA performance on HAT, ChatGPT (GPT-3.5) (OpenAI, 2022) has both the best average precision and makes the fewest errors; thus we leverage GPT-3.5 for our primary experiments in the main paper.
|
| 326 |
+
|
| 327 |
+
# C Datasets
|
| 328 |
+
|
| 329 |
+
In this section, we discuss further the data that we use and go into detail on the dataset collection process for HAT (Appendix C.2) and the nocaps-FOIL dataset (Appendix C.1).
|
| 330 |
+
|
| 331 |
+
# C.1 nocaps-FOIL
|
| 332 |
+
|
| 333 |
+
The FOIL dataset (Shekhar et al., 2017) is a synthetic hallucination dataset based on samples from the MS-COCO (Lin et al., 2014) dataset. In this dataset, for each candidate-image pair, a "foil" caption is created which swaps one of the objects (in the MS-COCO detection set) in the caption with a different, and closely related neighbor (chosen by hand to closely match, but be visually distinct). While the FOIL dataset provides a useful benchmark for many hallucination detection methods, it is overly biased towards methods optimized for the MS-COCO dataset. To help evaluate more general methods, we introduce a new dataset "nocaps-FOIL" based on the nocaps (Agrawal et al., 2019) dataset. The nocaps dataset consists of images from the OpenImages (Kuznetsova et al., 2020) dataset annotated with image captions in a similar style to MS-COCO. nocaps is split into three sets: an in-domain set, where objects in the images are in the MS-COCO object set, near-domain, where the objects in the image are related to those of MS-COCO, and out-of-domain, where objects in the image are not contained in MS-COCO.
|
| 334 |
+
|
| 335 |
+
To build the nocaps-FOIL dataset, for each image, we generate the baseline caption by removing a single caption from the reference set. We then generate the foil caption as follows. First, we find any words in the baseline caption that are contained in either the openimages class list (there are 600) or a near neighbor in Wordnet. We then randomly select one of these classes to replace. Because there are 600 classes, we do not hand-pick the foil classes, and rather, select a near neighbor class based on sentence embeddings from (Reimers and Gurevych, 2019). We find that in practice, the nearest neighbor is often a synonym, thus, to avoid selecting synonyms, we take the 10th furthest sample, which is often a near neighbor, but is visually distinct. We replace this word in the caption, matching case, and then perform a filter for grammatical correctness using the Ginger $^2$ API. Any captions which are not grammatically correct are filtered. This leaves us with 2500 image/caption/foil pairs, which we use for evaluation in Table 2.
|
| 336 |
+
|
| 337 |
+
The OpenImages dataset annotations are under a CC BY 4.0 license, and the images are under a CC BY 2.0 license.
|
| 338 |
+
|
| 339 |
+
# C.2 HAT
|
| 340 |
+
|
| 341 |
+
HAT is based on MS-COCO and aims to be a gold-standard benchmark for the evaluation of hallucination in image captioning methods. While it is relatively small, it is densely annotated by in-domain experts for several types of hallucination including object hallucination, action hallucination, and numeric hallucination among others. HAT consists of 90 validation samples, and 400 test samples, each containing a machine candidate caption generated by one of BLIP (Li et al., 2022), OFA (Wang et al., 2022), IC3 (Chan et al., 2023) or Chat-Captioner (Zhu et al., 2023), and annotations which mark which words in the captions are hallucinated (See Figure A7 for exact instructions given to annotators). An image/caption pair is considered a hallucination if at least one of the words in the caption is hallucinated.
|
| 342 |
+
|
| 343 |
+
Screenshots of the interface for data collection are given in Figure A7. While initial versions of the dataset were collected using AMT workers, we found that the quality of annotations was not sufficiently high, and thus, we trained experts explicitly in hallucination detection, and leveraged expert ratings for the samples in the test dataset.
|
| 344 |
+
|
| 345 |
+
MS-COCO is under a Creative Commons Attribution 4.0 License.
|
| 346 |
+
|
| 347 |
+
# D Qualitative Examples
|
| 348 |
+
|
| 349 |
+
We provide additional qualitative examples from the following scenarios:
|
| 350 |
+
|
| 351 |
+
# D.1 Flickr30k Examples
|
| 352 |
+
|
| 353 |
+
Figure A4 shows several examples on the Flickr-30k dataset (Young et al., 2014) with captions generated by IC3 (Chan et al., 2023), a modern image captioning model that often generates longer, more complex captions including uncertain language such as "possibly." We highlight objects with $\mathrm{ALOHa_0} \leq 0.5$ as likely hallucinations. For samples going from left to right:
|
| 354 |
+
|
| 355 |
+
1. The caption hallucinates the word “mother”, as there is no visual evidence that the woman is specifically a mother. CHAIR does not capture this, as “mother” is mapped to a synonym for “person”, which it counts as a grounded (non-hallucinated) object. ALOHa matches “mother” to the reference “person”, assigning a borderline $\mathrm{ALOHa_0}$ of 0.5.
|
| 356 |
+
2. The image does not contain a hallucination. CHAIR flags "table" as hallucinated, yet the caption expressed uncertainty with a conjunction: "chair or table." ALOHa successfully parses this conjunction and selects "cloth" with $\mathrm{ALOHa_0} = 1.0$ to the exact reference match.
|
| 357 |
+
3. CHAIR does not detect the hallucinated "bridge", which is successfully assigned a low $\mathrm{ALOHa_0} = 0.35$ .
|
| 358 |
+
4. The caption hallucinates the word "father". In most cases, the specific relationship of "father" is unlikely to be grounded (similar to "mother" in sample 1); yet, in this image, it is even more clear as there are only children present. CHAIR maps "father" as another synonym for "person" and does not consider it a hallucination, whereas "father" has a low $\mathrm{ALOHa_0} = 0.34$ .
|
| 359 |
+
|
| 360 |
+
# D.2 HAT Examples
|
| 361 |
+
|
| 362 |
+
We present 4 random samples from HAT each for cases without hallucinations (Figure A5) and
|
| 363 |
+
|
| 364 |
+
with hallucinations (Figure A6). Because these examples contain more nuance than we discuss below, we do not indicate binary hallucination decisions as in Appendix D.1.
|
| 365 |
+
|
| 366 |
+
Starting with Figure A5, samples with captions that were labeled as correct, from left to right:
|
| 367 |
+
|
| 368 |
+
1. Both CHAIR and ALOHa successfully do not find any hallucinations.
|
| 369 |
+
2. CHAIR does not flag any hallucinations. ALOHa assigns a low $\mathrm{ALOHa_0} = 0.36$ for "sun", an incorrect parse from the phrase "sunny day". However, the other objects are successfully matched. Interestingly, ALOHa adds "snowboard" as an object, inferring that the physical item would need to be present given the verb "snowboarding".
|
| 370 |
+
3. CHAIR again does not flag any hallucinations. $\mathrm{ALOHa}_0$ for "tall building" is the mid-range 0.59, matched with the reference "building", indicating a somewhat uncertain attribute. This may be reasonable given the point of view in the image.
|
| 371 |
+
4. CHAIR finds no hallucinations. "Cloudy sky" receives a somewhat low $\mathrm{ALOHa_0} = 0.45$ . Although this phrase is accurate given the image, this is a failure case in which the references are incomplete.
|
| 372 |
+
|
| 373 |
+
Next, we discuss Figure A6, showing samples that were labeled to contain a hallucination. Recall that labels capture all types of caption errors, including those other than object hallucinations, to serve as a valuable source for research around general caption correctness. As a result, there exist nonobject hallucinations in HAT that are impossible for CHAIR or ALOHa to localize. From left to right:
|
| 374 |
+
|
| 375 |
+
1. The attribute "tall" is labeled as a hallucination, as the building next to the bus is only one story. Similar to sample 3 in Figure A5, $\mathrm{ALOHa_0}$ for "tall building" is somewhat uncertain at 0.59. Other objects are correctly grounded.
|
| 376 |
+
2. The object "table" is a hallucinated, misclassified object; e.g., one reference opts for the more general "wooden surface." However, the reference mentions a "table" that it is placed on, leading CHAIR to avoid considering it as a hallucination. For ALOHa, this example
|
| 377 |
+
|
| 378 |
+
shows one of the $2.97\%$ of cases (Table 3) where ALOHa hallucinates a reference object, "dining table". The candidate "round wooden table" is matched to it, with an erroneously high $\mathrm{ALOHa_0}$ of 0.74.
|
| 379 |
+
|
| 380 |
+
3. This sample contains a complex error, in which the arrow is not, in fact, “pointing in different directions.” This non-object hallucination is impossible for the object-specific CHAIR and ALOHa to localize correctly. However, it demonstrates ALOHa’s capability to extract more complex attributes such as “red street sign” and “orange detour sign.”
|
| 381 |
+
4. The cat's location "on top of a small chair" is labeled as an error. CHAIR does not flag any hallucinations. $\mathrm{ALOHa_0}$ for "small chair" is 0.59, yet both metrics cannot capture the specific relation.
|
| 382 |
+
|
| 383 |
+

|
| 384 |
+
Caption
|
| 385 |
+
|
| 386 |
+
"A mother holding a child in a swimming pool, with the two of them possibly playing or swimming together."
|
| 387 |
+
|
| 388 |
+

|
| 389 |
+
Caption
|
| 390 |
+
|
| 391 |
+
"Two bunches of bananas and an orange, with possibly other fruit, sitting on a cloth or table."
|
| 392 |
+
|
| 393 |
+

|
| 394 |
+
Caption
|
| 395 |
+
|
| 396 |
+
"A couple walking across a bridge, with a gate and trees in the background."
|
| 397 |
+
|
| 398 |
+

|
| 399 |
+
Caption
|
| 400 |
+
|
| 401 |
+
"A father and two or three children playing on a trampoline in the snow."
|
| 402 |
+
|
| 403 |
+
# CHAIR
|
| 404 |
+
|
| 405 |
+
Hallucinated: None
|
| 406 |
+
|
| 407 |
+

|
| 408 |
+
|
| 409 |
+
# CHAIR
|
| 410 |
+
|
| 411 |
+
Hallucinated: "table" X
|
| 412 |
+
|
| 413 |
+

|
| 414 |
+
|
| 415 |
+
# CHAIR
|
| 416 |
+
|
| 417 |
+
Hallucinated: None
|
| 418 |
+
|
| 419 |
+

|
| 420 |
+
|
| 421 |
+
# CHAIR
|
| 422 |
+
|
| 423 |
+
Hallucinated: None
|
| 424 |
+
|
| 425 |
+

|
| 426 |
+
|
| 427 |
+
# ALoHa (ours)
|
| 428 |
+
|
| 429 |
+
Candidate
|
| 430 |
+
|
| 431 |
+

|
| 432 |
+
Figure A4: Qualitative samples of ALOHa evaluated on the Flickr-30k dataset, with candidate captions generated by IC3 (Chan et al., 2023). Hallucinated objects in the caption text are red and bolded. See Appendix D.1 for discussion.
|
| 433 |
+
|
| 434 |
+
swimming
|
| 435 |
+
|
| 436 |
+

|
| 437 |
+
|
| 438 |
+
pool
|
| 439 |
+
|
| 440 |
+

|
| 441 |
+
|
| 442 |
+
#
|
| 443 |
+
|
| 444 |
+

|
| 445 |
+
|
| 446 |
+

|
| 447 |
+
|
| 448 |
+

|
| 449 |
+
|
| 450 |
+

|
| 451 |
+
|
| 452 |
+
#
|
| 453 |
+
|
| 454 |
+

|
| 455 |
+
|
| 456 |
+
4
|
| 457 |
+
|
| 458 |
+

|
| 459 |
+
|
| 460 |
+
.
|
| 461 |
+
|
| 462 |
+

|
| 463 |
+
|
| 464 |
+
$\therefore m = \frac{3}{11}$ ;
|
| 465 |
+
|
| 466 |
+

|
| 467 |
+
|
| 468 |
+

|
| 469 |
+
|
| 470 |
+

|
| 471 |
+
|
| 472 |
+

|
| 473 |
+
|
| 474 |
+

|
| 475 |
+
|
| 476 |
+

|
| 477 |
+
|
| 478 |
+

|
| 479 |
+
|
| 480 |
+

|
| 481 |
+
|
| 482 |
+

|
| 483 |
+
|
| 484 |
+

|
| 485 |
+
|
| 486 |
+

|
| 487 |
+
|
| 488 |
+

|
| 489 |
+
|
| 490 |
+

|
| 491 |
+
|
| 492 |
+

|
| 493 |
+
|
| 494 |
+

|
| 495 |
+
|
| 496 |
+

|
| 497 |
+
|
| 498 |
+

|
| 499 |
+
|
| 500 |
+

|
| 501 |
+
|
| 502 |
+

|
| 503 |
+
|
| 504 |
+

|
| 505 |
+
|
| 506 |
+

|
| 507 |
+
|
| 508 |
+

|
| 509 |
+
|
| 510 |
+

|
| 511 |
+
|
| 512 |
+

|
| 513 |
+
|
| 514 |
+

|
| 515 |
+
|
| 516 |
+

|
| 517 |
+
|
| 518 |
+

|
| 519 |
+
|
| 520 |
+

|
| 521 |
+
|
| 522 |
+

|
| 523 |
+
|
| 524 |
+

|
| 525 |
+
|
| 526 |
+

|
| 527 |
+
|
| 528 |
+

|
| 529 |
+
Caption
|
| 530 |
+
|
| 531 |
+
"A bunch of people that are flying kites."
|
| 532 |
+
|
| 533 |
+
# References
|
| 534 |
+
|
| 535 |
+
- A crowd of people fly kites on a hill.
|
| 536 |
+
|
| 537 |
+
- A crowd of people flying kites in grey cloud filled sky.
|
| 538 |
+
A group of people flying kites on a hill on a cloudy day.
|
| 539 |
+
People at the park flying kites and walking
|
| 540 |
+
People gathered on a hill, some flying.
|
| 541 |
+
kites.
|
| 542 |
+
|
| 543 |
+
# CHAIR
|
| 544 |
+
|
| 545 |
+
Hallucinated: None
|
| 546 |
+
|
| 547 |
+

|
| 548 |
+
Caption
|
| 549 |
+
|
| 550 |
+
"A person snowboarding down the slope on a sunny day."
|
| 551 |
+
|
| 552 |
+
# References
|
| 553 |
+
|
| 554 |
+
- a man is skiing in a snowy hill
|
| 555 |
+
- a person snowboarding down a slightly steep hill
|
| 556 |
+
- A snowboarder snowboarding down snow on a slope.
|
| 557 |
+
- A man snowboarding down a snowy ski slope.
|
| 558 |
+
- An expert snowboarder navigates his way down a steep run.
|
| 559 |
+
|
| 560 |
+
# CHAIR
|
| 561 |
+
|
| 562 |
+
Hallucinated: None
|
| 563 |
+
|
| 564 |
+

|
| 565 |
+
Caption
|
| 566 |
+
|
| 567 |
+
"A very tall building with a clock on the
|
| 568 |
+
|
| 569 |
+
# References
|
| 570 |
+
|
| 571 |
+
This is a clock on top of a tower. Clocks are brightly lit on a huge
|
| 572 |
+
tower.
|
| 573 |
+
- A tower with a clock is displayed in the evening.
|
| 574 |
+
- The top of a steeped church building with clocks and small windows.
|
| 575 |
+
- A picture of a church tower during the day.
|
| 576 |
+
|
| 577 |
+
# CHAIR
|
| 578 |
+
|
| 579 |
+
Hallucinated: None
|
| 580 |
+
|
| 581 |
+

|
| 582 |
+
Caption
|
| 583 |
+
|
| 584 |
+
"Two boats sitting on the shore under a cloudy sky."
|
| 585 |
+
|
| 586 |
+
# References
|
| 587 |
+
|
| 588 |
+
- two boats sitting on the shore close to the water
|
| 589 |
+
- Two white boats sit on a rocky shoreline.
|
| 590 |
+
- Two boats on a beach near the river bank
|
| 591 |
+
- A boat tied up on the bank of a lake.
|
| 592 |
+
- Two small boats are seen against a beautiful sunset.
|
| 593 |
+
|
| 594 |
+
# CHAIR
|
| 595 |
+
|
| 596 |
+
Hallucinated: None
|
| 597 |
+
|
| 598 |
+
# ALOHa
|
| 599 |
+
|
| 600 |
+
Candidate
|
| 601 |
+
|
| 602 |
+

|
| 603 |
+
|
| 604 |
+
ALOHA
|
| 605 |
+
|
| 606 |
+

|
| 607 |
+
|
| 608 |
+
$\mathrm{DHA}_{6}$
|
| 609 |
+
|
| 610 |
+
Reference
|
| 611 |
+
|
| 612 |
+

|
| 613 |
+
|
| 614 |
+

|
| 615 |
+
|
| 616 |
+

|
| 617 |
+
|
| 618 |
+

|
| 619 |
+
|
| 620 |
+

|
| 621 |
+
|
| 622 |
+

|
| 623 |
+
|
| 624 |
+

|
| 625 |
+
|
| 626 |
+

|
| 627 |
+
|
| 628 |
+

|
| 629 |
+
|
| 630 |
+

|
| 631 |
+
|
| 632 |
+

|
| 633 |
+
|
| 634 |
+

|
| 635 |
+
|
| 636 |
+
1.0
|
| 637 |
+
|
| 638 |
+

|
| 639 |
+
|
| 640 |
+
person
|
| 641 |
+
|
| 642 |
+

|
| 643 |
+
|
| 644 |
+

|
| 645 |
+
|
| 646 |
+

|
| 647 |
+
|
| 648 |
+

|
| 649 |
+
|
| 650 |
+

|
| 651 |
+
|
| 652 |
+

|
| 653 |
+
|
| 654 |
+

|
| 655 |
+
|
| 656 |
+

|
| 657 |
+
|
| 658 |
+

|
| 659 |
+
|
| 660 |
+

|
| 661 |
+
|
| 662 |
+

|
| 663 |
+
|
| 664 |
+

|
| 665 |
+
Figure A5: Randomly selected qualitative examples of ALOHa evaluated on the HAT dataset when there is no hallucination in the ground truth. See Appendix D.2 for discussion.
|
| 666 |
+
|
| 667 |
+

|
| 668 |
+
|
| 669 |
+

|
| 670 |
+
|
| 671 |
+

|
| 672 |
+
|
| 673 |
+

|
| 674 |
+
Figure A6: Randomly selected qualitative examples of ALOHa evaluated on the HAT dataset when there is a hallucination in the ground truth. These hallucinations are generally challenging to detect. See Appendix D.2 for discussion.
|
| 675 |
+
|
| 676 |
+
# Caption
|
| 677 |
+
|
| 678 |
+
"A bus driving down a street next to a tall building."
|
| 679 |
+
|
| 680 |
+
# References
|
| 681 |
+
|
| 682 |
+
- A large long bus on a city street.
|
| 683 |
+
- A city bus on the street in front of buildings.
|
| 684 |
+
- A blue bus traveling down an incline of a busy street.
|
| 685 |
+
A city bus with full side
|
| 686 |
+
advertisement in front of a building
|
| 687 |
+
- a public transit bus on a city street
|
| 688 |
+
|
| 689 |
+
# CHAIR
|
| 690 |
+
|
| 691 |
+
Hallucinated: None
|
| 692 |
+
|
| 693 |
+
# ALOHa (ours)
|
| 694 |
+
|
| 695 |
+
Candidate
|
| 696 |
+
|
| 697 |
+
bus
|
| 698 |
+
|
| 699 |
+
street
|
| 700 |
+
|
| 701 |
+
tall building
|
| 702 |
+
|
| 703 |
+
ALOHA
|
| 704 |
+
|
| 705 |
+
1.0
|
| 706 |
+
|
| 707 |
+
1.0
|
| 708 |
+
|
| 709 |
+
0.59
|
| 710 |
+
|
| 711 |
+
Reference
|
| 712 |
+
|
| 713 |
+
bus
|
| 714 |
+
|
| 715 |
+
street
|
| 716 |
+
|
| 717 |
+
building
|
| 718 |
+
|
| 719 |
+

|
| 720 |
+
|
| 721 |
+
# Caption
|
| 722 |
+
|
| 723 |
+
"A round wooden table with a small pizza."
|
| 724 |
+
|
| 725 |
+
# References
|
| 726 |
+
|
| 727 |
+
A platter with a baked good on it
|
| 728 |
+
- A plain piece of bread resting on a wooden plate.
|
| 729 |
+
A whole cheese pizza sitting on a wood pan on a table.
|
| 730 |
+
- a close up of a pizza with a wooden surface on a table
|
| 731 |
+
- A white cracker looking pizza is on a cutting board.
|
| 732 |
+
|
| 733 |
+
# CHAIR
|
| 734 |
+
|
| 735 |
+
Hallucinated: None
|
| 736 |
+
|
| 737 |
+
# ALOHa (ours)
|
| 738 |
+
|
| 739 |
+
Candidate
|
| 740 |
+
|
| 741 |
+
round
|
| 742 |
+
|
| 743 |
+
wooden table
|
| 744 |
+
|
| 745 |
+
small pizza
|
| 746 |
+
|
| 747 |
+
ALOHA
|
| 748 |
+
|
| 749 |
+
0.74
|
| 750 |
+
|
| 751 |
+
Reference
|
| 752 |
+
|
| 753 |
+
dining table
|
| 754 |
+
|
| 755 |
+
0.69
|
| 756 |
+
|
| 757 |
+
pizza
|
| 758 |
+
|
| 759 |
+

|
| 760 |
+
|
| 761 |
+
# Caption
|
| 762 |
+
|
| 763 |
+
"A street sign with a detour pointing in different directions."
|
| 764 |
+
|
| 765 |
+
# References
|
| 766 |
+
|
| 767 |
+
- An orange detour sign hanging from a metal pole under a cloudy sky.
|
| 768 |
+
- Red street sign with black letters sitting on metal post.
|
| 769 |
+
- A street pole with an orange detour sign.
|
| 770 |
+
- a close up of a street sign with a sky background
|
| 771 |
+
- A red detour sign that is on a pole.
|
| 772 |
+
|
| 773 |
+
# CHAIR
|
| 774 |
+
|
| 775 |
+
Hallucinated: None
|
| 776 |
+
|
| 777 |
+
# ALOHa (ours)
|
| 778 |
+
|
| 779 |
+
Candidate
|
| 780 |
+
|
| 781 |
+
street sign
|
| 782 |
+
|
| 783 |
+
detour
|
| 784 |
+
|
| 785 |
+
$\mathsf{OHA}_{0}$ Refer
|
| 786 |
+
|
| 787 |
+
83 red street
|
| 788 |
+
|
| 789 |
+
59
|
| 790 |
+
|
| 791 |
+
orange detour sign
|
| 792 |
+
|
| 793 |
+

|
| 794 |
+
|
| 795 |
+
# Caption
|
| 796 |
+
|
| 797 |
+
"A cat stands on top of a small chair."
|
| 798 |
+
|
| 799 |
+
# References
|
| 800 |
+
|
| 801 |
+
- A cat perched on top of a dresser.
|
| 802 |
+
- A cat walks along the top of a bedroom dresser.
|
| 803 |
+
- a cat sits on a dresser next to a rocking chair
|
| 804 |
+
- Black cat standing on a blue dresser next to a chair.
|
| 805 |
+
- A cat laying on top of a blue dresser near a chair.
|
| 806 |
+
|
| 807 |
+
# CHAIR
|
| 808 |
+
|
| 809 |
+
Hallucinated: None
|
| 810 |
+
|
| 811 |
+
# ALOHa (ours)
|
| 812 |
+
|
| 813 |
+
Candidate
|
| 814 |
+
|
| 815 |
+
cat
|
| 816 |
+
|
| 817 |
+
ALOHA
|
| 818 |
+
|
| 819 |
+
1.0
|
| 820 |
+
|
| 821 |
+
0.59
|
| 822 |
+
|
| 823 |
+
Reference
|
| 824 |
+
|
| 825 |
+
cat
|
| 826 |
+
|
| 827 |
+
chair
|
| 828 |
+
|
| 829 |
+
# Description Rating Tool
|
| 830 |
+
|
| 831 |
+
Instructions: Review the image and text caption of that image, then click on any content words (nouns, adjectives, verbs, and numbers) in the caption which are not necessarily supported by the image content. Do not click on words like "The", "A", or "An".
|
| 832 |
+
|
| 833 |
+
For example, if the caption says "The cat is sleeping on the rug," yet there is nothing on the rug, click on the words "cat" and "sleeping". If the caption says "The vase contains three red roses," but there are only two roses in the image, click on the word "three".
|
| 834 |
+
|
| 835 |
+
If the caption uses an incorrect verb to describe an action in the image, click on that word. For example, if the caption reads "The woman is swimming in the ocean," but the image shows the woman walking on the beach, click on the word "swimming."
|
| 836 |
+
|
| 837 |
+
If a word is a compound word, such as "sofa chair," select either both words or neither word.
|
| 838 |
+
|
| 839 |
+
If it is impossible to tell whether a word is supported by the image or not, select that word anyways. For example, if the caption says "The child is smiling" and the image only shows the back of the child, it may be difficult to tell the child's facial expression. In this case, select the word "smiling" even if it's unclear whether or not it is accurate.
|
| 840 |
+
|
| 841 |
+
If no words are incorrect, select "Caption is correct". If either the caption or the image is not visible, press the "Not Visible" button.
|
| 842 |
+
|
| 843 |
+
HIT Tasks Completed: 100
|
| 844 |
+
|
| 845 |
+

|
| 846 |
+
Figure A7: The hallucination dataset collection interface.
|
| 847 |
+
|
| 848 |
+
Caption: A man holding a tennis racquet on a tennis court.
|
| 849 |
+
|
| 850 |
+
Select any incorrect words:
|
| 851 |
+
|
| 852 |
+
man
|
| 853 |
+
|
| 854 |
+
holding
|
| 855 |
+
|
| 856 |
+
a
|
| 857 |
+
|
| 858 |
+
tennis
|
| 859 |
+
|
| 860 |
+
racquet
|
| 861 |
+
|
| 862 |
+
on
|
| 863 |
+
|
| 864 |
+
a
|
| 865 |
+
|
| 866 |
+
tennis
|
| 867 |
+
|
| 868 |
+
court.
|
| 869 |
+
|
| 870 |
+
Caption is correct
|
| 871 |
+
|
| 872 |
+
Image/Captions Not Visible
|
| 873 |
+
|
| 874 |
+
Submit
|
alohaanewmeasureforhallucinationincaptioningmodels/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:813e364b95c481239b55d821c866b30e3bae9ac123774490f60356ec8de4bec7
|
| 3 |
+
size 526558
|
alohaanewmeasureforhallucinationincaptioningmodels/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:abcf0a9f5b29128b4b23d5b5fced7451677b6b348f8208292f2c36fe4cd45c0f
|
| 3 |
+
size 823592
|
amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/0a65d3d9-987a-47b3-b9d9-a1053e256e3c_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8d626242a2360bf2869457b7c051987e51ba02810da2880fb37ae9a904106811
|
| 3 |
+
size 143502
|
amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/0a65d3d9-987a-47b3-b9d9-a1053e256e3c_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:108bec871c45a6837374b0f05af8fc9c165bb3eb1a6eaafb8802bc2388daa96d
|
| 3 |
+
size 184482
|
amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/0a65d3d9-987a-47b3-b9d9-a1053e256e3c_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ce58614312c4a092d043b684d82fc9bb3a283f4b748d4f4498987e6819cbbd7f
|
| 3 |
+
size 5079327
|
amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/full.md
ADDED
|
@@ -0,0 +1,626 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Multi-Aspect Framework for Counter Narrative Evaluation using Large Language Models
|
| 2 |
+
|
| 3 |
+
Content Warning: This paper contains potentially offensive and harmful text.
|
| 4 |
+
|
| 5 |
+
Jaylen Jones, Lingbo Mo, Eric Fosler-Lussier, and Huan Sun
|
| 6 |
+
|
| 7 |
+
The Ohio State University
|
| 8 |
+
|
| 9 |
+
{jones.6278,mo.169,sun.397}@osu.edu; fosler@cse.ohio-state.edu
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Counter narratives — informed responses to hate speech contexts designed to refute hateful claims and de-escalate encounters — have emerged as an effective hate speech intervention strategy. While previous work has proposed automatic counter narrative generation methods to aid manual interventions, the evaluation of these approaches remains underdeveloped. Previous automatic metrics for counter narrative evaluation lack alignment with human judgment as they rely on superficial reference comparisons instead of incorporating key aspects of counter narrative quality as evaluation criteria. To address prior evaluation limitations, we propose a novel evaluation framework prompting LLMs to provide scores and feedback for generated counter narrative candidates using 5 defined aspects derived from guidelines from counter narrative specialized NGOs. We found that LLM evaluators achieve strong alignment to human-annotated scores and feedback and outperform alternative metrics, indicating their potential as multi-aspect, reference-free and interpretable evaluators for counter narrative evaluation. $^{1}$
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
As online platforms allow for rapid and widespread dissemination of hate speech, automatic intervention strategies have become a growing necessity. Counter narratives — informed responses to hate speech designed to refute hateful claims and de-escalate encounters — have gained attention for challenging such content while minimizing free speech infringement concerns in content removal strategies. Despite the establishment of numerous NGOs² for hate speech intervention using counter narratives,
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Figure 1: Example of our multi-aspect counter narrative evaluation framework.
|
| 21 |
+
|
| 22 |
+
effective manual intervention is impractical due to a constant influx of online toxicity.
|
| 23 |
+
|
| 24 |
+
To augment manual intervention, numerous counter narrative generation approaches have emerged, but evaluation remains difficult. Metrics like BLEU (Papineni et al., 2002) and ROUGE-L (Li et al., 2016) can misalign with human judgment, as references only implicitly define the key aspects of good counter narratives. On the other hand, human evaluation using trained workers is costly and time-consuming. Previous work has used LLMs for aspect-based evaluation to address similar limitations in tasks like summarization, but has overlooked their application in socially-oriented tasks, raising questions about their effectiveness in representing guidelines requiring social understanding (Magooda et al., 2023).
|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
Figure 2: Validation pipeline for our counter narrative evaluation framework. (Left) Evaluation prompt template including task description, a ChatGPT-generated aspect score rubric, and hate speech/counter narrative pair. (Right) LLM evaluation scores are generated for counter narratives and are compared to AMT-annotated evaluation.
|
| 28 |
+
|
| 29 |
+
We propose a novel multi-aspect counter narrative evaluation framework leveraging the capabilities of pretrained LLMs to determine the quality of counter narrative candidates (Figure 1). LLMs provide evaluation scores and feedback based on five key aspects inspired by NGO guidelines: specificity, opposition, relatedness, toxicity, and fluency. This approach improves alignment with human judgment while generating interpretable feedback and reducing reference reliance. We validate our evaluation framework by correlating LLM-generated scores with human-annotated scores and qualitatively analyzing feedback.
|
| 30 |
+
|
| 31 |
+
# 2 Related Work
|
| 32 |
+
|
| 33 |
+
Previous studies produced automatic counter narrative generation using counter narrative datasets (Mathew et al., 2018; Qian et al., 2019; Chung et al., 2019; Bonaldi et al., 2022) or prompting strategies (Ashida and Komachi, 2022; Zheng et al., 2023b). The most prominent dataset is the expert-annotated Multitarget-CONAN $^{3}$ (Fanton et al., 2021) which contains pairs for various target groups. For evaluation, previous studies (Ashida and Komachi, 2022; Chung et al.,
|
| 34 |
+
|
| 35 |
+
2021; Zhu and Bhat, 2021; Bonaldi et al., 2023) used overlap and diversity metrics (Papineni et al., 2002; Lin, 2004; Wang and Wan, 2018; Li et al., 2016) in addition to trained human annotation. Tekiroğlu et al. (2022) uses BLEU and ROUGE-L to select a model's best generated candidate before conducting other automatic and human evaluation. However, these metrics are incapable of representing key, NGO-specified aspects of counter narrative quality solely with reference comparisons. Meanwhile, the annotator training procedure established by Fanton et al. (2021) relies directly on the use of expert NGO operators which can be costly, time-consuming, and difficult to reproduce. In other generation tasks, SoTA LLMs have been leveraged to evaluate generated candidates, leading to better alignment with human judgment (Chiang and Lee, 2023; Liu et al., 2023; Zheng et al., 2023a; Hsu et al., 2023; Kim et al., 2024). Jiang et al. (2023) previously used GPT-3.5 for counter narrative evaluation; we develop a novel evaluation framework by prompting LLMs with NGO-derived aspects to generate scores and feedback. We validate our framework against human evaluation, enabling an interpretable and multi-aspect approach.
|
| 36 |
+
|
| 37 |
+
# Evaluation Aspects
|
| 38 |
+
|
| 39 |
+
Specificity - how much the counter narrative presents focused and specific arguments that effectively counter the key ideas within the hate speech example through the use of in-depth arguments, nuanced reasoning, and supporting evidence.
|
| 40 |
+
|
| 41 |
+
Opposition - how much the counter narrative opposes and contradicts the hate speech example through the expression of an opposing sentiment regardless of the argument's effectiveness or persuasiveness.
|
| 42 |
+
|
| 43 |
+
Relatedness - the association between the counter narrative response and hate speech example based on contextual or semantic similarity.
|
| 44 |
+
|
| 45 |
+
Toxicity - how rude, unreasonable, or disrespectful a response is; definition from Tekiroglu et al. (2022);
|
| 46 |
+
|
| 47 |
+
Fluency - the quality of a response based on whether they are well-written and grammatically correct; definition from Fu et al. (2023).
|
| 48 |
+
|
| 49 |
+
Table 1: Key evaluation aspects used in our counter narrative evaluation framework.
|
| 50 |
+
|
| 51 |
+
# 3 Data and Methodology
|
| 52 |
+
|
| 53 |
+
For evaluation, we define key aspects of counter narrative quality shown in Table 1. We specifically draw inspiration from NGO guidelines that advocate for constructive, focused counter narrative responses that challenge hate speech claims while de-escalating encounters in a non-toxic manner. From this, we derive specificity and relatedness, focusing on the association between the counter narrative arguments and the hate speech claims; opposition, focusing on how effectively the counter narrative denounces the hate speech; toxicity, focusing on responding civilly and positively; and fluency, focusing on the coherence of the response. By directly integrating these aspects within our LLM evaluation framework through the use of prompting, we allow for an automatic evaluation approach that is directly predicated on relevant characteristics of counter narrative quality as its criteria.
|
| 54 |
+
|
| 55 |
+
We generate counter narratives to 180 Multitarget-CONAN test set examples using (1) DialoGPT trained on 4003 examples, the best model in Tekiroglu et al. (2022), (2) zero-shot prompted ChatGPT (OpenAI, 2022) and (3) Vicuna (Chiang et al., 2023) as closed/open-source model representatives. We evaluate these generated examples with our approach and measure the correlation to human-generated scores. While previous counter narrative work have utilized trained expert annotators for hate speech/counter narrative pair post-editing and evaluation (Fanton
|
| 56 |
+
|
| 57 |
+
Evaluation Metric Correlations
|
| 58 |
+
|
| 59 |
+
<table><tr><td rowspan="2">Metric</td><td colspan="3">AMT</td><td colspan="3">AMT Overall</td></tr><tr><td>Pear.</td><td>Spear.</td><td>Kend.</td><td>Pear.</td><td>Spear.</td><td>Kend.</td></tr><tr><td>BLEU1</td><td>-0.041</td><td>-0.102</td><td>-0.071</td><td>-0.048</td><td>-0.083</td><td>-0.06</td></tr><tr><td>BLEU3</td><td>0.014</td><td>-0.085</td><td>-0.075</td><td>0.001</td><td>-0.083</td><td>-0.071</td></tr><tr><td>BLEU4</td><td>-0.032</td><td>-0.187</td><td>-0.141</td><td>-0.04</td><td>-0.187</td><td>-0.143</td></tr><tr><td>ROUGE-L</td><td>-0.052</td><td>-0.111</td><td>-0.079</td><td>-0.092</td><td>-0.122</td><td>-0.087</td></tr><tr><td>BERTScore</td><td>-0.099</td><td>-0.092</td><td>-0.062</td><td>-0.102</td><td>-0.089</td><td>-0.063</td></tr><tr><td>BARTScore - Recall</td><td>0.581</td><td>0.565</td><td>0.405</td><td>0.596</td><td>0.564</td><td>0.417</td></tr><tr><td>ChatGPT Multi-Aspect</td><td>0.664</td><td>0.626</td><td>0.481</td><td>0.632</td><td>0.609</td><td>0.475</td></tr><tr><td>ChatGPT Overall</td><td>0.658</td><td>0.633</td><td>0.517</td><td>0.654</td><td>0.624</td><td>0.521</td></tr><tr><td>Vicuna-33b v.1.3 Multi-Aspect</td><td>0.824</td><td>0.782</td><td>0.613</td><td>0.815</td><td>0.771</td><td>0.616</td></tr><tr><td>Vicuna-33b v.1.3 Overall</td><td>0.718</td><td>0.698</td><td>0.544</td><td>0.745</td><td>0.687</td><td>0.544</td></tr><tr><td>GPT-4 Multi-Aspect</td><td>0.806</td><td>0.710</td><td>0.557</td><td>0.762</td><td>0.694</td><td>0.551</td></tr><tr><td>GPT-4 Overall</td><td>0.788</td><td>0.733</td><td>0.597</td><td>0.783</td><td>0.721</td><td>0.600</td></tr><tr><td>Prometheus-13b Multi-Aspect</td><td>0.784</td><td>0.671</td><td>0.510</td><td>0.763</td><td>0.643</td><td>0.495</td></tr><tr><td>Prometheus-13b Overall</td><td>0.679</td><td>0.567</td><td>0.458</td><td>0.667</td><td>0.570</td><td>0.468</td></tr></table>
|
| 60 |
+
|
| 61 |
+
Table 2: Correlation of evaluation metric and AMT scores for the entire evaluation set; best correlation is in bold, second is underlined. Full version in Table G.1
|
| 62 |
+
|
| 63 |
+
et al., 2021), we are unable to reproduce this process due to a lack of direct access to expert NGO operators. As an alternative, each candidate counter narrative in our study is evaluated by Amazon Mechanical Turk (AMT) workers to represent human interpretation of NGO guidelines for the task. Non-expert annotation from AMT can often be less reliable than evaluation from more trusted sources; in order to address this limitation, we conduct an extensive qualification and monitoring procedure. All workers within our study must complete a qualification task as shown in Figure E.2 that involves reading training material describing what a counter narrative is, a description of the evaluation task, our evaluation aspects, and hate speech/counter narrative examples. Each worker must then pass the qualification test shown in Figures E.4 and E.5 before being able to provide any evaluation. In addition, we maintained active communication with each worker throughout the study and manually verified each provided evaluation score and feedback to ensure the task is performed in an appropriate manner. As a result, we ensured high-quality annotation in our study despite the lack of expert NGO operators; additional AMT study details can be found in Appendix E.
|
| 64 |
+
|
| 65 |
+
For automatic evaluation, we evaluate each candidate with a single run of ChatGPT, Vicuna, GPT-4 (OpenAI, 2023), and Prometheus (Kim et al., 2024) using the evaluation prompt shown in Figure 2. Both human and LLM evaluations re
|
| 66 |
+
|
| 67 |
+
<table><tr><td></td><td>Aligned Counter Narrative Evaluation</td></tr><tr><td>Hate Speech</td><td>LGBTs just want to brainwash our children. They should not be taught about gender identity and sexual orientation in school.</td></tr><tr><td>DialoGPT Generated Counter Narrative</td><td>People have the right to be interested in their own gender identity and sexual orientation.</td></tr><tr><td>Human Specificity Score and Feedback</td><td>2 stars. The counter entirely ignores the claim that LGBT people want to brainwash people, and it only somewhat and indirectly touches upon the idea that schools shouldn’t be teaching these topics in school by suggesting people have a right to be interested in their own sexuality.</td></tr><tr><td>Vicuna-generated Specificity Score and Feedback</td><td>2 stars ... While it acknowledges the right to explore one’s gender identity and sexual orientation, it does not directly counter the claim that LGBT individuals are trying to “brainwash” children or that they should not be taught about these topics in school ...</td></tr></table>
|
| 68 |
+
|
| 69 |
+
Table 3: Example of effective counter narrative evaluation from LLM evaluator.
|
| 70 |
+
|
| 71 |
+
sult in a 1-5 star score per aspect that is aggregated into a multi-aspect average and a final 1-5 star overall score. We also collect explanations from the AMT workers and LLM evaluators to allow for a qualitative comparison of their score justifications, allowing us to verify whether LLM evaluators are right for the right reasons. We also evaluate each example using automatic metrics: BLEU, ROUGE-L, METEOR (Banerjee and Lavie, 2005), BERTScore (Zhang et al., 2019), and BARTScore (Yuan et al., 2021) using Multitarget-CONAN examples as references for comparison to alternative metrics.
|
| 72 |
+
|
| 73 |
+
# 4 Results
|
| 74 |
+
|
| 75 |
+
# 4.1 Evaluation Metric Correlation
|
| 76 |
+
|
| 77 |
+
We measure the correlation between automatic and AMT-annotated evaluation scores using Pearson, Spearman, and Kendall coefficients to represent alignment of each evaluation metric to human judgment, presenting our results in Table 2. The overlap metrics used in previous studies achieve poor or negative correlations for our evaluation set. BERTScore's more advanced reference comparison also achieves poor correlations, suggesting that counter narrative references may not effectively represent NGO guidelines. BARTScore using Recall (described in Appendix D) achieves strong correlations; correlations for more variations are shown in Table G.1. LLM evaluators achieve the highest correlations with AMT-annotated evaluation scores due to directly evaluating relevant aspects of counter narrative quality. This suggests that LLM evaluators can serve as a better alternative for counter narrative evaluation with improved alignment while offering interpretability and alleviating reference reliance. In addition, our multi-aspect framework leads to improved evaluation performance for open
|
| 78 |
+
|
| 79 |
+
source models and allows for Vicuna to achieve comparable performance to GPT-4. Our interpretation of multi-aspect improvement within our evaluation framework for open-source models is discussed in our qualitative evaluation (Sec. 4.3).
|
| 80 |
+
|
| 81 |
+
# 4.2 Fine-grained Analysis
|
| 82 |
+
|
| 83 |
+
We also measure correlations per counter narrative generation model to assess robustness to generation approach and associated stylistic changes (shown in Tables G.2, G.3 and G.4). While we computed correlations for ChatGPT-generated candidates, the low variance in their AMT scores makes the correlations uninformative.
|
| 84 |
+
|
| 85 |
+
DialoGPT. Overlap-based metrics are relatively more effective, indicating their viability for evaluating supervised models trained on the same distribution as the references used in evaluation as done in Tekiroglu et al. (2022). However, LLM evaluators still outperform these metrics. Notably, ChatGPT, Vicuna, and Prometheus show decreased performance, particularly when using the overall score rather than our multi-aspect approach.
|
| 86 |
+
|
| 87 |
+
Vicuna. LLM evaluators show decreased performance for Vicuna-generated candidates, allowing BARTScore variants to achieve near-best performance. Despite achieving top performance in all other cases, GPT-4 underperforms when evaluating only Vicuna-generated candidates. The justifications for both findings are discussed in our qualitative evaluation (Sec. 4.3) and Tables H.2 and H.4, highlighting frequent misunderstandings of disfluent DialoGPT candidates and GPT-4 underrating Vicuna-generated candidates due to self-enhancement bias.
|
| 88 |
+
|
| 89 |
+
# 4.3 Qualitative Evaluation
|
| 90 |
+
|
| 91 |
+
Additionally, we qualitatively compare LLM-generated and human-annotated feedback based on the use of similar social reasoning and un
|
| 92 |
+
|
| 93 |
+
<table><tr><td></td><td>Multi-aspect Improvement in Vicuna Counter Narrative Evaluation</td></tr><tr><td>Hate Speech</td><td>This is not the USA. We do not accept immigrants. And if they live here, they should have no rights since they are not true citizens.</td></tr><tr><td>Generated Counter Narrative</td><td>We are a nation built on diversity and founded by immigrants. Everyone, regardless of their back-ground, deserves respect and basic human rights. Let's stand together for unity and inclusivity.</td></tr><tr><td>AMT Overall Score Average</td><td>4 stars</td></tr><tr><td>Vicuna-generated Overall Score and Feedback</td><td>1 star ... does not effectively challenge or debunk the abusive content ... The response lacks proper reasoning, evidence, or a respectful approach, and it may contribute to further escalation rather than de-escalation.</td></tr><tr><td></td><td>Vicuna Multi-aspect Scores</td></tr><tr><td>Vicuna-generated Toxicity Score and Feedback</td><td>5 stars ... The response promotes positive and healthy dialogue, encouraging understanding and respectful engagement.</td></tr><tr><td>Vicuna-generated Opposition Score and Feedback</td><td>5 stars ... It contradicts the hate speech by emphasizing the importance of diversity, inclusivity, and respect for all individuals, regardless of their background.</td></tr><tr><td>Vicuna-generated Specificity Score and Feedback</td><td>1 star. The counter narrative response lacks specificity and provides general arguments that do not directly address the key ideas within the hate speech example ...</td></tr></table>
|
| 94 |
+
|
| 95 |
+
Table 4: Example of improvement in Vicuna evaluation through the use of our multi-aspect framework; Vicuna initially gives a misaligned Overall score by negatively rating Opposition and Toxicity. However, these ratings are corrected when employing our multi-aspect framework, while maintaining an accurate Specificity rating.
|
| 96 |
+
|
| 97 |
+
derstanding. LLM evaluators mostly provide scores and feedback aligning with AMT annotation (shown in Table 3). Consistent with previous results, our multi-aspect evaluation framework results in aligned scores for examples where a single overall score diverges (shown in Tables 4 and H.1). This suggests that the decomposition of the task into multiple key aspects can enhance evaluation from weaker, open-source models by allowing them to better represent intricate NGO evaluation criteria.
|
| 98 |
+
|
| 99 |
+
However, we also identified that each LLM evaluator model was capable of misunderstanding the relationship between the generated counter narrative and hate speech example or conflating multiple aspects as shown in Tables H.2 and H.3, potentially leading to unaligned scores and explanations. ChatGPT was the most prone to lacking social nuance, often assigning safer scores (3-4 stars) to examples rated significantly higher or lower by AMT annotators as a result. In addition, ChatGPT, Vicuna, and Prometheus were much more likely to misunderstand DialoGPT-generated counter narrative responses that tend to be more incoherent and unpolished in nature. While GPT-4 was mostly unaffected by these qualities in DialoGPT-generated candidates, the model was prone to these common errors when evaluat
|
| 100 |
+
|
| 101 |
+
ing Vicuna-generated candidates and often underrated these examples. We propose that this could be a symptom of self-enhancement bias as proposed in Zheng et al. (2023a) with GPT-4 tending to rate Vicuna-generated candidates lower than AMT annotators due to the model opposing candidates less similar to its own generations.
|
| 102 |
+
|
| 103 |
+
# 5 Conclusion
|
| 104 |
+
|
| 105 |
+
This work proposes a novel counter narrative evaluation framework that utilizes the capabilities of LLMs to provide evaluation scores and feedback for counter narrative candidates based on a defined set of key evaluation aspects derived from NGO guidelines for effective counter narratives. Our experiments show that LLM evaluators effectively represent intricate NGO evaluation guidelines that require social nuance and understanding while providing aligned evaluation scores and feedback, showcasing their potential as a multi-aspect, interpretable, and reference-free counter narrative evaluation approach. In future work, we will continue to improve on this framework through additional prompting and finetuning strategies to address errors shown during qualitative evaluation while leveraging our LLM-generated evaluation scores for downstream counter narrative generation methods.
|
| 106 |
+
|
| 107 |
+
# 6 Ethical Considerations
|
| 108 |
+
|
| 109 |
+
Our work involves the use of human annotation for evaluating counter narrative responses to hate speech examples, leading to exposure to potentially offensive and harmful content for workers in our study. In order to alleviate the negative impacts of this exposure, we implement the mitigation procedure of Fanton et al. (2021). We also ensure that all workers within our AMT study are compensated fairly with an hourly rate exceeding the minimum wage and that privacy and confidentiality are maintained within our data collection process by avoiding the use of individual identifiers. More details related to our AMT study can be found in Appendix E.
|
| 110 |
+
|
| 111 |
+
In addition, our work explores the use of an automated approach to counter narrative evaluation by encoding relevant aspects of NGO guidelines within the evaluation criteria of LLMs. While we demonstrate that this approach can lead to evaluation scores and feedback that align with human interpretation of socially-oriented guidelines, the use of gold standard human evaluation should not be completely removed from the evaluation process of human-sensitive tasks. To ensure that counter narratives adhere to human standards for effective hate speech intervention, future evaluation efforts should incorporate our framework only alongside human annotations from diverse perspectives based on what constitutes hate speech and the most effective strategies for appropriate responses.
|
| 112 |
+
|
| 113 |
+
All research in this study was done in adherence to the licenses and intended purposes of the code, data, and models utilized.
|
| 114 |
+
|
| 115 |
+
# 7 Limitations
|
| 116 |
+
|
| 117 |
+
Lack of expert annotation. Previous counter narrative work from University of Trento and Fondazione Bruno Kessler has utilized annotators specifically trained over multiple weeks following the procedure used by Fanton et al. (2021) so that they became experts in hate speech/counter narrative pair post-editing and evaluation. However, we are unable to reproduce this training procedure due to lack of access to expert NGO operators and must rely on the use of crowdsourcing as an alternative. In order to address this limitation, we ensure high-quality results from Amazon Mechanical Turk through the use of a qualification task for each worker prior to any annotation (shown in Fig-
|
| 118 |
+
|
| 119 |
+
ures E.2, E.3, E.4, E.5) and active monitoring of evaluation from workers prior to use in our final results.
|
| 120 |
+
|
| 121 |
+
Alternative prompting strategies. In this work, we use LLM evaluators for counter narrative evaluation using a single answer grading approach where each model is prompted with one counter narrative response and asked to rate it from 1-5 stars. However, there are multiple alternative prompting strategies for LLM evaluators that are not explored in this work. These include the use of a 0-100 grading scale (Wang et al., 2023), the use of a reference in few-shot prompting, the use of a probability-weighted summation of LLM output scores to normalize scores (Liu et al., 2023), or pairwise comparison approaches (Zheng et al., 2023a). As a result, it will be necessary in future work to understand how these alternative evaluation strategies impact the ability of LLM evaluators for our task.
|
| 122 |
+
|
| 123 |
+
Sample size. Our evaluation framework was tested on 180 hate speech/counter narrative pairs containing Multitarget-CONAN hate speech and counter narratives generated from DialogGPT, ChatGPT, and Vicuna v1.3 33b. In future work, it will be necessary to continue to validate this evaluation framework for more examples including additional hate speech target groups and counter narrative generation approaches.
|
| 124 |
+
|
| 125 |
+
# 8 Acknowledgements
|
| 126 |
+
|
| 127 |
+
The authors would thank colleagues from the OSU NLP group and SLaTe Lab for their valuable comments and feedback. This research was sponsored in part by NSF CAREER #1942980 and the Ohio Supercomputer Center (Center, 2016, 2022). The views and conclusions contained herein are those of the authors and should not be interpreted as representing the official policies, either expressed or implied, of the U.S. government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes notwithstanding any copyright notice herein.
|
| 128 |
+
|
| 129 |
+
# References
|
| 130 |
+
|
| 131 |
+
Mana Ashida and Mamoru Komachi. 2022. Towards Automatic Generation of Messages Countering Online Hate Speech and Microaggressions. In Pro
|
| 132 |
+
|
| 133 |
+
ceedings of the Sixth Workshop on Online Abuse and Harms (WOAH), pages 11-23.
|
| 134 |
+
Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments. In Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization, pages 65-72, Ann Arbor, Michigan. Association for Computational Linguistics.
|
| 135 |
+
Helena Bonaldi, Giuseppe Attanasio, Debora Nozza, and Marco Guerini. 2023. Weigh Your Own Words: Improving Hate Speech Counter Narrative Generation via Attention Regularization. In Proceedings of the 1st Workshop on CounterSpeech for Online Abuse (CS4OA), pages 13-28.
|
| 136 |
+
Helena Bonaldi, Sara Dellantonio, Serra Sinem Tekiroglu, and Marco Guerini. 2022. Human-Machine Collaboration Approaches to Build a Dialogue Dataset for Hate Speech Countering. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 8031-8049.
|
| 137 |
+
Santiago Castro. 2017. Fast Krippendorff: Fast computation of Krippendorff's alpha agreement measure. https://github.com/pln-fing-udelar/fast-krippendorff.
|
| 138 |
+
Ohio Supercomputer Center. 2016. Owens Supercomputer.
|
| 139 |
+
Ohio Supercomputer Center. 2022. Ascend Supercomputer.
|
| 140 |
+
Cheng-Han Chiang and Hung-yi Lee. 2023. Can large language models be an alternative to human evaluations? In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15607-15631, Toronto, Canada. Association for Computational Linguistics.
|
| 141 |
+
Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. 2023. Vicuna: An Open-Source Chatbot Impressing GPT-4 with $90\%$ ChatGPT Quality.
|
| 142 |
+
Yi-Ling Chung, Elizaveta Kuzmenko, Serra Sinem Tekiroglu, and Marco Guerini. 2019. CONAN-Counter NArratives through Nichesourcing: a Multilingual Dataset of Responses to Fight Online Hate Speech. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 2819-2829.
|
| 143 |
+
Yi-Ling Chung, Serra Sinem Tekiroglu, and Marco Guerini. 2021. Towards Knowledge-Grounded Counter Narrative Generation for Hate Speech. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 899-914.
|
| 144 |
+
|
| 145 |
+
Margherita Fanton, Helena Bonaldi, Serra Sinem Tekiroglu, and Marco Guerini. 2021. Human-in-the-Loop for Data Collection: a Multi-Target Counter Narrative Dataset to Fight Online Hate Speech. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3226-3240.
|
| 146 |
+
Jinlan Fu, See-Kiong Ng, Zhengbao Jiang, and Pengfei Liu. 2023. Gptscore: Evaluate as you desire. arXiv preprint arXiv:2302.04166.
|
| 147 |
+
Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. Advances in neural information processing systems, 28.
|
| 148 |
+
Ting-Yao Hsu, Chieh-Yang Huang, Ryan Rossi, Sungchul Kim, C. Giles, and Ting-Hao Huang. 2023. GPT-4 as an Effective Zero-Shot Evaluator for Scientific Figure Captions. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 5464-5474, Singapore. Association for Computational Linguistics.
|
| 149 |
+
J Edward Hu, Abhinav Singh, Nils Holzenberger, Matt Post, and Benjamin Van Durme. 2019. Largescale, diverse, paraphrastic bitexts via sampling and clustering. In Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 44-54.
|
| 150 |
+
Shuyu Jiang, Wenyi Tang, Xingshu Chen, Rui Tang, Haizhou Wang, and Wenxian Wang. 2023. Raucg: Retrieval-augmented unsupervised counter narrative generation for hate speech. arXiv preprint arXiv:2310.05650.
|
| 151 |
+
Seungone Kim, Jamin Shin, Yejin Cho, Joel Jang, Shayne Longpre, Hwaran Lee, Sangwoo Yun, Seongjin Shin, Sungdong Kim, James Thorne, and Minjoon Seo. 2024. Prometheus: Inducing Evaluation Capability in Language Models. In The Twelfth International Conference on Learning Representations.
|
| 152 |
+
Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.
|
| 153 |
+
Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and William B Dolan. 2016. A Diversity-Promoting Objective Function for Neural Conversation Models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 110-119.
|
| 154 |
+
Chin-Yew Lin. 2004. ROUGE: A Package for Automatic Evaluation of Summaries. In Text summarization branches out, pages 74-81.
|
| 155 |
+
|
| 156 |
+
Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. 2023. G-Eval: NLG Evaluation using Gpt-4 with Better Human Alignment. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2511–2522, Singapore. Association for Computational Linguistics.
|
| 157 |
+
Ahmed Magooda, Alec Helyar, Kyle Jackson, David Sullivan, Chad Atalla, Emily Sheng, Dan Vann, Richard Edgar, Hamid Palangi, Roman Lutz, et al. 2023. A Framework for Automated Measurement of Responsible AI Harms in Generative AI Applications. arXiv preprint arXiv:2310.17750.
|
| 158 |
+
Binny Mathew, Navish Kumar, Pawan Goyal, Animesh Mukherjee, et al. 2018. Analyzing the hate and counter speech accounts on twitter. arXiv preprint arXiv:1812.02712.
|
| 159 |
+
OpenAI. 2022. ChatGPT.
|
| 160 |
+
OpenAI. 2023. GPT-4 Technical Report.
|
| 161 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. BLEU: a Method for Automatic Evaluation of Machine Translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pages 311-318.
|
| 162 |
+
Jing Qian, Anna Bethke, Yinyin Liu, Elizabeth Belding, and William Yang Wang. 2019. A Benchmark Dataset for Learning to Intervene in Online Hate Speech. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4755-4764.
|
| 163 |
+
Serra Sinem Tekiroglu, Helena Bonaldi, Margherita Fanton, and Marco Guerini. 2022. Using Pre-Trained Language Models for Producing Counter Narratives Against Hate Speech: a Comparative Study. In *Findings of the Association for Computational Linguistics: ACL* 2022, pages 3099–3114.
|
| 164 |
+
Pauli Virtanen, Ralf Gommers, Travis E. Oliphant, Matt Haberland, Tyler Reddy, David Cournapeau, Evgeni Burovski, Pearu Peterson, Warren Weckesser, Jonathan Bright, Stefan J. van der Walt, Matthew Brett, Joshua Wilson, K. Jarrod Millman, Nikolay Mayorov, Andrew R. J. Nelson, Eric Jones, Robert Kern, Eric Larson, C J Carey, Ilhan Polat, Yu Feng, Eric W. Moore, Jake VanderPlas, Denis Laxalde, Josef Perktold, Robert Cimrman, Ian Henriksen, E. A. Quintero, Charles R. Harris, Anne M. Archibald, Antonio H. Ribeiro, Fabian Pedregosa, Paul van Mulbregt, and SciPy 1.0 Contributors. 2020. SciPy 1.0: Fundamental Algorithms for Scientific Computing in Python. Nature Methods, 17:261-272.
|
| 165 |
+
Jiaan Wang, Yunlong Liang, Fandong Meng, Zengkui Sun, Haoxiang Shi, Zhixu Li, Jinan Xu, Jianfeng Qu, and Jie Zhou. 2023. Is ChatGPT a Good NLG Evaluator? A Preliminary Study. In Proceedings of
|
| 166 |
+
|
| 167 |
+
the 4th New Frontiers in Summarization Workshop, pages 1-11, Singapore. Association for Computational Linguistics.
|
| 168 |
+
Ke Wang and Xiaojun Wan. 2018. SentiGAN: Generating Sentimental Texts via Mixture Adversarial Networks. In Proceedings of the 27th International Joint Conference on Artificial Intelligence, pages 4446-4452.
|
| 169 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-Art Natural Language Processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
|
| 170 |
+
Weizhe Yuan, Graham Neubig, and Pengfei Liu. 2021. BARTscore: Evaluating Generated Text as Text Generation. Advances in Neural Information Processing Systems, 34:27263-27277.
|
| 171 |
+
Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. BERTScore: Evaluating Text Generation with BERT. In International Conference on Learning Representations.
|
| 172 |
+
Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023a. Judging LLM-as-a-Judge with MT-Bench and Chatbot Arena. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track.
|
| 173 |
+
Yi Zheng, Björn Ross, and Walid Magdy. 2023b. What makes good counterspeech? a comparison of generation approaches and evaluation metrics. In Proceedings of the 1st Workshop on CounterSpeech for Online Abuse (CS4OA), pages 62-71, Prague, Czechia. Association for Computational Linguistics.
|
| 174 |
+
Wanzheng Zhu and Suma Bhat. 2021. Generate, Prune, Select: A Pipeline for Counterspeech Generation against Online Hate Speech. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 134-149.
|
| 175 |
+
|
| 176 |
+
# A Counter Narrative Generation
|
| 177 |
+
|
| 178 |
+
Based on the results shown in Table A.1, zero-shot prompting of LLMs such as ChatGPT and Vicuna can serve as an effective counter narrative generation approach in comparison to previous supervised strategies according to AMT crowdworkers. This suggests that recent LLMs are capable of performing the counter narrative generation effectively even without receiving additional guidance from finetuning or prompting, alleviating some reliance on previously created supervised datasets. However, consistent with Tekiroğlu et al. (2022), these models can struggle with the specificity of generated counter narratives, leaving room for further improvement in future counter narrative generation models.
|
| 179 |
+
|
| 180 |
+
# B DialogGPT Implementation
|
| 181 |
+
|
| 182 |
+
We implement DialogGPT-medium using Hugging-Face (Wolf et al., 2020) by finetuning on the train set of Multitarget-CONAN containing 4003 hate speech/counter narrative pairs using Adam (Kingma and Ba, 2015) and the following hyperparameters from Tekiroglu et al. (2022):
|
| 183 |
+
|
| 184 |
+
- Learning rate: 5e-5
|
| 185 |
+
- Batch size: 4
|
| 186 |
+
- Epochs: 2
|
| 187 |
+
|
| 188 |
+
For counter narrative generation, we generate 5 counter narrative candidates from our finetuned DialoGPT model using top-pk decoding, the best decoding mechanism for the model in Tekiroglu et al. (2022), and select a random candidate from the sample for each hate speech example.
|
| 189 |
+
|
| 190 |
+
# C Prompting/API details
|
| 191 |
+
|
| 192 |
+
Counter Narrative Generation. We utilize ChatGPT and Vicuna v1.3 33b with temperature = 1 and max_new_tokens = 512 using the simple, zero-shot prompt in Figure C.1.
|
| 193 |
+
|
| 194 |
+
Generate a counter narrative response to the following hate speech example.
|
| 195 |
+
Hate Speech:[hate_speech]
|
| 196 |
+
Counter Narrative:
|
| 197 |
+
|
| 198 |
+
Figure C.1: Counter narrative generation prompt.
|
| 199 |
+
|
| 200 |
+
Score Rubric Generation. We generate score
|
| 201 |
+
|
| 202 |
+
rubrics from 1-5 stars from ChatGPT on chat.openai.com for each of our aspect definitions using the aspect prompt format from Wang et al. (2023) in the prompt in Figure C.2.
|
| 203 |
+
|
| 204 |
+
Define what each star means from 1 to 5 based on the following description of a scoring criteria:
|
| 205 |
+
|
| 206 |
+
Score the following counter narrative response given the corresponding hate speech example with respect to [aspect] with one to five stars, where one star means "[anti-aspect]" and five stars means "perfect [aspect]". Note that [aspect] measures [aspect_defined].
|
| 207 |
+
|
| 208 |
+
Figure C.2: Score rubric prompt.
|
| 209 |
+
|
| 210 |
+
Counter Narrative Evaluation. Given our generated score rubrics, we prompt ChatGPT, GPT-4, and Vicuna v1.3 33b with temperature $= 0$ and max_new_tokens = 512 for evaluation using the prompt in Figure C.3.
|
| 211 |
+
|
| 212 |
+
Score the following counter narrative response given the corresponding hate speech example with respect to [aspect] with one to five stars and provide an explanation of the given score. Note that [aspect] measures [aspect_defined]. Here is the definition of each star rating from 1 to 5:
|
| 213 |
+
|
| 214 |
+
[score_rubric]
|
| 215 |
+
|
| 216 |
+
Hate Speech: [hate_speech]
|
| 217 |
+
|
| 218 |
+
Counter Narrative: [counter_narrative] Score:
|
| 219 |
+
|
| 220 |
+
Figure C.3: Counter narrative evaluation prompt.
|
| 221 |
+
|
| 222 |
+
Our total cost for the use of the OpenAI API for ChatGPT generated candidates and evaluation from both ChatGPT and GPT-4 is $123.16.
|
| 223 |
+
|
| 224 |
+
Prometheus Evaluation. For Prometheus 13b (Kim et al., 2024), we implement the following hyperparameters directly used in the original paper for inference:
|
| 225 |
+
|
| 226 |
+
- Temperature: 1.0
|
| 227 |
+
- Top-p: 0.9
|
| 228 |
+
- Repetition Penalty: 1.03
|
| 229 |
+
- Max Output Length: 256
|
| 230 |
+
|
| 231 |
+
We adapt the prompt used in the original paper for the counter narrative evaluation task, resulting in the prompt in Figure C.4.
|
| 232 |
+
|
| 233 |
+
# Task Description:
|
| 234 |
+
|
| 235 |
+
An instruction (might include an Input inside it), a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing a evaluation criteria are given.
|
| 236 |
+
|
| 237 |
+
1. Write a detailed feedback that assess the quality of the response strictly based on the given score rubric, not evaluating in general.
|
| 238 |
+
2. After writing a feedback, write a score that is an integer between 1 and 5. You should refer to the score rubric.
|
| 239 |
+
3. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)"
|
| 240 |
+
4. Please do not generate any other opening, closing, and explanations.
|
| 241 |
+
|
| 242 |
+
# The instruction to evaluate:
|
| 243 |
+
|
| 244 |
+
Generate a counter narrative response to the following hate speech example.
|
| 245 |
+
|
| 246 |
+
Hate Speech: [hate_speech]
|
| 247 |
+
|
| 248 |
+
# Response to evaluate:
|
| 249 |
+
|
| 250 |
+
Counter Narrative: [counter_narrative]
|
| 251 |
+
|
| 252 |
+
# Reference Answer (Score 5):
|
| 253 |
+
|
| 254 |
+
[reference_hs_cn_pair_for_aspect]
|
| 255 |
+
|
| 256 |
+
# Score Rubrics:
|
| 257 |
+
|
| 258 |
+
[aspect_defined]
|
| 259 |
+
|
| 260 |
+
Score 1: [aspect_score1_description]
|
| 261 |
+
|
| 262 |
+
Score 2: [aspect_score2_description]
|
| 263 |
+
|
| 264 |
+
Score 3: [aspect_score3_description]
|
| 265 |
+
|
| 266 |
+
Score 4: [aspect_score4_description]
|
| 267 |
+
|
| 268 |
+
Score 5: [aspect_score5_description]
|
| 269 |
+
|
| 270 |
+
Feedback:
|
| 271 |
+
|
| 272 |
+
Figure C.4: Counter narrative evaluation prompt for Prometheus.
|
| 273 |
+
|
| 274 |
+
# D BARTScore details
|
| 275 |
+
|
| 276 |
+
For the use of BARTScore (Yuan et al., 2021) in this work, we implement multiple methods from the original paper including Precision, the log probability of generating the generated counter narrative candidate using a reference, Recall, the log probability of generating the reference given the generated candidate, and F1, the arithmetic average of Precision and Recall. Additionally, we utilize finetuned variants BARTScore-CNN, a BART model finetuned on the CNN/Daily Mail dataset (Hermann et al., 2015), and BARTScore-CNN-Para, a BART model further finetuned on ParaBank2 (Hu et al., 2019).
|
| 277 |
+
|
| 278 |
+
# E AMT Study details
|
| 279 |
+
|
| 280 |
+
For human annotation in our study, we utilize the Amazon Mechanical Turk platform. Prior to receiving any annotation, we have our study reviewed by an Institutional Review Board (IRB) to ensure we perform human subjects research in an ethical manner. In order to ensure the well
|
| 281 |
+
|
| 282 |
+
being of workers within this study, we provide a disclaimer related to the potential harmful effects of exposure to hateful content and implement the mitigation procedure of Fanton et al. (2021) which encourages workers to work on the task for brief durations (2-3 hours), take frequent breaks, and maintain active communication about any potential problems or distress.
|
| 283 |
+
|
| 284 |
+
To maintain high-quality annotation within our study, we require workers to have the qualifications of a $95\%$ HIT approval rate, 1000 HITs approved, and completion of our qualification task shown in Figures E.2, E.3, E.4, and E.5. After completion of our qualification task, workers receive our main task which is shown in Figure E.1. While demographic information is self-reported by workers during the qualification task so that we can gain an understanding of potential sources of bias in provided annotation, we ensure confidentiality and privacy by only sharing information amongst members of our team and aggregating all demographic information before release to avoid individual identifiers. The demographic information for the 13 workers that completed at least one HIT in our study can be found in Table E.1. In order to provide fair compensation to workers in our study, we pay $2.70 per HIT while expecting each HIT to take 15 minutes on average, resulting in an hourly rate of $10.80 which is above the minimum wage. Additionally, we provide bonus payments of $2.70 for completion of our qualification task and 2 additional HITs and $4.05 for the significant contribution of completing 10 HITs in our study. Our total cost for human annotation in this study after payment for HITs, bonus payments, and Mechanical Turk fees is $1,830.
|
| 285 |
+
|
| 286 |
+
# F Interrater Agreement
|
| 287 |
+
|
| 288 |
+
To test the reliability of human annotation within our study, we measure interrater agreement using Krippendorff's $\alpha$ using FastKrippendorff (Castro, 2017). These results are shown within Table F.1.
|
| 289 |
+
|
| 290 |
+
# G Correlations
|
| 291 |
+
|
| 292 |
+
The full results containing correlations for all candidates in our evaluation set for all evaluation metrics used are shown in Table G.1. Our fine-grained analysis results from Section 4.2 for DialoGPT, ChatGPT, and Vicuna-generated candidates are shown in Tables G.2, G.3, and G.4 respectively. All Pearson, Spearman, and Kendall
|
| 293 |
+
|
| 294 |
+
<table><tr><td colspan="2">Interrater Agreement</td></tr><tr><td>Aspect</td><td>α</td></tr><tr><td>Opposition</td><td>0.675</td></tr><tr><td>Relatedness</td><td>0.599</td></tr><tr><td>Specificity</td><td>0.599</td></tr><tr><td>Toxicity</td><td>0.534</td></tr><tr><td>Fluency</td><td>0.352</td></tr><tr><td>Overall</td><td>0.662</td></tr></table>
|
| 295 |
+
|
| 296 |
+
Table F.1: Interrater agreement in our Amazon Mechanical Turk study using Krippendorff's $\alpha$ .
|
| 297 |
+
|
| 298 |
+
correlations were computed using Scipy (Virtanen et al., 2020).
|
| 299 |
+
|
| 300 |
+
# H Qualitative Examples
|
| 301 |
+
|
| 302 |
+
We provide more qualitative examples of multi-aspect improvement for Vicuna and Prometheus in Tables 4 and H.1 respectively, common errors in Tables H.2 and H.3, and GPT-4 underrating Vicuna examples in Table H.4.
|
| 303 |
+
|
| 304 |
+
# I Additional Figures
|
| 305 |
+
|
| 306 |
+
- Average scores given to counter narratives from each generation model used in our study from each LLM evaluator - Table I.1
|
| 307 |
+
- Mean absolute error of each LLM evaluator - Table I.2
|
| 308 |
+
- Standard deviation of AMT annotation - Table I.3
|
| 309 |
+
|
| 310 |
+
For this task, imagine you have encountered the following hate speech example in an online environment. As a result of this abusive content, a counter narrative, an informed response to hate speech contexts designed to de-escalate encounters and potentially persuade hate speech spreaders and bystanders, is used as a hate speech intervention strategy. Here is the given hate speech example and counter narrative response pair for this example:
|
| 311 |
+
|
| 312 |
+
Hate Speech Example: Physically disabled folks are all angry and useless.
|
| 313 |
+
|
| 314 |
+
Counter Narrative Response: There is no evidence to support your claim, in terms of a "physical handicap" that causes anger and unproductive behaviour. There is also no evidence that the mentally disabled are "unproductive" at all.
|
| 315 |
+
|
| 316 |
+
# Targeted Group: DISABLED
|
| 317 |
+
|
| 318 |
+
Generate an evaluation score from 1 to 5 stars for this counter narrative based on the following aspect definitions. When generating each evaluation score, think carefully about important details of both the hate speech example and the counter narrative response such as the claims being expressed by the hate speech spreader, any stereotypes or prejudice being demonstrated or implied, and the arguments being made within the counter narrative response. After generating these scores, provide a short explanation of why each score was given using information from the counter narrative response to justify your answer. In sample studies, we have found that taking 10-20 minutes for each task results in reasonable performance.
|
| 319 |
+
|
| 320 |
+
Aspect Definition #1: Specificity - how much the counter narrative presents focused and specific arguments that effectively counter the key ideas within the hate speech example through the use of in-depth arguments, nuanced reasoning, and supporting evidence, with one star meaning the response is general and five stars meaning "perfect specificity."
|
| 321 |
+
|
| 322 |
+

|
| 323 |
+
|
| 324 |
+
Provide an explanation to justify your score for aspect #1 here...
|
| 325 |
+
|
| 326 |
+
Aspect Definition #2: Fluency - the quality of a response based on whether they are well-written and grammatically correct, with one star meaning the response is disfluent and five stars meaning "perfect fluency."
|
| 327 |
+
|
| 328 |
+

|
| 329 |
+
|
| 330 |
+
Provide an explanation to justify your score for aspect #2 here...
|
| 331 |
+
|
| 332 |
+
Aspect Definition #3: Opposition - how much the counter narrative opposes and contradicts the hate speech example through the expression of an opposing sentiment regardless of the argument's effectiveness or persuasiveness, with one star meaning support of the example and five stars meaning "perfect opposition" to the example
|
| 333 |
+
|
| 334 |
+

|
| 335 |
+
|
| 336 |
+
Provide an explanation to justify your score for aspect #3 here...
|
| 337 |
+
|
| 338 |
+
Aspect Definition #4: Toxicity - how rude, unreasonable, or disrespectful a response is, with one star meaning the response is toxic and five stars meaning "perfect non-toxicity."
|
| 339 |
+
|
| 340 |
+

|
| 341 |
+
|
| 342 |
+
Provide an explanation to justify your score for aspect #4 here...
|
| 343 |
+
|
| 344 |
+
Aspect Definition #5: Semantic Relatedness - the association between the counter narrative response and hate speech example based on contextual or semantic similarity, with one star meaning not related and five stars meaning "perfect relatedness."
|
| 345 |
+
|
| 346 |
+

|
| 347 |
+
|
| 348 |
+
Provide an explanation to justify your score for aspect #5 here...
|
| 349 |
+
|
| 350 |
+
# Overall Score:
|
| 351 |
+
|
| 352 |
+
Note: The overall score provided here is not just meant to be the average of the previously provided scores and you can weight some aspects more heavily than others if you feel it is applicable. For example, if you feel that specificity is a more important aspect than fluency on the overall quality of the counter narrative, you can assign an overall score that weights specificity more heavily.
|
| 353 |
+
|
| 354 |
+

|
| 355 |
+
Figure E.1: Example of main task within our Amazon Mechanical Turk study.
|
| 356 |
+
|
| 357 |
+
(Optional) Provide any feedback you may have about this task here...
|
| 358 |
+
|
| 359 |
+
# 1. Task Overview
|
| 360 |
+
|
| 361 |
+
In this study, you will be asked to imagine that you have encountered the given hate speech example in an online environment. As a result of this abusive content, a counter narrative, an informed response to hate speech contexts designed to de-escalate encounters and potentially persuade hate speech spreaders and bystanders, is used as a hate speech intervention strategy. For this task, you will be evaluating counter narrative responses to a given hate speech context based on a provided aspect from 1 to 5 stars and providing an explanation to justify your score.
|
| 362 |
+
|
| 363 |
+
# 2. Important Notes
|
| 364 |
+
|
| 365 |
+
Due to the presence of hate speech content within this task, we recommend that workers:
|
| 366 |
+
|
| 367 |
+
Work on the task for only brief durations per session (2-3 hours)
|
| 368 |
+
Take frequent breaks when necessary.
|
| 369 |
+
|
| 370 |
+
- Maintain active communication about any potential problems or distress stemming from the presence of hate speech content
|
| 371 |
+
|
| 372 |
+
Bonus Payments: While workers will not receive payment for completion of this qualification task initially, workers will receive additional payment for the qualification task of $2.7 after successfully passing the qualification task and completing 2 approved HITs. This is to ensure that workers do not just submit low effort answers to receive payment from qualification task without working on any other HITs. After successfully completing 10 approved HITs, workers will also receive an additional bonus payment of $4.05 for providing significant contribution to our study.
|
| 373 |
+
|
| 374 |
+
# 3. Task Guidelines
|
| 375 |
+
|
| 376 |
+
Provided Resources:
|
| 377 |
+
|
| 378 |
+
- Hate speech example
|
| 379 |
+
Counter narrative response
|
| 380 |
+
Targeted group within hate speech response
|
| 381 |
+
- Aspect definition that the counter narrative response will be evaluated on
|
| 382 |
+
|
| 383 |
+
The key aspects that will be used to evaluate the given counter narrative response to the hate speech example will include the following:
|
| 384 |
+
|
| 385 |
+
- Opposition - how much the counter narrative opposes and contradicts the hate speech example through the expression of an opposing sentiment regardless of the argument's effectiveness or persuasiveness, with 1 star
|
| 386 |
+
meaning support of the example and 5 stars meaning "perfect opposition" to the example.
|
| 387 |
+
- Semantic Relatedness - the association between the counter narrative response and hate speech example based on contextual or semantic closeness, with 1 star meaning not related and 5 stars meaning "perfect relatedness."
|
| 388 |
+
- Specificity - how much the counter narrative presents focused and specific arguments that effectively counter the key ideas within the hate speech example through the use of in-depth arguments, nuanced reasoning, and supporting evidence, with 1 star meaning the response is general and 5 stars meaning "perfect specificity."
|
| 389 |
+
- Toxicity - how rude, unreasonable, or disrespectful a response is, with 1 star meaning the response is toxic and 5 stars meaning "perfect non-toxicity."
|
| 390 |
+
|
| 391 |
+
- Fluency - the quality of a response based on whether they are well-written and grammatically correct, with 1 star meaning the response is disfluent and 5 stars meaning "perfect fluency."
|
| 392 |
+
|
| 393 |
+
Task Outline: For this task, imagine you have encountered the following hate speech example in an online environment. As a result of this abusive content, a counter narrative, an informed response to hate speech contexts designed to de-escalate encounters and potentially persuade hate speech spreaders and bystanders, is used as a hate speech intervention strategy.
|
| 394 |
+
|
| 395 |
+
Evaluation Score: Given the hate speech example and counter narrative response pair, generate an evaluation score from 1 to 5 stars based on the given aspect definition. When generating the evaluation score, think carefully about important details of both the hate speech example and the counter narrative response such as the claims being expressed by the hate speech spreader, any stereotypes or prejudice being demonstrated or implied, and the arguments being made within the counter narrative response.
|
| 396 |
+
|
| 397 |
+
Score Explanation: After generating an evaluation score for the given counter narrative response, provide a short explanation of why this score was given using information from the counter narrative response to justify your answer. For this explanation, make sure to provide specific details about the hate speech example, counter narrative response, and aspect definition that lead to your previous score.
|
| 398 |
+
|
| 399 |
+
Expected output for this task:
|
| 400 |
+
|
| 401 |
+
An evaluation score from one to five stars for the given counter narrative response based on a specified aspect definition.
|
| 402 |
+
An explanation to justify your given evaluation score based on details of the hate speech example, counter narrative response, and aspect definition using 1-3 sentences
|
| 403 |
+
|
| 404 |
+
Example output for evaluation score and explanation:
|
| 405 |
+
|
| 406 |
+
- Hate Speech Example: Gays are just sick. It is an abomination.
|
| 407 |
+
Counter Narrative Response: Do you consider it a disease like cold or flu?
|
| 408 |
+
Targeted Group: LGBT+
|
| 409 |
+
- Aspect Definition: Specificity - how much the counter narrative presents focused and specific arguments that effectively counter the key ideas within the hate speech example through the use of in-depth arguments, nuanced
|
| 410 |
+
reasoning, and supporting evidence, with 1 star meaning the response is general and 5 stars meaning "perfect specificity."
|
| 411 |
+
Score: 1 star
|
| 412 |
+
- Explanation: The counter narrative doesn't provide much of an argument against the hate speech example's claim that homosexuality is a sickness and chooses to provide a vague question about it as a disease instead.
|
| 413 |
+
|
| 414 |
+
Figure E.2: Task description within our Amazon Mechanical Turk qualification task.
|
| 415 |
+
|
| 416 |
+
# 4. Demographic Information
|
| 417 |
+
|
| 418 |
+
Demographic information collected in this study will be aggregated across the entire worker population before release, meaning no individual demographic information will be publicly released.
|
| 419 |
+
|
| 420 |
+
What is your current age?
|
| 421 |
+
|
| 422 |
+
18-25
|
| 423 |
+
25-30
|
| 424 |
+
30-35
|
| 425 |
+
35-45
|
| 426 |
+
45-55
|
| 427 |
+
55+
|
| 428 |
+
$\bigcirc$ Prefer not to say
|
| 429 |
+
|
| 430 |
+
What gender do you identify as?
|
| 431 |
+
|
| 432 |
+
Man
|
| 433 |
+
Woman
|
| 434 |
+
Non-binary
|
| 435 |
+
○ Prefer not to say
|
| 436 |
+
|
| 437 |
+
Are you of Spanish or Latino origin?
|
| 438 |
+
|
| 439 |
+
Yes
|
| 440 |
+
No
|
| 441 |
+
○ Prefer not to say
|
| 442 |
+
|
| 443 |
+
Which of the following best describes you?
|
| 444 |
+
|
| 445 |
+
Asian
|
| 446 |
+
Black or African American
|
| 447 |
+
Native American or Alaska Native
|
| 448 |
+
$\bigcirc$ Native Hawaiian or Pacific Islander
|
| 449 |
+
White
|
| 450 |
+
$\bigcirc$ Prefer not to say
|
| 451 |
+
|
| 452 |
+
What is your religious affiliation?
|
| 453 |
+
|
| 454 |
+
Christianity
|
| 455 |
+
O Islam
|
| 456 |
+
Judaism
|
| 457 |
+
○ Hinduism
|
| 458 |
+
Buddhism
|
| 459 |
+
None/no religion
|
| 460 |
+
Other
|
| 461 |
+
$\bigcirc$ Prefer not to say
|
| 462 |
+
|
| 463 |
+
Do you consider yourself to have a disability?
|
| 464 |
+
|
| 465 |
+
Yes, I have a disability.
|
| 466 |
+
No, I do not have a disability.
|
| 467 |
+
○ Prefer not to say
|
| 468 |
+
|
| 469 |
+
Figure E.3: Demographic questions within our Amazon Mechanical Turk qualification task.
|
| 470 |
+
|
| 471 |
+
# 5. Qualification Questions
|
| 472 |
+
|
| 473 |
+
1. What is a counter narrative?
|
| 474 |
+
|
| 475 |
+
a. A response that supports the claims made within the hate speech example
|
| 476 |
+
b. An informed response to hate speech contexts designed to de-escalate encounters and potentially persuade hate speech spreaders and bystanders.
|
| 477 |
+
c. An angry comment insulting the original hate speech spreader
|
| 478 |
+
$\bigcirc$ d. An educated response to hate speech situations aimed at intensifying conflicts and retaliating against hate speech spreaders
|
| 479 |
+
|
| 480 |
+
2. What is the expected output for this task?
|
| 481 |
+
|
| 482 |
+
a. A counter narrative response to the given hate speech example
|
| 483 |
+
b. An evaluation score from 1 to 100 based solely on your own personal criteria for an effective counter narrative response
|
| 484 |
+
c. An evaluation score from 1 to 5 stars based on a specified aspect definition
|
| 485 |
+
$\bigcirc$ d. An explanation to justify the evaluation score you provided
|
| 486 |
+
e. B and D
|
| 487 |
+
f. C and D
|
| 488 |
+
|
| 489 |
+
3. Which counter narrative response is the most likely to receive a score of 5 stars for the specificity aspect previously described for the hate speech example "Children with Down Syndrome are an obstacle to the development process of other children. They are slow learners and therefore should be isolated."?
|
| 490 |
+
|
| 491 |
+
a. "Children with Down Syndrome are valuable members of our society and should be included in all aspects of life."
|
| 492 |
+
b. "Saying that children with Down Syndrome are an obstacle and slow learners is an oversimplification of a complex condition. Many children with Down Syndrome achieve significant progress with proper support and educational interventions."
|
| 493 |
+
c. "Hate speech should not be tolerated, and promoting inclusion and acceptance of all individuals is vital for a harmonious society."
|
| 494 |
+
$\bigcirc$ d. "Disabilities should not be a reason for exclusion or isolation."
|
| 495 |
+
|
| 496 |
+
4. Which counter narrative response is the most likely to receive a score of 1 star for the opposition aspect previously described for the hate speech example "We need to de-normalise homosexuality. It is harmful to children."?
|
| 497 |
+
|
| 498 |
+
a. "Every individual, regardless of sexual orientation, deserves respect."
|
| 499 |
+
b. "Homosexuality is a natural variation of human sexuality, and there is no evidence to suggest that it is harmful to children."
|
| 500 |
+
c. "Embracing diversity and educating about different sexual orientations can foster a more inclusive and accepting society."
|
| 501 |
+
$\bigcirc$ d. "Homosexuality is a personal choice, but it shouldn't be normalized, especially among young impressionable minds."
|
| 502 |
+
|
| 503 |
+
5. Which counter narrative response is the most likely to receive a score of 5 stars for the hate speech example "Blacks are less intelligent. They are less healthy. They are less safe."?
|
| 504 |
+
|
| 505 |
+
a. "Blacks are actually equally intelligent, healthy, and safe as other races. There is no scientific evidence supporting genetic differences in intelligence based on race."
|
| 506 |
+
b. "In ancient times, all races were equally intelligent, healthy, and safe. Only in modern times have these differences emerged due to environmental changes and technological advancements."
|
| 507 |
+
c. "Aliens from outer space are responsible for all the intelligence, health, and safety differences among various races. They have been manipulating our genetics for centuries."
|
| 508 |
+
$\bigcirc$ d. "Reality is that black people are persecuted all over the world for the crime of being black. I don't see how this could be said for white people."
|
| 509 |
+
|
| 510 |
+
Figure E.4: Qualification questions within our Amazon Mechanical Turk qualification task.
|
| 511 |
+
|
| 512 |
+
# 6. Qualification Tasks
|
| 513 |
+
|
| 514 |
+
# Task 1
|
| 515 |
+
|
| 516 |
+
Hate Speech: Immigrants steal our job
|
| 517 |
+
|
| 518 |
+
Counter Narrative: Actually, many of the jobs that they steal are highly complex and demanding, and few natives want to do them.
|
| 519 |
+
|
| 520 |
+
Target: Migrants
|
| 521 |
+
|
| 522 |
+
Aspect: Opposition - how much the counter narrative opposes and contradicts the hate speech example through the expression of an opposing sentiment regardless of the argument's effectiveness or persuasiveness, with 1 star meaning support of the example and 5 stars meaning "perfect opposition" to the example.
|
| 523 |
+
|
| 524 |
+
Score:
|
| 525 |
+
|
| 526 |
+
1 star
|
| 527 |
+
2 stars
|
| 528 |
+
3 stars
|
| 529 |
+
4 stars
|
| 530 |
+
5 stars
|
| 531 |
+
|
| 532 |
+
Provide an explanation to justify your score:
|
| 533 |
+
|
| 534 |
+
# Task 2
|
| 535 |
+
|
| 536 |
+
Hate Speech: Women have no place in a free and open society. They should just do their housework and raise children without complaining.
|
| 537 |
+
|
| 538 |
+
Counter Narrative: Women have other choices than just housework and motherhood. They are human beings with freedom of choice who deserve our respect and admiration.
|
| 539 |
+
|
| 540 |
+
Target: Women
|
| 541 |
+
|
| 542 |
+
Aspect: Semantic Relatedness - the association between the counter narrative response and hate speech example based on contextual or semantic similarity, with 1 star meaning not related and 5 stars meaning "perfect relatedness."
|
| 543 |
+
|
| 544 |
+
Score:
|
| 545 |
+
|
| 546 |
+
1 star
|
| 547 |
+
2 stars
|
| 548 |
+
3 stars
|
| 549 |
+
4 stars
|
| 550 |
+
5 stars
|
| 551 |
+
|
| 552 |
+
Provide an explanation to justify your score:
|
| 553 |
+
|
| 554 |
+
# Task 3
|
| 555 |
+
|
| 556 |
+
Hate Speech: Jews make up the world's criminals.
|
| 557 |
+
|
| 558 |
+
Counter Narrative: Such anti-Semitism can seem amusing, but can you imagine if the same thing were said about Christians? It would be just as untrue.
|
| 559 |
+
|
| 560 |
+
Target: Jews
|
| 561 |
+
|
| 562 |
+
Aspect: Specificity - how much the counter narrative presents focused and specific arguments that effectively counter the key ideas within the hate speech example through the use of in-depth arguments, nuanced reasoning, and supporting evidence, with 1 star meaning the response is general and 5 stars meaning "perfect specificity."
|
| 563 |
+
|
| 564 |
+
Score:
|
| 565 |
+
|
| 566 |
+
1 star
|
| 567 |
+
2 stars
|
| 568 |
+
3 stars
|
| 569 |
+
4 stars
|
| 570 |
+
5 stars
|
| 571 |
+
|
| 572 |
+
Provide an explanation to justify your score:
|
| 573 |
+
|
| 574 |
+
Figure E.5: Qualification tasks within our Amazon Mechanical Turk qualification task.
|
| 575 |
+
|
| 576 |
+
<table><tr><td colspan="8">Average AMT Scores</td></tr><tr><td>Generation Model</td><td>Opposition</td><td>Relatedness</td><td>Specificity</td><td>Toxicity</td><td>Fluency</td><td>Aspect Average</td><td>Overall</td></tr><tr><td>DialoGPT</td><td>2.76</td><td>3.22</td><td>1.88</td><td>3.58</td><td>3.81</td><td>3.05</td><td>2.04</td></tr><tr><td>ChatGPT</td><td>4.78</td><td>4.71</td><td>4.18</td><td>4.64</td><td>4.77</td><td>4.62</td><td>4.36</td></tr><tr><td>Vicuna-33b v1.3</td><td>4.44</td><td>4.54</td><td>3.98</td><td>4.86</td><td>4.34</td><td>4.43</td><td>4.02</td></tr></table>
|
| 577 |
+
|
| 578 |
+
Table A.1: Average score given to counter narratives generated from each generation model from AMT annotators.
|
| 579 |
+
|
| 580 |
+
<table><tr><td></td><td>AMT Demographic Info</td></tr><tr><td>Age</td><td>35-45 (53.8%), 30-35 (23.1%), 18-25 (15.3%), 45-55 (7.7%), 25-30 (0%), 55+ (0%), Prefer not to say (0%)</td></tr><tr><td>Gender</td><td>Women (53.8%), Men (46.2%), Non-binary (0%), Prefer not to say (0%)</td></tr><tr><td>Ethnicity</td><td>Non-Hispanic/Latino (76.9%), Hispanic/Latino (23.1%), Prefer not to say (0%)</td></tr><tr><td>Race</td><td>White (76.9%), Black (7.7%), Asian (7.7%), Prefer not to say (7.7%), Native American (0%), Pacific Islander (0%)</td></tr><tr><td>Religion</td><td>None (69.2%), Christian (30.8%), Muslim (0%), Jewish (0%), Hindu (0%), Buddhist (0%), Other (0%), Prefer not to say (0%)</td></tr><tr><td>Disability</td><td>No Disability (92.3%), Disability (7.7%), Prefer not to say (0%)</td></tr></table>
|
| 581 |
+
|
| 582 |
+
Table E.1: Demographic information for workers within our Amazon Mechanical Turk study.
|
| 583 |
+
|
| 584 |
+
<table><tr><td colspan="7">Evaluation Metric Correlations (All Models)</td></tr><tr><td rowspan="2">Metric</td><td colspan="3">AMT Multi-aspect</td><td colspan="3">AMT Overall</td></tr><tr><td>Pear.</td><td>Spear.</td><td>Kend.</td><td>Pear.</td><td>Spear.</td><td>Kend.</td></tr><tr><td>BLEU1</td><td>-0.041</td><td>-0.102</td><td>-0.071</td><td>-0.048</td><td>-0.083</td><td>-0.06</td></tr><tr><td>BLEU3</td><td>0.014</td><td>-0.085</td><td>-0.075</td><td>0.001</td><td>-0.083</td><td>-0.071</td></tr><tr><td>BLEU4</td><td>-0.032</td><td>-0.187</td><td>-0.141</td><td>-0.04</td><td>-0.187</td><td>-0.143</td></tr><tr><td>ROUGE-L</td><td>-0.052</td><td>-0.111</td><td>-0.079</td><td>-0.092</td><td>-0.122</td><td>-0.087</td></tr><tr><td>METEOR</td><td>0.432</td><td>0.386</td><td>0.260</td><td>0.426</td><td>0.403</td><td>0.279</td></tr><tr><td>BERTScore</td><td>-0.099</td><td>-0.092</td><td>-0.062</td><td>-0.102</td><td>-0.089</td><td>-0.063</td></tr><tr><td>BARTScore</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>-0.609</td><td>-0.617</td><td>-0.430</td><td>-0.638</td><td>-0.629</td><td>-0.451</td></tr><tr><td>- Recall</td><td>0.581</td><td>0.565</td><td>0.405</td><td>0.596</td><td>0.564</td><td>0.417</td></tr><tr><td>- F1</td><td>-0.441</td><td>-0.487</td><td>-0.330</td><td>-0.469</td><td>-0.497</td><td>-0.343</td></tr><tr><td>BARTScore+CNN</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>0.332</td><td>0.310</td><td>0.215</td><td>0.336</td><td>0.299</td><td>0.214</td></tr><tr><td>- Recall</td><td>0.038</td><td>0.116</td><td>0.081</td><td>0.045</td><td>0.090</td><td>0.064</td></tr><tr><td>- F1</td><td>0.192</td><td>0.253</td><td>0.171</td><td>0.199</td><td>0.224</td><td>0.158</td></tr><tr><td>BARTScore+CNN+Para</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- 
Precision</td><td>-0.142</td><td>-0.115</td><td>-0.073</td><td>-0.133</td><td>-0.118</td><td>-0.075</td></tr><tr><td>- Recall</td><td>0.180</td><td>0.235</td><td>0.166</td><td>0.159</td><td>0.189</td><td>0.135</td></tr><tr><td>- F1</td><td>0.045</td><td>0.106</td><td>0.070</td><td>0.035</td><td>0.072</td><td>0.051</td></tr><tr><td>ChatGPT Multi-Aspect</td><td>0.664</td><td>0.626</td><td>0.481</td><td>0.632</td><td>0.609</td><td>0.475</td></tr><tr><td>ChatGPT Overall</td><td>0.658</td><td>0.633</td><td>0.517</td><td>0.654</td><td>0.624</td><td>0.521</td></tr><tr><td>Vicuna-33b v.1.3 Multi-Aspect</td><td>0.824</td><td>0.782</td><td>0.613</td><td>0.815</td><td>0.771</td><td>0.616</td></tr><tr><td>Vicuna-33b v.1.3 Overall</td><td>0.718</td><td>0.698</td><td>0.544</td><td>0.745</td><td>0.687</td><td>0.544</td></tr><tr><td>GPT-4 Multi-Aspect</td><td>0.806</td><td>0.710</td><td>0.557</td><td>0.762</td><td>0.694</td><td>0.551</td></tr><tr><td>GPT-4 Overall</td><td>0.788</td><td>0.733</td><td>0.597</td><td>0.783</td><td>0.721</td><td>0.600</td></tr><tr><td>Prometheus-13b Multi-Aspect</td><td>0.784</td><td>0.671</td><td>0.510</td><td>0.763</td><td>0.643</td><td>0.495</td></tr><tr><td>Prometheus-13b Overall</td><td>0.679</td><td>0.567</td><td>0.458</td><td>0.667</td><td>0.570</td><td>0.468</td></tr></table>
|
| 585 |
+
|
| 586 |
+
Table G.1: Correlation of evaluation metric and AMT scores for the entire evaluation set; best correlation is in bold, second is underlined.
|
| 587 |
+
|
| 588 |
+
<table><tr><td colspan="7">Evaluation Metric Correlations (DialoGPT)</td></tr><tr><td rowspan="2">Metric</td><td colspan="3">AMT Multi-aspect</td><td colspan="3">AMT Overall</td></tr><tr><td>Pear.</td><td>Spear.</td><td>Kend.</td><td>Pear.</td><td>Spear.</td><td>Kend.</td></tr><tr><td>BLEU1</td><td>0.220</td><td>0.169</td><td>0.117</td><td>0.357</td><td>0.283</td><td>0.210</td></tr><tr><td>BLEU3</td><td>0.293</td><td>0.287</td><td>0.184</td><td>0.341</td><td>0.417</td><td>0.290</td></tr><tr><td>BLEU4</td><td>0.348</td><td>0.305</td><td>0.208</td><td>0.432</td><td>0.436</td><td>0.311</td></tr><tr><td>ROUGE-L</td><td>0.274</td><td>0.198</td><td>0.136</td><td>0.302</td><td>0.171</td><td>0.12</td></tr><tr><td>METEOR</td><td>0.342</td><td>0.315</td><td>0.202</td><td>0.398</td><td>0.369</td><td>0.259</td></tr><tr><td>BERTScore</td><td>0.308</td><td>0.275</td><td>0.185</td><td>0.396</td><td>0.328</td><td>0.238</td></tr><tr><td>BARTScore</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>0.012</td><td>-0.032</td><td>-0.025</td><td>0.095</td><td>0.036</td><td>0.025</td></tr><tr><td>- Recall</td><td>0.228</td><td>0.186</td><td>0.122</td><td>0.277</td><td>0.202</td><td>0.142</td></tr><tr><td>- F1</td><td>0.262</td><td>0.238</td><td>0.169</td><td>0.395</td><td>0.350</td><td>0.259</td></tr><tr><td>BARTScore+CNN</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>0.271</td><td>0.269</td><td>0.183</td><td>0.342</td><td>0.315</td><td>0.222</td></tr><tr><td>- Recall</td><td>-0.065</td><td>-0.156</td><td>-0.116</td><td>-0.017</td><td>-0.091</td><td>-0.058</td></tr><tr><td>- F1</td><td>0.118</td><td>0.032</td><td>0.013</td><td>0.201</td><td>0.098</td><td>0.068</td></tr><tr><td>BARTScore+CNN+Para</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>0.207</td><td>0.176</td><td>0.108</td><td>0.288</td><td>0.202</td><td>0.153</td></tr><tr><td>- 
Recall</td><td>0.037</td><td>0.058</td><td>0.052</td><td>0.028</td><td>0.022</td><td>0.021</td></tr><tr><td>- F1</td><td>0.163</td><td>0.131</td><td>0.095</td><td>0.211</td><td>0.128</td><td>0.100</td></tr><tr><td>ChatGPT Multi-Aspect</td><td>0.435</td><td>0.377</td><td>0.269</td><td>0.398</td><td>0.404</td><td>0.303</td></tr><tr><td>ChatGPT Overall</td><td>0.248</td><td>0.229</td><td>0.169</td><td>0.232</td><td>0.239</td><td>0.190</td></tr><tr><td>Vicuna-33b v.1.3 Multi-Aspect</td><td>0.427</td><td>0.436</td><td>0.320</td><td>0.370</td><td>0.371</td><td>0.276</td></tr><tr><td>Vicuna-33b v1.3 Overall</td><td>-0.109</td><td>-0.068</td><td>-0.056</td><td>-0.124</td><td>-0.075</td><td>-0.068</td></tr><tr><td>GPT-4 Multi-Aspect</td><td>0.740</td><td>0.753</td><td>0.581</td><td>0.635</td><td>0.694</td><td>0.543</td></tr><tr><td>GPT-4 Overall</td><td>0.631</td><td>0.653</td><td>0.526</td><td>0.585</td><td>0.638</td><td>0.537</td></tr><tr><td>Prometheus-13b Multi-Aspect</td><td>0.410</td><td>0.455</td><td>0.330</td><td>0.362</td><td>0.441</td><td>0.332</td></tr><tr><td>Prometheus-13b Overall</td><td>0.321</td><td>0.333</td><td>0.267</td><td>0.333</td><td>0.390</td><td>0.320</td></tr></table>
|
| 589 |
+
|
| 590 |
+
Table G.2: Correlation of evaluation metric scores to AMT-generated evaluation scores specifically for DialoGPT-generated candidates; best correlation is in bold, second is underlined.
|
| 591 |
+
|
| 592 |
+
<table><tr><td colspan="7">Evaluation Metric Correlations (ChatGPT)</td></tr><tr><td rowspan="2">Metric</td><td colspan="3">AMT Multi-aspect</td><td colspan="3">AMT Overall</td></tr><tr><td>Pear.</td><td>Spear.</td><td>Kend.</td><td>Pear.</td><td>Spear.</td><td>Kend.</td></tr><tr><td>BLEU1</td><td>-0.078</td><td>-0.167</td><td>-0.125</td><td>-0.113</td><td>-0.157</td><td>-0.118</td></tr><tr><td>BLEU3</td><td>0.221</td><td>0.074</td><td>0.025</td><td>0.135</td><td>0.041</td><td>0.014</td></tr><tr><td>BLEU4</td><td>0.189</td><td>0.063</td><td>0.012</td><td>0.106</td><td>0.035</td><td>0.008</td></tr><tr><td>ROUGE-L</td><td>0.040</td><td>0.000</td><td>-0.001</td><td>0.003</td><td>0.014</td><td>0.015</td></tr><tr><td>METEOR</td><td>0.091</td><td>-0.002</td><td>-0.004</td><td>0.038</td><td>0.002</td><td>-0.006</td></tr><tr><td>BERTScore</td><td>0.140</td><td>0.170</td><td>0.117</td><td>0.135</td><td>0.167</td><td>0.112</td></tr><tr><td>BARTScore</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>-0.125</td><td>-0.175</td><td>-0.123</td><td>-0.079</td><td>-0.126</td><td>-0.089</td></tr><tr><td>- Recall</td><td>0.156</td><td>0.165</td><td>0.119</td><td>0.071</td><td>0.133</td><td>0.094</td></tr><tr><td>- F1</td><td>-0.081</td><td>-0.145</td><td>-0.105</td><td>-0.058</td><td>-0.124</td><td>-0.084</td></tr><tr><td>BARTScore+CNN</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>0.268</td><td>0.292</td><td>0.212</td><td>0.246</td><td>0.246</td><td>0.191</td></tr><tr><td>- Recall</td><td>0.288</td><td>0.305</td><td>0.223</td><td>0.204</td><td>0.229</td><td>0.176</td></tr><tr><td>- F1</td><td>0.325</td><td>0.339</td><td>0.232</td><td>0.243</td><td>0.256</td><td>0.185</td></tr><tr><td>BARTScore+CNN+Para</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>0.205</td><td>0.263</td><td>0.190</td><td>0.186</td><td>0.229</td><td>0.173</td></tr><tr><td>- 
Recall</td><td>0.273</td><td>0.282</td><td>0.184</td><td>0.182</td><td>0.212</td><td>0.149</td></tr><tr><td>- F1</td><td>0.291</td><td>0.318</td><td>0.219</td><td>0.212</td><td>0.243</td><td>0.173</td></tr><tr><td>ChatGPT Multi-Aspect</td><td>0.174</td><td>0.136</td><td>0.105</td><td>0.115</td><td>0.096</td><td>0.077</td></tr><tr><td>ChatGPT Overall</td><td>0.196</td><td>0.101</td><td>0.086</td><td>0.13</td><td>0.075</td><td>0.067</td></tr><tr><td>Vicuna-33b v.1.3 Multi-Aspect</td><td>0.295</td><td>0.287</td><td>0.218</td><td>0.287</td><td>0.259</td><td>0.215</td></tr><tr><td>Vicuna-33b v.1.3 Overall</td><td>0.138</td><td>0.09</td><td>0.077</td><td>0.067</td><td>0.043</td><td>0.038</td></tr><tr><td>GPT-4 Multi-Aspect</td><td>0.419</td><td>0.274</td><td>0.228</td><td>0.418</td><td>0.204</td><td>0.178</td></tr><tr><td>GPT-4 Overall</td><td>-0.006</td><td>0.001</td><td>0.001</td><td>-0.089</td><td>-0.091</td><td>-0.082</td></tr><tr><td>Prometheus-13b Multi-Aspect</td><td>0.298</td><td>0.272</td><td>0.208</td><td>0.222</td><td>0.187</td><td>0.154</td></tr><tr><td>Prometheus-13b Overall</td><td>0.136</td><td>0.107</td><td>0.091</td><td>0.066</td><td>0.086</td><td>0.076</td></tr></table>
|
| 593 |
+
|
| 594 |
+
Table G.3: Correlation of evaluation metric scores to AMT-generated evaluation scores specifically for ChatGPT-generated candidates; best correlation is in bold, second is underlined.
|
| 595 |
+
|
| 596 |
+
<table><tr><td colspan="7">Evaluation Metric Correlations (Vicuna v1.3)</td></tr><tr><td rowspan="2">Metric</td><td colspan="3">AMT</td><td colspan="3">AMT Overall</td></tr><tr><td>Pear.</td><td>Spear.</td><td>Kend.</td><td>Pear.</td><td>Spear.</td><td>Kend.</td></tr><tr><td>BLEU1</td><td>-0.054</td><td>-0.155</td><td>-0.096</td><td>-0.159</td><td>-0.214</td><td>-0.143</td></tr><tr><td>BLEU3</td><td>-0.022</td><td>-0.055</td><td>-0.035</td><td>-0.006</td><td>-0.108</td><td>-0.074</td></tr><tr><td>BLEU4</td><td>-0.055</td><td>-0.064</td><td>-0.041</td><td>-0.042</td><td>-0.129</td><td>-0.092</td></tr><tr><td>ROUGE-L</td><td>-0.036</td><td>-0.135</td><td>-0.104</td><td>-0.147</td><td>-0.247</td><td>-0.166</td></tr><tr><td>METEOR</td><td>0.139</td><td>0.019</td><td>0.011</td><td>0.127</td><td>0.054</td><td>0.032</td></tr><tr><td>BERTScore</td><td>0.229</td><td>0.174</td><td>0.133</td><td>0.181</td><td>0.139</td><td>0.099</td></tr><tr><td>BARTScore</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>-0.218</td><td>-0.170</td><td>-0.104</td><td>-0.328</td><td>-0.298</td><td>-0.211</td></tr><tr><td>- Recall</td><td>0.442</td><td>0.300</td><td>0.205</td><td>0.464</td><td>0.356</td><td>0.266</td></tr><tr><td>- F1</td><td>-0.089</td><td>-0.110</td><td>-0.063</td><td>-0.212</td><td>-0.235</td><td>-0.159</td></tr><tr><td>BARTScore+CNN</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>0.291</td><td>0.219</td><td>0.158</td><td>0.215</td><td>0.145</td><td>0.118</td></tr><tr><td>- Recall</td><td>0.192</td><td>0.279</td><td>0.200</td><td>0.145</td><td>0.167</td><td>0.125</td></tr><tr><td>- F1</td><td>0.294</td><td>0.327</td><td>0.232</td><td>0.219</td><td>0.223</td><td>0.159</td></tr><tr><td>BARTScore+CNN+Para</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>- Precision</td><td>0.159</td><td>0.202</td><td>0.135</td><td>0.147</td><td>0.163</td><td>0.127</td></tr><tr><td>- 
Recall</td><td>0.211</td><td>0.210</td><td>0.158</td><td>0.102</td><td>0.061</td><td>0.047</td></tr><tr><td>- F1</td><td>0.233</td><td>0.230</td><td>0.163</td><td>0.152</td><td>0.119</td><td>0.094</td></tr><tr><td>ChatGPT Multi-Aspect</td><td>0.266</td><td>0.368</td><td>0.284</td><td>0.214</td><td>0.236</td><td>0.182</td></tr><tr><td>ChatGPT Overall</td><td>0.122</td><td>0.227</td><td>0.189</td><td>0.109</td><td>0.146</td><td>0.124</td></tr><tr><td>Vicuna Multi-Aspect</td><td>0.409</td><td>0.404</td><td>0.321</td><td>0.374</td><td>0.359</td><td>0.293</td></tr><tr><td>Vicuna Overall</td><td>0.250</td><td>0.245</td><td>0.197</td><td>0.308</td><td>0.204</td><td>0.174</td></tr><tr><td>GPT-4 Multi-Aspect</td><td>0.236</td><td>0.313</td><td>0.238</td><td>0.194</td><td>0.223</td><td>0.173</td></tr><tr><td>GPT-4 Overall</td><td>0.122</td><td>0.227</td><td>0.189</td><td>0.109</td><td>0.146</td><td>0.124</td></tr><tr><td>Prometheus-13b Multi-Aspect</td><td>0.172</td><td>0.120</td><td>0.093</td><td>0.104</td><td>0.018</td><td>0.004</td></tr><tr><td>Prometheus-13b Overall</td><td>0.052</td><td>0.125</td><td>0.103</td><td>0.012</td><td>0.060</td><td>0.053</td></tr></table>
|
| 597 |
+
|
| 598 |
+
Table G.4: Correlation of evaluation metric scores to AMT-generated evaluation scores specifically for Vicuna-generated candidates; best correlation is in bold, second is underlined.
|
| 599 |
+
|
| 600 |
+
<table><tr><td></td><td>Multi-aspect Improvement in Prometheus Counter Narrative Evaluation</td></tr><tr><td>Hate Speech</td><td>Jews control the U.S. government.</td></tr><tr><td>Generated Counter Narrative</td><td>The vast majority of Jews are service members in the military and have worked hard to have a better life for themselves and their families.</td></tr><tr><td>AMT Overall Score Average</td><td>2.33 stars</td></tr><tr><td>Prometheus-generated Overall Score and Feedback</td><td>5 stars ... The counter narrative presents a compelling argument that the majority of Jews are service members in the military, which directly contradicts the hateful claim that Jews control the U.S. government ... demonstrates a deep understanding of the topic and presents strong evidence to support its argument ... The response is clear, concise, and comprehensive, showcasing superior reasoning and a respectful approach ...</td></tr><tr><td></td><td>Prometheus Multi-aspect Scores</td></tr><tr><td>Prometheus-generated Opposition Score and Feedback</td><td>1 star ... Instead of challenging the notion that Jews control the U.S. government, the response reinforces the idea by suggesting that Jews are primarily service members in the military ...</td></tr><tr><td>Prometheus-generated Specificity Score and Feedback</td><td>Specificity - 1 star ... The counter narrative does not provide any in-depth arguments, nuanced reasoning, or supporting evidence to refute the claim that Jews control the U.S. government.</td></tr></table>
|
| 601 |
+
|
| 602 |
+
Table H.1: Example of improvement in Prometheus evaluation through the use of our multi-aspect framework; Prometheus initially gives a misaligned Overall score by overrating Opposition and Specificity but corrects these ratings when employing our multi-aspect framework.
|
| 603 |
+
|
| 604 |
+
<table><tr><td></td><td>Error - Misunderstanding Counter Narrative</td></tr><tr><td>Hate Speech</td><td>Muslims have no place in civilised Europe.</td></tr><tr><td>DialoGPT Generated Counter Narrative</td><td>Muslims are the second largest religious group in the European Union, second only to Jews and Hindus.</td></tr><tr><td>Human Opposition Score and Feedback</td><td>1 star. The counter narrative does nothing to refute the claim that Muslims do not belong in Europe, and merely says that they are already there, which will not be convincing in the least to someone who makes hateful claims.</td></tr><tr><td>ChatGPT-generated Opposition Score and Feedback</td><td>4 stars ... It highlights that Muslims are a significant religious group in the European Union, which directly opposes the claim that they have no place in civilized Europe.</td></tr><tr><td>GPT4-generated Opposition Score and Feedback</td><td>2 stars ... The response does not address the sentiment of the hate speech that Muslims do not belong in Europe, it merely states their presence.</td></tr></table>
|
| 605 |
+
|
| 606 |
+
Table H.2: Example of an LLM evaluator providing misaligned evaluation due to misunderstanding the relationship between the counter narrative response and hate speech example; ChatGPT, Vicuna, and Prometheus are more prone to misunderstanding DialoGPT examples in this way.
|
| 607 |
+
|
| 608 |
+
<table><tr><td></td><td>Error - Conflating Aspects</td></tr><tr><td>Hate Speech</td><td>Jews control the U.S. government.</td></tr><tr><td>Vicuna Generated Counter Narrative (shortened)</td><td>1. Jews are a diverse group of people, just like any other religious or ethnic group, and they don’t have any special power to control the U.S. government ... 4. It’s important to have open and respectful conversations about government policies and actions without making unfounded accusations or spreading hateful stereotypes.</td></tr><tr><td>Human Toxicity Score and Feedback</td><td>5 stars. The counter makes its arguments in a clear, respectful manner that should not offend anyone.</td></tr><tr><td>GPT4-generated Toxicity Score and Feedback</td><td>4 stars ... Although there is room for improvement in terms of providing more specific examples or evidence, the overall response is constructive and promotes understanding.</td></tr></table>
|
| 609 |
+
|
| 610 |
+
Table H.3: Example of a LLM evaluator providing misaligned evaluation for one aspect due to including information related to another aspect within score; GPT-4 is more prone to this error when evaluating Vicuna-generated candidates.
|
| 611 |
+
|
| 612 |
+
<table><tr><td></td><td>GPT-4 Error - Underrating Vicuna</td></tr><tr><td>Hate Speech</td><td>Islam is not a peaceful religion, it is full of hate and violence.</td></tr><tr><td>Vicuna Generated Counter Narrative (shortened)</td><td>1. Islam is a religion of peace, and it is important to recognize that the actions of a few do not represent the entire faith. 2. The Quran teaches peace, compassion, and mercy, and there are many verses promoting these values. 3. Many Muslims around the world are working to promote peace, justice, and understanding among people of all faiths and backgrounds ...</td></tr><tr><td>Human Specificity Score and Feedback</td><td>5 stars. The hate speech is about how Islam is hateful and violent. The counter goes over several points to refute that, from what the religion itself actually teaches to the large amounts of peaceful, nonviolent things its adherents promote in reality.</td></tr><tr><td>GPT4-generated Specificity Score and Feedback</td><td>2 stars. The counter narrative response provides some degree of specificity ... However, the arguments presented are relatively broad and lack depth ...</td></tr></table>
|
| 613 |
+
|
| 614 |
+
Table H.4: Example of GPT-4 providing misaligned evaluation due to underrating Vicuna-generated candidates relative to AMT annotation.
|
| 615 |
+
|
| 616 |
+
<table><tr><td colspan="8">Average AMT Scores</td></tr><tr><td>Generation Model</td><td>Evaluation Approach</td><td>Opposition</td><td>Relatedness</td><td>Specificity</td><td>Toxicity</td><td>Fluency</td><td>Overall</td></tr><tr><td>DialoGPT</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>Human</td><td>2.76</td><td>3.22</td><td>1.88</td><td>3.58</td><td>3.81</td><td>2.04</td></tr><tr><td></td><td>LLM Evaluators</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>- GPT-4</td><td>2.35 (-0.41)</td><td>2.88 (-0.34)</td><td>1.68 (-0.20)</td><td>4.33 (+0.75)</td><td>2.88 (-0.93)</td><td>1.82 (-0.22)</td></tr><tr><td></td><td>- ChatGPT</td><td>3.18 (+0.42)</td><td>3.50 (+0.28)</td><td>2.35 (+0.47)</td><td>3.38 (-0.20)</td><td>2.92 (-0.89)</td><td>2.47 (+0.43)</td></tr><tr><td></td><td>- Vicuna-33b v1.3</td><td>2.40 (-0.36)</td><td>2.47 (-0.75)</td><td>1.58 (-0.30)</td><td>3.48 (-0.10)</td><td>3.15 (-0.66)</td><td>1.42 (-0.62)</td></tr><tr><td></td><td>- Prometheus-13b</td><td>1.43 (-1.33)</td><td>1.83 (-1.39)</td><td>1.55 (-0.33)</td><td>3.53 (-0.05)</td><td>3.07 (-0.74)</td><td>2.45 (+0.41)</td></tr><tr><td>ChatGPT</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>Human</td><td>4.78</td><td>4.71</td><td>4.18</td><td>4.64</td><td>4.77</td><td>4.36</td></tr><tr><td></td><td>LLM Evaluators</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>- GPT-4</td><td>4.95 (+0.17)</td><td>4.95 (+0.24)</td><td>3.70 (-0.48)</td><td>5.00 (+0.36)</td><td>5.00 (+0.23)</td><td>4.85 (+0.49)</td></tr><tr><td></td><td>- ChatGPT</td><td>4.02 (-0.76)</td><td>4.13 (-0.58)</td><td>3.42 (-0.76)</td><td>4.15 (-0.49)</td><td>4.02 (-0.75)</td><td>3.88 (-0.48)</td></tr><tr><td></td><td>- Vicuna-33b v1.3</td><td>5.00 (+0.22)</td><td>4.78 (+0.07)</td><td>3.95 (-0.23)</td><td>5.00 (+0.36)</td><td>5.00 (+0.23)</td><td>4.63 (+0.27)</td></tr><tr><td></td><td>- 
Prometheus-13b</td><td>4.20 (-0.58)</td><td>4.92 (+0.21)</td><td>4.03 (-0.15)</td><td>4.97 (-0.33)</td><td>4.33 (-0.44)</td><td>4.82 (-0.46)</td></tr><tr><td>Vicuna-33b v1.3</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>Human</td><td>4.44</td><td>4.54</td><td>3.98</td><td>4.86</td><td>4.34</td><td>4.02</td></tr><tr><td></td><td>LLM Evaluators</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>- GPT-4</td><td>3.90 (-0.54)</td><td>4.03 (-0.51)</td><td>3.13 (-0.85)</td><td>4.05 (-0.81)</td><td>3.72 (-0.62)</td><td>3.55 (-0.47)</td></tr><tr><td></td><td>- ChatGPT</td><td>3.92 (-0.52)</td><td>4.05 (-0.49)</td><td>3.13 (-0.85)</td><td>4.05 (-0.81)</td><td>3.70 (-0.64)</td><td>3.57 (-0.45)</td></tr><tr><td></td><td>- Vicuna-33b v1.3</td><td>4.95 (+0.51)</td><td>4.48 (-0.06)</td><td>3.32 (-0.66)</td><td>4.72 (-0.14)</td><td>4.60 (+0.26)</td><td>3.92 (-0.10)</td></tr><tr><td></td><td>- Prometheus-13b</td><td>4.05 (-0.39)</td><td>5.00 (-0.46)</td><td>3.95 (-0.03)</td><td>5.00 (-0.14)</td><td>4.33 (-0.01)</td><td>4.77 (-0.75)</td></tr></table>
|
| 617 |
+
|
| 618 |
+
Table I.1: Average score given to counter narratives generated by each generation model used in our evaluation set including average scores given from each LLM evaluator.
|
| 619 |
+
|
| 620 |
+
<table><tr><td colspan="9">Mean Absolute Error</td></tr><tr><td>Generation Model</td><td>Evaluation Approach</td><td>Opposition</td><td>Relatedness</td><td>Specificity</td><td>Toxicity</td><td>Fluency</td><td>Aspect Average</td><td>Overall</td></tr><tr><td>DialoGPT</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>GPT-4</td><td>0.77</td><td>1.01</td><td>0.54</td><td>0.91</td><td>1.15</td><td>0.52</td><td>0.53</td></tr><tr><td></td><td>ChatGPT</td><td>1.02</td><td>1.03</td><td>0.9</td><td>0.91</td><td>1.26</td><td>0.66</td><td>0.87</td></tr><tr><td></td><td>Vicuna-33b v1.3</td><td>1.01</td><td>1.2</td><td>0.79</td><td>0.83</td><td>1.15</td><td>0.74</td><td>0.95</td></tr><tr><td></td><td>Prometheus-13b</td><td>1.48</td><td>2.18</td><td>0.97</td><td>1.07</td><td>1.36</td><td>1.09</td><td>1.33</td></tr><tr><td>ChatGPT</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>GPT-4</td><td>0.21</td><td>0.29</td><td>0.67</td><td>0.35</td><td>0.23</td><td>0.22</td><td>0.66</td></tr><tr><td></td><td>ChatGPT</td><td>0.81</td><td>0.73</td><td>0.9</td><td>0.69</td><td>0.75</td><td>0.7</td><td>0.64</td></tr><tr><td></td><td>Vicuna-33b v1.3</td><td>0.22</td><td>0.39</td><td>0.7</td><td>0.36</td><td>0.23</td><td>0.25</td><td>0.61</td></tr><tr><td></td><td>Prometheus-13b</td><td>0.68</td><td>0.25</td><td>0.69</td><td>0.37</td><td>0.57</td><td>0.32</td><td>0.62</td></tr><tr><td>Vicuna-33b v1.3</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>GPT-4</td><td>0.75</td><td>0.71</td><td>1.2</td><td>0.92</td><td>0.89</td><td>0.73</td><td>0.77</td></tr><tr><td></td><td>ChatGPT</td><td>0.74</td><td>0.69</td><td>1.19</td><td>0.92</td><td>0.89</td><td>0.73</td><td>0.76</td></tr><tr><td></td><td>Vicuna-33b 
v1.3</td><td>0.57</td><td>0.59</td><td>0.99</td><td>0.38</td><td>0.44</td><td>0.3</td><td>0.82</td></tr><tr><td></td><td>Prometheus-13b</td><td>0.84</td><td>0.46</td><td>0.99</td><td>0.14</td><td>0.49</td><td>0.41</td><td>0.91</td></tr><tr><td>All Models</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>GPT-4</td><td>0.58</td><td>0.67</td><td>0.81</td><td>0.73</td><td>0.76</td><td>0.49</td><td>0.65</td></tr><tr><td></td><td>ChatGPT</td><td>0.86</td><td>0.82</td><td>1</td><td>0.84</td><td>0.97</td><td>0.69</td><td>0.76</td></tr><tr><td></td><td>Vicuna-33b v1.3</td><td>0.6</td><td>0.73</td><td>0.83</td><td>0.52</td><td>0.61</td><td>0.43</td><td>0.79</td></tr><tr><td></td><td>Prometheus-13b</td><td>1</td><td>0.96</td><td>0.89</td><td>0.53</td><td>0.81</td><td>0.61</td><td>0.95</td></tr></table>
|
| 621 |
+
|
| 622 |
+
Table I.2: Mean absolute error for scores generated by each LLM evaluator in our study per generation approach as well as for all candidates generated.
|
| 623 |
+
|
| 624 |
+
<table><tr><td colspan="8">Average AMT Scores</td></tr><tr><td>Generation Model</td><td>Opposition</td><td>Relatedness</td><td>Specificity</td><td>Toxicity</td><td>Fluency</td><td>Aspect Average</td><td>Overall</td></tr><tr><td>DialoGPT</td><td>2.76 ± 1.33</td><td>3.22 ± 1.04</td><td>1.88 ± 0.76</td><td>3.58 ± 1.20</td><td>3.81 ± 1.02</td><td>3.05 ± 0.73</td><td>2.04 ± 0.83</td></tr><tr><td>ChatGPT</td><td>4.78 ± 0.35</td><td>4.71 ± 0.54</td><td>4.18 ± 0.72</td><td>4.64 ± 0.47</td><td>4.77 ± 0.29</td><td>4.62 ± 0.32</td><td>4.36 ± 0.60</td></tr><tr><td>Vicuna-33b v1.3</td><td>4.44 ± 0.60</td><td>4.54 ± 0.64</td><td>3.98 ± 0.86</td><td>4.86 ± 0.36</td><td>4.34 ± 0.75</td><td>4.43 ± 0.43</td><td>4.02 ± 0.71</td></tr><tr><td>All Models</td><td>3.99 ± 1.24</td><td>4.16 ± 1.02</td><td>3.34 ± 1.3</td><td>4.36 ± 0.96</td><td>4.31 ± 0.85</td><td>4.03 ± 0.87</td><td>3.47 ± 1.25</td></tr></table>
|
| 625 |
+
|
| 626 |
+
Table I.3: Average score given from AMT workers to counter narratives generated by each generation model used in our evaluation set including standard deviation.
|
amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:26b7bdf8e4c8b92e6c08f09dd0e114d40ef08ec950d4576ef04faa51b43362ff
|
| 3 |
+
size 1684588
|
amultiaspectframeworkforcounternarrativeevaluationusinglargelanguagemodels/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:34814d7ba114cb9d09e41f66b073d336bc876dd708bca483593450869490f7bb
|
| 3 |
+
size 625493
|
arithmeticreasoningwithllmprologgenerationpermutation/5a089955-c71e-4ee9-b652-0504dd5f9383_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:683060ae19538a2729976a15c841877e52e249b8e721fbd8631e766fad6eb1d2
|
| 3 |
+
size 69876
|
arithmeticreasoningwithllmprologgenerationpermutation/5a089955-c71e-4ee9-b652-0504dd5f9383_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6ca6e5a0dd1bbe165607f06a029ab27c850511b759949664691a55e49ed5c84d
|
| 3 |
+
size 85552
|
arithmeticreasoningwithllmprologgenerationpermutation/5a089955-c71e-4ee9-b652-0504dd5f9383_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1007d8ba63662ab4011999ea126373e36d88b1b6beedbfca73c9798ae60ef8f8
|
| 3 |
+
size 596188
|
arithmeticreasoningwithllmprologgenerationpermutation/full.md
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Arithmetic Reasoning with LLM: Prolog Generation & Permutation
|
| 2 |
+
|
| 3 |
+
Xiaocheng Yang and Bingsen Chen and Yik-Cheung Tam
|
| 4 |
+
|
| 5 |
+
Shanghai Frontiers Science Center of Artificial Intelligence and Deep Learning
|
| 6 |
+
|
| 7 |
+
New York University Shanghai
|
| 8 |
+
|
| 9 |
+
{xy2128,bc3088,yt2267}@nyu.edu
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Instructing large language models (LLMs) to solve elementary school math problems has shown great success using Chain of Thought (CoT). However, the CoT approach relies on an LLM to generate a sequence of arithmetic calculations which can be prone to cascaded calculation errors. We hypothesize that an LLM should focus on extracting predicates and generating symbolic formulas from the math problem description so that the underlying calculation can be done via an external code interpreter. We investigate using LLM to generate Prolog programs to solve mathematical questions. Experimental results show that our Prolog-based arithmetic problem-solving outperforms CoT generation in the GSM8K benchmark across three distinct LLMs. In addition, given the insensitive ordering of predicates and symbolic formulas in Prolog, we propose to permute the ground truth predicates for more robust LLM training via data augmentation.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Large language models (LLMs), with their scaling of model size and data size, have demonstrated impressive performance across various understanding and generation tasks (Brown et al., 2020; Chowdhery et al., 2022; Rae et al., 2021; Thoppilan et al., 2022; Touvron et al., 2023; Almazrouei et al., 2023; Jiang et al., 2023). Nevertheless, such LLMs fall short in addressing mathematical problems that involves arithmetic, commonsense, and symbolic reasoning – topics that may appear deceptively simple to humans (Rae et al., 2021). Existing works leveraged Chain-of-Thought (CoT) reasoning that asks language models to generate both the answer and the step-by-step reasoning chain, which helps break down a complex reasoning task into a sequential thought process (Wei et al., 2022b). Particularly, arithmetic reasoning with CoT is shown to be an emergent ability that language models acquired during the scaling process (Wei et al., 2022a).
|
| 18 |
+
|
| 19 |
+
# Question
|
| 20 |
+
|
| 21 |
+
Raymond and Samantha are cousins. Raymond was born 6 years before Samantha. Raymond had a son at the age of 23. If Samantha is now 31, how many years ago was Raymond's son born?
|
| 22 |
+
|
| 23 |
+
# Ground Truth
|
| 24 |
+
|
| 25 |
+
When Raymond's son was born Samantha was $23 - 6 = 17$ years old. Thus it has been $31 - 17 = 14$ years since Raymond's son was born.
|
| 26 |
+
|
| 27 |
+
Answer: 14
|
| 28 |
+
|
| 29 |
+
# Chain of Thought
|
| 30 |
+
|
| 31 |
+
Samantha is $31 - 6 = 25$ years old. Raymond's son was born $25 - 23 = 2$ years ago.
|
| 32 |
+
|
| 33 |
+
Answer: 2
|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
|
| 37 |
+
# Prolog Generation
|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
Figure 1: Overview of Prolog generation for arithmetic reasoning with large language models.
|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
|
| 44 |
+
Yet, natural language reasoning is not native to mathematical operations and symbolic manipulations. A line of work has focused on augmenting language models with deterministic computation resources like a calculator (Schick et al., 2023) or program-based tools (Gao et al., 2023; Gou et al., 2023). However, all such methods require a sequential reasoning trajectory, where models need to translate the natural language questions into sequential mathematical or logical operations. Our research probes into the application of Prolog, a logic programming language, in solving the arithmetic reasoning task. Prolog solves arithmetic reasoning tasks by defining an unordered set of predicates and running queries over them. We further explain the unique properties of Prolog in Section 2. In Prolog code generation for arithmetic reasoning, LLMs extract facts and rules in mathematical ques
|
| 45 |
+
|
| 46 |
+
tions and formulate them into Prolog code. If the facts and rules are accurately captured, a Prolog interpreter can precisely solve for a correct answer in a deterministic way.
|
| 47 |
+
|
| 48 |
+
Our research has the following contributions: 1) We curate and open-source the GSM8K-Prolog dataset with a semi-automatic approach, which contains arithmetic reasoning problems and their corresponding Prolog code solutions. 2) Our experiments show that Prolog code generation is consistently better than CoT on the arithmetic reasoning task, indicating that LLM can focus on predicate extractions and rely on an external tool to calculate and perform the logical induction to address mathematical problems. 3) Given the non-sequential nature of predicates in Prolog code, we propose predicate permutation as a data augmentation method and demonstrate its efficacy in robust LLM training.
|
| 49 |
+
|
| 50 |
+

|
| 51 |
+
Original Ground Truth
|
| 52 |
+
|
| 53 |
+

|
| 54 |
+
Permuted Ground Truth
|
| 55 |
+
Figure 2: Prolog and permuted Prolog code samples.
|
| 56 |
+
|
| 57 |
+
# 2 Preliminaries: Prolog Language
|
| 58 |
+
|
| 59 |
+
Prolog is a logic programming language, which was initially designed for artificial intelligence and computational linguistics (Clocksin and Mellish, 2003; Bratko, 2012; Covington, 2002). As shown in the upper graph of Figure 2, a Prolog program defines a set of predicates that contains facts and goals. In the example, facts include earn(weng, 12) that declares the hourly salary of Weng, and work(weng, 50) that defines the working minutes of Weng; the goals constitute a rule in the form of solve(<answer>) :- <goal_1>, <goal_2>, ... A
|
| 60 |
+
|
| 61 |
+
rule is true when all the goals are satisfied. Having all the facts and goals defined in the program, users can make a query to obtain the solutions that make the rule true given all the facts. Moreover, Prolog codes are not sequential like Python, meaning that the order of facts and rules does not alter the result of the program. The lower graph in Figure 2, shows an equivalent sample that permutes the order of the predicates, which produces the same result as the original program.
|
| 62 |
+
|
| 63 |
+
# 3 Method
|
| 64 |
+
|
| 65 |
+
# 3.1 GSM8K-Prolog Dataset
|
| 66 |
+
|
| 67 |
+
To our knowledge, there has not been a dataset for solving mathematical questions with Prolog. We hence curated a dataset based on GSM8K (Cobbe et al., 2021), a popular benchmark of diverse grade school math word problems, in a semi-automatic manner with OpenAI's Text Completion API $^{1}$ . In particular, we used the same dataset splits and questions in GSM8K and prompted GPT-4 to generate the Prolog programs to solve the questions. We then manually corrected some malfunctioning samples. In this manner, we obtained a high-quality corpus with $100\%$ accuracy in terms of the code results. Algorithm 1 describes the detailed pseudocode for creating this dataset. We open-sourced this dataset to the research community with the MIT license. $^{2}$ .
|
| 68 |
+
|
| 69 |
+
# 3.2 PROPER: Prolog Permutation
|
| 70 |
+
|
| 71 |
+
Since Prolog predicates are permutable, inspired by XLNet (Yang et al., 2020) that performs a tokenwise permutation via attention masking, we decided to also use the permutation technique. The XLNet, via the permutation, can attend to tokens on both sides during training and thus can partially obtain the property of autoencoding while maintaining the property of autoregressive modeling. Similarly, PROPER takes advantage of the permutative property of facts and goals in the Prolog programs as indicated in Figure 2. For each original program, we sample $n$ of its permutations and mix them into the dataset. In this way, models can learn to extract predicates in the mathematical questions based on any other predicates regardless of the ordering, which more precisely reflects the nature
|
| 72 |
+
|
| 73 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Llama-2</td><td colspan="2">CodeLlama</td><td colspan="2">Mistral</td></tr><tr><td>GSM8K</td><td>GSM-HARD</td><td>GSM8K</td><td>GSM-HARD</td><td>GSM8K</td><td>GSM-HARD</td></tr><tr><td>CoT</td><td>33.8%</td><td>12.0%</td><td>37.5%</td><td>13.9%</td><td>58.9%</td><td>30.8%</td></tr><tr><td>Prolog</td><td>41.5%</td><td>32.4%</td><td>55.0%</td><td>41.6%</td><td>66.3%</td><td>50.6%</td></tr><tr><td>PROPER</td><td>51.0%</td><td>37.4%</td><td>59.0%</td><td>45.9%</td><td>70.2%</td><td>54.4%</td></tr></table>
|
| 74 |
+
|
| 75 |
+
Table 1: Accuracy results on the GSM8K and GSM-HARD datasets. We compare regular Prolog generation (Prolog) and PROPER Prolog generation with the CoT baseline (supervised finetuning with LoRA using CoT ground truth labels in the original GSM8K dataset).
|
| 76 |
+
|
| 77 |
+
of the Prolog language. We describe the practical details of permutation in Appendix A.2.
|
| 78 |
+
|
| 79 |
+
# 4 Experiments
|
| 80 |
+
|
| 81 |
+
# 4.1 Setup
|
| 82 |
+
|
| 83 |
+
Dataset We used the GSM8K-Prolog described in Section 3.1. We denote the corpus as $D$ . The training set is $D_{train}$ and the test set is $D_{test}$ . The total corpus size is 8792, where 7473 samples belong to the training set and 1319 belong to the test set. During training, 100 samples were selected from the training set to constitute the validation set. The input format follows the instruction prompt used in Stanford Alpaca (Taori et al., 2023) (See sample prompts in Appendix A.3). We discarded samples that exceeded 512 tokens. Notably, when we used PROPER to augment the dataset, we used slightly altered input prompts for permuted samples because we found that using the same instruction for both the original ground truth codes and the permuted ones degraded the performance of the model. A likely reason is that having multiple correct output tokens for the same input instruction confuses the model. In addition, besides the GSM8K's test set, GSM-HARD (Gao et al., 2023), which replaces the numbers in the GSM8K test set with large numbers and thus makes questions hard for language models, was also used for evaluation.
|
| 84 |
+
|
| 85 |
+
Training We experimented with different LLMs' 7B versions, including Llama2 (Touvron et al., 2023), CodeLlama (Rozière et al., 2023) and Mistral (Jiang et al., 2023). We adopted 8-bit quantization and LoRA (Hu et al., 2021) to finetune models efficiently at a reasonable performance cost. We applied LoRA to finetune query and value weight matrices in the transformer blocks. We experimented with different LoRA rank and alpha settings, including $(r,\alpha) = (8,16)$ , $(16,32)$ , and $(32,64)$ . With more trainable parameters, $r = 32$ , $\alpha = 64$ yielded significantly better results,
|
| 86 |
+
|
| 87 |
+
which we thereby adopted as the configuration for all the experiments. Note that this setting resulted in training only $0.248\%$ of the 7 billion parameters for Llama2 and CodeLlama, and $0.188\%$ of the 7 billion for Mistral. We document our training details and GPU usage in Appendix A.5.
|
| 88 |
+
|
| 89 |
+
Evaluation At inference time, we used beam search with a beam size of 4 to generate the Prolog code. We then used the PySwip library $^{3}$ , a foreign interface of Prolog in Python, as the Prolog interpreter to produce the final answer. We used accuracy as the metric for evaluation. It is defined as
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
\mathrm{Acc} = \frac{\sum_{i=1}^{|D_{test}|} \mathbb{1}\left\{\mathcal{P}\left(a_{i}^{pred}\right) = \mathcal{P}\left(a_{i}^{true}\right)\right\}}{|D_{test}|} \times 100\%
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
where $\mathcal{P}$ denotes the Prolog interpreter. Notably, since we noticed that the PySwip library cannot handle decimal answers, we only considered the samples with an integer answer.
|
| 96 |
+
|
| 97 |
+
# 4.2 Results
|
| 98 |
+
|
| 99 |
+
Prolog generation performs consistently better than CoT across three models. According to Table 1, generating Prolog to solve mathematical questions yields significantly more accurate results with a $10.9\%$ margin over the CoT baseline on average across all models on GSM8K. This gap further expands to $22.6\%$ on GSM-HARD, indicating exceptional superiority over CoT when large number calculations are involved. Although Llama-2 and Mistral exhibit large performance gaps when applying CoT reasoning, generating Prolog code produces better results than CoT on both models. This observation indicates that Prolog generation works well regardless of the model's inherent arithmetic reasoning capability. Also, CodeLlama demonstrates a larger performance gain when switching
|
| 100 |
+
|
| 101 |
+
<table><tr><td>Ratio</td><td>Llama-2</td><td>CodeLlama</td><td>Mistral</td></tr><tr><td>1:0</td><td>41.5</td><td>55.0</td><td>66.3</td></tr><tr><td>1:1</td><td>50.9 (49.5)</td><td>58.7 (56.6)</td><td>70.2 (69.1)</td></tr><tr><td>1:2</td><td>51.0 (49.4)</td><td>59.0 (58.3)</td><td>68.8 (66.8)</td></tr></table>
|
| 102 |
+
|
| 103 |
+
Table 2: Accuracy(%) results on GSM8K with different permutation ratios. We report both the best and average accuracy of $1:1$ and $1:2$ over three trials with different randomly permuted data in the form of max (avg). Note that the $1:0$ case essentially means not applying PROPER.
|
| 104 |
+
|
| 105 |
+

|
| 106 |
+
Figure 3: Validation loss curves for training Llama2, CodeLlama, and Mistral with different permutation ratios (We only report the first trial when we use permuted data since the loss curves are very similar across trials).
|
| 107 |
+
|
| 108 |
+
from CoT to Prolog generation, which is potentially attributed to its pretraining on the code-related corpus. In other words, CodeLlama is specifically trained to generate structured programs better than natural language reasoning.
|
| 109 |
+
|
| 110 |
+
With a proper permutation ratio, PROPER further enhances LLM's arithmetic reasoning with Prolog generation. Permutation ratio refers to the ratio between original samples and permuted samples. As shown in Table 2, by adding two permuted samples for each original sample, we observed an increased accuracy of $9.5\%$ and $4.0\%$ of Llama-2 and CodeLlama respectively on the test set. This improvement indicates that learning the non-sequential structure of Prolog predicates is helpful for LLMs to generate correct Prolog programs to solve arithmetic problems. On the other hand, the lowered accuracy of Mistral, compared with its case of one permutation per sample, suggests that PROPER might be limited for models already with high Prolog generation capacity.
|
| 111 |
+
|
| 112 |
+
Lowered validation loss from PROPER does not lead to higher accuracy. As is shown in Figure 3, increasing the permutation ratio results in significantly lowered validation loss. This is because we first added in permutations and then split a validation set from the training set. Consequently, the permutations of validation samples were included in the training set and the generalization ability of the language models enabled the models to utilize the permutations to improve the performance on the validation set, causing a soft data leakage. Therefore, according to Table 2, the permutation ratio of $1:2$ yielded a weakened performance on Mistral although the validation loss was the lowest.
|
| 113 |
+
|
| 114 |
+

|
| 115 |
+
Figure 4: Validation loss curves and validation accuracy curves for training Llama2 with different methods (We only report the first trial when we use permuted data since the loss and accuracy curves are very similar across trials).
|
| 116 |
+
|
| 117 |
+
Increased validation loss from PROPER does not lead to decreased validation accuracy. Excluding the permutations of validation samples from the training set, we report both the cross-entropy loss and the accuracy on the validation set for Llama2 using different methods in Figure 4. A mismatch between the loss and accuracy is observed. As a loss curve decreases to the minimum and bounces back, the corresponding accuracy curve keeps increasing and then maintains a high level. As is shown in Table 3, by choosing checkpoints based on validation accuracy instead of validation loss, the performance can be improved across all methods. Moreover, the improvement for the Prolog and PROPER method is significantly greater than that of CoT, suggesting a larger divergence between the objective of cross entropy loss and the ultimate accuracy of Prolog generation. Therefore, it is suggested to choose the best checkpoint based on the
|
| 118 |
+
|
| 119 |
+
<table><tr><td>Method</td><td>Initial</td><td>No Leakage (by loss)</td><td>No Leakage (by accuracy)</td></tr><tr><td>CoT</td><td>33.8</td><td>33.8</td><td>36.5</td></tr><tr><td>Prolog</td><td>41.5</td><td>41.5</td><td>47.9</td></tr><tr><td>ProPer 1:1</td><td>50.9 (49.5)</td><td>44.3 (43.4)</td><td>50.1 (48.4)</td></tr><tr><td>ProPer 1:2</td><td>51.0 (49.4)</td><td>44.4 (43.6)</td><td>51.3 (50.3)</td></tr></table>
|
| 120 |
+
|
| 121 |
+
Table 3: Accuracy(%) results of training Llama2 on the GSM8K dataset. We compare the results of avoiding validation sample leakage in the training set and picking the optimal checkpoint based on validation loss and accuracy with the initial results with leakage. The best and average accuracy of $1:1$ and $1:2$ are in the form of max (avg).
|
| 122 |
+
|
| 123 |
+
validation accuracy. Nevertheless, the new performance is similar to the initial results where leakage is involved. We notice that late checkpoints yield better performance according to the validation accuracy and the validation loss keeps decreasing in the initial setting. Therefore, both settings happen to pick late checkpoints, resulting in similar performance.
|
| 124 |
+
|
| 125 |
+
We have also tested Python generation, for which the corpus was generated by the same procedure as Algorithm 1 except that we prepare Python code instead of Prolog code. It gives an accuracy of $55.12\%$ on GSM8K using Llama2 as the base model, better than both Prolog and PROPER. One possible reason is that Python is now the prevalent programming language and Llama2 might have been pretrained on a large amount of Python code. We believe that if sufficient Prolog code is used for training, Prolog generation can at least match up with Python generation due to its essence of symbolic reasoning.
|
| 126 |
+
|
| 127 |
+
We present some representative error cases of Mistral (1:1) in Appendix A.4.
|
| 128 |
+
|
| 129 |
+
# 5 Related Work
|
| 130 |
+
|
| 131 |
+
Arithmetic Reasoning The Chain-of-Thought (CoT) prompting approach (Wei et al., 2022b) first proposes to prompt the model to generate the reasoning chain step-by-step to reach the final answer. Afterwards, advancements have been made in LLMs' reasoning capacity via step-by-step methods (Zhou et al., 2023; Zhu et al., 2023; Huang et al., 2022; Liang et al., 2023). However, the natural language generation still performs poorly on complex or multi-step reasoning. Therefore, one trajectory of efforts has been made to leverage reasoning structures like trees (Yao et al., 2023; Long,
|
| 132 |
+
|
| 133 |
+
2023) and graphs (Besta et al., 2023; Zhang et al., 2023). Another trajectory is to render the reasoning task based on external tools (Cobbe et al., 2021; Mishra et al., 2023; Gou et al., 2023; Gao et al., 2023; Shao et al., 2023; Chen et al., 2023), which is the one that we are following. Besides, Yuan et al.'s (2023) RFT method shares the idea of dataset augmentation, but they compile rejection samples from multiple models to form an augmented training set, which is different from PROPER's automatic permutation.
|
| 134 |
+
|
| 135 |
+
Neural Symbolic Reasoning Neural symbolic reasoning (Andreas et al., 2016; Neelakantan et al., 2017; Hudson and Manning, 2019; Gupta et al., 2020; Nye et al., 2021) aims to leverage both neural networks and symbolic reasoning to obtain better reasoning abilities and transparency. Those methods suffer from low scalability of learning and reasoning components. LLMs are hence adopted to generate symbolic representations from natural language (Lyu et al., 2023; Pan et al., 2023; Yang et al., 2023), where deterministic symbolic solvers will process the query and symbolic representations generated by LLMs to conduct reasoning or proofs. Prolog has been a popular candidate for the format of symbolic representations. We are posited on this trajectory and in the specific field of arithmetic reasoning.
|
| 136 |
+
|
| 137 |
+
# 6 Conclusion
|
| 138 |
+
|
| 139 |
+
In conclusion, we aim to enhance the reasoning performance of LLMs. We adopt the pipeline that the model generates Prolog predicates from a mathematical question in natural language and an external Prolog interpreter processes the query for a final result. We contribute an open-sourced corpus named GSM8K-Prolog, which is a high-quality Prolog-annotated version of GSM8K. We show that Prolog generation substantially outperformed CoT generation across all three 7B models for solving arithmetic reasoning problems. We also propose PROPER, a data augmentation method designed specifically for Prolog code generation, which enables the finetuned models to learn the nonsequential nature of Prolog predicates. PROPER further improves the model's accuracy on GSM8K-Prolog and mitigates early convergence during training. Lastly, due to the gap between cross-entropy loss objective and accuracy, we suggest using validation accuracy instead of validation loss to pick the best checkpoint.
|
| 140 |
+
|
| 141 |
+
# Limitations
|
| 142 |
+
|
| 143 |
+
Although we have experimentally conducted full-parameter finetuning, the result was not satisfying. We believe it is because of the limited size of the original corpus. Therefore, at the current stage, we cannot have a comparison with other methods like ToRA (Gou et al., 2023) or RFT (Yuan et al., 2023). Future research can look into preparing a larger and more diverse corpus adapted to Prolog code generation. Besides, we did not try scaling the base model to more than 7B parameters, so we do not know the impact of model scaling on the performance of Prolog code generation for arithmetic reasoning. Furthermore, due to the limitation of the PySwip library, solvable questions are restricted to the ones with an integer answer. Future work can expand the domain by using other interpreting tools.
|
| 144 |
+
|
| 145 |
+
# References
|
| 146 |
+
|
| 147 |
+
Ebtesam Almazrouei, Hamza Alobeidli, Abdulaziz Alshamsi, Alessandro Cappelli, Ruxandra Cojocaru, Merouane Debbah, Etienne Goffinet, Daniel Heslow, Julien Launay, Quentin Malartic, Badreddine Noune, Baptiste Pannier, and Guilherme Penedo. 2023. Falcon-40B: an open large language model with state-of-the-art performance.
|
| 148 |
+
Jacob Andreas, Marcus Rohrbach, Trevor Darrell, and Dan Klein. 2016. Neural module networks. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 39-48.
|
| 149 |
+
Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Michal Podstawski, Hubert Niewiadomski, Piotr Nyczyk, and Torsten Hoefler. 2023. Graph of thoughts: Solving elaborate problems with large language models.
|
| 150 |
+
Ivan Bratko. 2012. Prolog programming for Artificial Intelligence. Addison-Wesley.
|
| 151 |
+
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901.
|
| 152 |
+
Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. 2023. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks.
|
| 153 |
+
Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, Parker Schuh, Kensen Shi, Sasha Tsvyashchenko, Joshua Maynez, Abhishek
|
| 154 |
+
|
| 155 |
+
Rao, Parker Barnes, Yi Tay, Noam Shazeer, Vinodkumar Prabhakaran, Emily Reif, Nan Du, Ben Hutchinson, Reiner Pope, James Bradbury, Jacob Austin, Michael Isard, Guy Gur-Ari, Pengcheng Yin, Toju Duke, Anselm Levskaya, Sanjay Ghemawat, Sunipa Dev, Henryk Michalewski, Xavier Garcia, Vedant Misra, Kevin Robinson, Liam Fedus, Denny Zhou, Daphne Ippolito, David Luan, Hyeontaek Lim, Barret Zoph, Alexander Spiridonov, Ryan Sepassi, David Dohan, Shivani Agrawal, Mark Omernick, Andrew M. Dai, Thanumalayan Sankaranarayana Pillai, Marie Pellat, Aitor Lewkowycz, Erica Moreira, Rewon Child, Oleksandr Polozov, Katherine Lee, Zongwei Zhou, Xuezhi Wang, Brennan Saeta, Mark Diaz, Orhan Firat, Michele Catasta, Jason Wei, Kathy Meier-Hellstern, Douglas Eck, Jeff Dean, Slav Petrov, and Noah Fiedel. 2022. Palm: Scaling language modeling with pathways.
|
| 156 |
+
W. F. Clocksin and C. S. Mellish. 2003. Programming in Prolog. Springer-Verlag.
|
| 157 |
+
Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. 2021. Training verifiers to solve math word problems.
|
| 158 |
+
Michael A. Covington. 2002. Natural language processing for Prolog programmers. Prentice Hall.
|
| 159 |
+
Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023. Pal: Program-aided language models.
|
| 160 |
+
Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Tora: A tool-integrated reasoning agent for mathematical problem solving.
|
| 161 |
+
Nitish Gupta, Kevin Lin, Dan Roth, Sameer Singh, and Matt Gardner. 2020. Neural module networks for reasoning over text. In International Conference on Learning Representations.
|
| 162 |
+
Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models.
|
| 163 |
+
Jiaxin Huang, Shixiang Shane Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. 2022. Large language models can self-improve.
|
| 164 |
+
Drew Hudson and Christopher D Manning. 2019. Learning by abstraction: The neural state machine. In Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc.
|
| 165 |
+
Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao,
|
| 166 |
+
|
| 167 |
+
Thibaut Lavril, Thomas Wang, Timothee Lacroix, and William El Sayed. 2023. Mistral 7b.
|
| 168 |
+
Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Zhaopeng Tu, and Shuming Shi. 2023. Encouraging divergent thinking in large language models through multi-agent debate.
|
| 169 |
+
Jieyi Long. 2023. Large language model guided tree-of-thought.
|
| 170 |
+
Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. 2023. Faithful chain-of-thought reasoning.
|
| 171 |
+
Swaroop Mishra, Matthew Finlayson, Pan Lu, Leonard Tang, Sean Welleck, Chitta Baral, Tanmay Rajpurohit, Oyvind Tafjord, Ashish Sabharwal, Peter Clark, and Ashwin Kalyan. 2023. Lila: A unified benchmark for mathematical reasoning.
|
| 172 |
+
Arvind Neelakantan, Quoc V. Le, Martin Abadi, Andrew McCallum, and Dario Amodei. 2017. Learning a natural language interface with neural programmer. In International Conference on Learning Representations.
|
| 173 |
+
Maxwell Nye, Michael Henry Tessler, Joshua B. Tenenbaum, and Brenden M. Lake. 2021. Improving coherence and consistency in neural sequence models with dual-system, neuro-symbolic reasoning.
|
| 174 |
+
Liangming Pan, Alon Albalak, Xinyi Wang, and William Yang Wang. 2023. Logic-lm: Empowering large language models with symbolic solvers for faithful logical reasoning.
|
| 175 |
+
Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. 2021. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446.
|
| 176 |
+
Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, and Gabriel Synnaeve. 2023. Code llama: Open foundation models for code.
|
| 177 |
+
Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. arXiv preprint arXiv:2302.04761.
|
| 178 |
+
Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Synthetic prompting: Generating chain-of-thought demonstrations for large language models. In Proceedings of the 40th International Conference on
|
| 179 |
+
|
| 180 |
+
Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 30706-30775. PMLR.
|
| 181 |
+
Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca.
|
| 182 |
+
Romal Thoppilan, Daniel De Freitas, Jamie Hall, Noam Shazeer, Apoorv Kulshreshtha, Heng-Tze Cheng, Alicia Jin, Taylor Bos, Leslie Baker, Yu Du, et al. 2022. Lamda: Language models for dialog applications. arXiv preprint arXiv:2201.08239.
|
| 183 |
+
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. 2023. Llama 2: Open foundation and finetuned chat models.
|
| 184 |
+
Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. 2022a. Emergent abilities of large language models.
|
| 185 |
+
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022b. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824-24837.
|
| 186 |
+
Sen Yang, Xin Li, Leyang Cui, Lidong Bing, and Wai Lam. 2023. Neuro-symbolic integration brings causal and reliable reasoning proofs.
|
| 187 |
+
Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. 2020. Xlnet: Generalized autoregressive pretraining for language understanding.
|
| 188 |
+
Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models.
|
| 189 |
+
|
| 190 |
+
Zheng Yuan, Hongyi Yuan, Chengpeng Li, Guanting Dong, Chuanqi Tan, and Chang Zhou. 2023. Scaling relationship on learning mathematical reasoning with large language models. arXiv preprint arXiv:2308.01825.
|
| 191 |
+
Yifan Zhang, Jingqin Yang, Yang Yuan, and Andrew Chi-Chih Yao. 2023. Cumulative reasoning with large language models.
|
| 192 |
+
Denny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc Le, and Ed Chi. 2023. Least-to-most prompting enables complex reasoning in large language models.
|
| 193 |
+
Xinyu Zhu, Junjie Wang, Lin Zhang, Yuxiang Zhang, Yongfeng Huang, Ruyi Gan, Jiaxing Zhang, and Yu-jiu Yang. 2023. Solving math word problems via cooperative reasoning induced language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics.
|
| 194 |
+
|
| 195 |
+
# A Appendix
|
| 196 |
+
|
| 197 |
+
# A.1 Generation Procedure of GSM8K-Prolog
|
| 198 |
+
|
| 199 |
+
Below is the detailed pseudo-code for the GSM8K-Prolog dataset generation.
|
| 200 |
+
|
| 201 |
+
Algorithm 1 Procedure of GSM8K-Prolog Generation
|
| 202 |
+
|
| 203 |
+
Input: The original GSM8K dataset, denoted as set $\mathcal{X} = \{(q_i,a_i^{\mathrm{CoT}})\}_{i = 1}^N$ , where each sample consists of one question $q_{i}$ and one Chain-of-Thought answer $a_{i}^{\mathrm{CoT}}$ ; A Prolog interpreter $\mathcal{P}$ that returns the output of a Prolog program; A Chain-of-Thought answer retriever $\mathcal{C}$ that parses out the final answer of a natural language reasoning chain.
|
| 204 |
+
|
| 205 |
+
Output: GSM8K-Prolog dataset $\mathcal{D} = \{(q_i,a_i^{\mathrm{Prolog}})\}_{i = 1}^N$
|
| 206 |
+
|
| 207 |
+
Initialize a set of indices $\mathcal{I} \gets \{1, \dots, N\}$ , a static instruction prompt in the new dataset $p_{\mathrm{ins}}$ , and an initial question for querying OpenAI API $q_{\mathrm{gen}}$ .
|
| 208 |
+
|
| 209 |
+
Manually craft 10 correct Prolog codes $\{a_i^{\mathrm{Prolog}}\}_{i = 1}^{10}$ that correctly solve $\{q_i\}_{i = 1}^{10}$ in $\mathcal{X}$ to initialize $\mathcal{D}$ for $i\in \mathcal{I}$ do
|
| 210 |
+
|
| 211 |
+
Retrieve a sample $(q_{i},a_{i}^{\mathrm{CoT}})\in \mathcal{X}$
|
| 212 |
+
|
| 213 |
+
Prompt GPT-4 with $\{q_{\mathrm{gen}}\} \cup \{(q_k, a_k^{\mathrm{CoT}}, a_k^{\mathrm{Prolog}})\}_{k=1}^{10} \cup \{q_i, a_i^{\mathrm{CoT}}\}$ to obtain $a_i^{\mathrm{Prolog}}$
|
| 214 |
+
|
| 215 |
+
if $\mathcal{P}(a_i^{\mathrm{Prolog}}) = \mathcal{C}(a_i^{\mathrm{CoT}})$ then
|
| 216 |
+
|
| 217 |
+
$$
|
| 218 |
+
\mathcal{D} \leftarrow \mathcal{D} \cup \left\{\left(p_{\mathrm{ins}}, q_{i}, a_{i}^{\mathrm{Prolog}}\right)\right\}
|
| 219 |
+
$$
|
| 220 |
+
|
| 221 |
+
$$
|
| 222 |
+
\mathcal {I} \leftarrow \mathcal {I} \setminus \{i \}
|
| 223 |
+
$$
|
| 224 |
+
|
| 225 |
+
end if
|
| 226 |
+
|
| 227 |
+
end for
|
| 228 |
+
|
| 229 |
+
Manually select the top 10 clean and logical Prolog code from the current $\mathcal{D}$ to form a new few-shot sample set $Q^{\mathrm{fixed}} = \{(q_k, a_k^{\mathrm{CoT}}, a_k^{\mathrm{Prolog}})_{k \notin \mathcal{I}}\}, |Q^{\mathrm{fixed}}| = 10$ .
|
| 230 |
+
|
| 231 |
+
for $j = 1,\dots ,M$ do
|
| 232 |
+
|
| 233 |
+
// $M$ trial attempts
|
| 234 |
+
|
| 235 |
+
for $i\in \mathcal{I}$ do
|
| 236 |
+
|
| 237 |
+
Retrieve a sample $(q_{i},a_{i}^{\mathrm{CoT}})\in \mathcal{X}$
|
| 238 |
+
|
| 239 |
+
Sample $Q^{\mathrm{random}}\gets \{(q_k,a_k^{\mathrm{CoT}},a_k^{\mathrm{Prolog}})_{k\notin \mathcal{I}}\}$ , $|Q^{\mathrm{random}}| = 10$ from $\mathcal{D}$
|
| 240 |
+
|
| 241 |
+
// Adding 10 dynamic samples and 10 fixed samples into the 20-shot prompt.
|
| 242 |
+
|
| 243 |
+
Prompt GPT-4 with $\{q_{\mathrm{gen}}\} \cup Q^{\mathrm{fixed}} \cup Q^{\mathrm{random}} \cup \{q_i, a_i^{\mathrm{CoT}}\}$ to obtain $a_i^{\mathrm{Prolog}}$
|
| 244 |
+
|
| 245 |
+
if $\mathcal{P}(a_i^{\mathrm{Prolog}}) = \mathcal{C}(a_i^{\mathrm{CoT}})$ then
|
| 246 |
+
|
| 247 |
+
$$
|
| 248 |
+
\mathcal{D} \leftarrow \mathcal{D} \cup \left\{\left(p_{\mathrm{ins}}, q_{i}, a_{i}^{\mathrm{Prolog}}\right)\right\}
|
| 249 |
+
$$
|
| 250 |
+
|
| 251 |
+
$$
|
| 252 |
+
\mathcal {I} \leftarrow \mathcal {I} \setminus \{i \}
|
| 253 |
+
$$
|
| 254 |
+
|
| 255 |
+
end if
|
| 256 |
+
|
| 257 |
+
end for
|
| 258 |
+
|
| 259 |
+
end for
|
| 260 |
+
|
| 261 |
+
if $\mathcal{I}\neq \emptyset$ then
|
| 262 |
+
|
| 263 |
+
Manually correct Prolog codes $\{a_i^{\mathrm{Prolog}}\}_{i\in \mathcal{I}}$ that solve $\{q_{i}\}_{i\in \mathcal{I}}$
|
| 264 |
+
|
| 265 |
+
$$
|
| 266 |
+
\mathcal{D} \leftarrow \mathcal{D} \cup \left\{\left(p_{\mathrm{ins}}, q_{i}, a_{i}^{\mathrm{Prolog}}\right)_{i \in \mathcal{I}}\right\}
|
| 267 |
+
$$
|
| 268 |
+
|
| 269 |
+
end if
|
| 270 |
+
|
| 271 |
+
# A.2 Permutation procedures
|
| 272 |
+
|
| 273 |
+
Permutations can be performed both on the level of facts or rules and on the level of goals in a rule. In practice, for each piece of code, we first permute the goals in the `solve(<answer>) :- <goal_1>, <goal_2>, ...` predicate. Since the total number of permutations is sensitive to the number of goals and can easily grow to a large magnitude, thus running out of memory, we used the permutation method in the itertools library to yield an iterator over the permutations. Then, we took up to 10 goal permutations from the iterator. If there were fewer than 10 goal permutations in total because the code was concise and there were not many goals, we took as many goal permutations as possible. Then, in the same manner, we took up
|
| 274 |
+
|
| 275 |
+
to 10 fact and rule permutations. In principle, there would be at most 100 permuted samples generated for one original sample. Then, for each sample, while conducting an experiment that required a certain number of permutations, we randomly sampled permutations from the set of permutations of size up to 100. For any given sample, if the target number of permutations exceeded the total number of permutations it had, we took all its permutations instead.
|
| 276 |
+
|
| 277 |
+
# A.3 Instruction Prompt Samples
|
| 278 |
+
|
| 279 |
+
Below are the instruction prompts we used for different training settings (CoT, Prolog, and Permuted Prolog).
|
| 280 |
+
|
| 281 |
+
<table><tr><td>Setting</td><td>Prompt Template</td></tr><tr><td>CoT</td><td>Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: Please generate an explanatory answer to solve the given math problem. ### Input: &lt;Question&gt; ### Output: &lt;CoT Reasoning&gt;</td></tr><tr><td>Prolog Generation</td><td>Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: Please generate a piece of Prolog code to solve the given math problem. ### Input: &lt;Question&gt; ### Output: &lt;Prolog Code&gt;</td></tr><tr><td>Permuted Prolog Generation</td><td>Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: Please generate a piece of Prolog code in non-sequential order to solve the given math problem. ### Input: &lt;Question&gt; ### Output: &lt;Prolog Code&gt;</td></tr></table>
|
| 282 |
+
|
| 283 |
+
# A.4 Error Analysis
|
| 284 |
+
|
| 285 |
+
In this section, we present some typical errors made by our best-performing model to understand the behavior and bottleneck of LLM generation of Prolog programs for solving arithmetic reasoning tasks. We sampled 70 erroneous Prolog programs generated by the finetuned Mistral under the 1:1 permutation setting (which had yielded the highest accuracy) and manually checked the type of errors using the SWI-Prolog interpreter. $100\%$ of them had semantic errors, meaning their logic of solution was wrong. $7\%$ of them had syntax errors, meaning SWI-Prolog yields syntax errors on those codes. Some examples of typical syntax errors are listed below. The lines causing errors are in bold. The details of the errors are explained in the comments.
|
| 286 |
+
|
| 287 |
+
<table><tr><td>Question</td><td>Prolog Code</td></tr><tr><td>Julia's boat sprang a leak while she was out on the lake. The boat was taking on two liters of water for every ten feet she rowed back towards shore. It took her sixteen seconds to row twenty feet. The shore was 64 seconds away. How much water had the boat taken on by the time Julia reached shore?</td><td>:- use_module library(clpq).liters_per_10_feet(julia, 2).time_per_20_feet(julia, 16).distance_to_shore(julia, 64).solve(Water_taken_on) :- liters_per_10_feet(julia, Liters_per_10_feet),time_per_20_feet(julia, Time_per_20_feet),distance_to_shore(julia, Distance_to_shore),{Seconds_per_10_feet = Time_per_20_feet / 2},{TotalSeconds = Distance_to_shore / 10 * Seconds_per_10_feet},{Total Feet_rowed = Total_Seonds / Seconds_per_20_feet * 20}, % The variable has never been defined.{Total_liters_taken_on = Total_foot_rowed * Liters_per_10_feet},{Water_taken_on = Total_liters_taken_on}.</td></tr><tr><td>Bill bakes 300 rolls, 120 chocolate croisants, and 60 baguettes every day. Each roll is 4 inches long, each croissant is 6 inches long, and each baguette is two feet long. If Bill puts all the baked goods end to end, how long will they be in feet?</td><td>:- useModule library(clpq)).bake(bill, rolls, 300).bake(bill, chocolate_croissants, 120).bake(bill, baguettes, 60).length(roll, 4). 
# The predicate has already been reserved by Prolog.length(chocolate_croissant, 6).length(baguette, 2).solve(Total_length) :-bake(bill, rolls, Num_rails),bake(bill, chocolate_croissants, Num_chocolate_croissants),bake(bill, baguettes, Num_baguettes),length(roll, Roll_length),length(chocolate_croissant, Chocolate_croissant_length),length(baguette, Baguette_length),{Roll_total_length = Num_rails * Roll_length},{Chocolate_croissant_total_length = Num_chocolate_croissants * Chocolate_croissant_length},{Baguette_total_length = Num_baguettes * Baguette_length},{Total_length = Roll_total_length + Chocolate_croissant_total_length + Baguette_total_length}.% This code also contains a semantic error because it fails to convert the unit of inch to feet.</td></tr><tr><td>In a race with 240 Asians, 80 were Japanese, and the rest were Chinese. If the number of boys on the Chinese team was 60, how many girls were on the Chinese team?</td><td>:- useModule library(clpq)).total_participants(240).japanese_participants(80).chinese Boys(60).solve(Girls_on_chinese-team) :-total_participants(Total_participants),Japanese_participants , % The correct syntax: japanese_participants(Japanese_participants),{Chinese_participants = Total_participants - Japanese_participants},chinese_boys(Chinese_boys),{Girls_on_chinese-team = Chinese_participants - Chinese_boys}.</td></tr><tr><td>Brinley is in Mr. Bert's math class. Mr. Bert gives six tests each semester. He allows the students to remove the lowest score from the average each semester. 
If Brinley has scores of 89, 71, 92, 100 and 86 on the first five tests, what score does she need on the sixth test to get an average of 93?</td><td>:- useModule library(clpq)).numTests(mr_bert, 6).lowest_score Removal(mr_bert, 1).target_average(mr_bert, 93).scores(brinley, [89, 71, 92, 100, 86]).solve(Test_score) :-numTests(mr_bert, Num Tests),lowest_score Removal(mr_bert, Lowest_score Removal),target_average(mr_bert, Target-average),scores(brinley, Scores),Length is Num Tests - Lowest_score Removal,{Total_score = sum(Scores)}, % The built-in predicate is misused.{Average_score = Total_score / Length},{Test_score = (Target-average * Length) - Total_score}.</td></tr></table>
|
| 288 |
+
|
| 289 |
+
# A.5 Training Details and Computational Budget
|
| 290 |
+
|
| 291 |
+
During finetuning, we controlled the number of epochs to be 6, batch size to be 128, and learning rate to be $3 \times 10^{-4}$ . For a single training run, we used 2 NVIDIA RTX 4090 GPUs to finetune Llama2 and CodeLlama and 2 NVIDIA RTX 8000 GPUs to finetune Mistral. We adopted Distributed Data Parallelism to speed up training. Training on the original CoT data in GSM8K or the non-permuted Prolog code data took around 2 hours on 2 NVIDIA RTX 4090 GPUs and around 10 hours on 2 NVIDIA RTX 8000 GPUs. When we added in permuted samples, the training time grew proportionally with the dataset size since we controlled the number of epochs and batch size. During inference on the test set, we used a batch size of 2
|
| 292 |
+
|
| 293 |
+
on an RTX 4090 GPU, which took around 6 hours to finish a full inference round, and a batch size of 3 on one RTX 8000 GPU, which took around 7 hours to finish a full inference round.
|
arithmeticreasoningwithllmprologgenerationpermutation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0f4a52924298b8ad9682d3e500d3ae79181f6f754d875ab52144073c6aaed058
|
| 3 |
+
size 525970
|
arithmeticreasoningwithllmprologgenerationpermutation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e4d949ef000a73030ded4776f26f2a565031efafd993551329cfdfde9597dfbe
|
| 3 |
+
size 331987
|
beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/430aedb8-8e92-42ef-8176-5984d7337890_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aaf1ff604cda7061099185e8be2dd11ab4be218d6ce5c181f0a95bee81c75eef
|
| 3 |
+
size 86647
|
beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/430aedb8-8e92-42ef-8176-5984d7337890_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0f1744c6716a02e25944cff28cc7a70481024c0a69a76178044c4f49456a2d10
|
| 3 |
+
size 106057
|
beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/430aedb8-8e92-42ef-8176-5984d7337890_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ca84cfb9f14ea80771dea539043b6e46a8f9d4bff118fb0c5aa306a27b9f6b8a
|
| 3 |
+
size 1064196
|
beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/full.md
ADDED
|
@@ -0,0 +1,394 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Beyond Yes and No: Improving Zero-Shot LLM Rankers via Scoring Fine-Grained Relevance Labels
|
| 2 |
+
|
| 3 |
+
Honglei Zhuang, Zhen Qin, Kai Hui, Junru Wu, Le Yan, Xuanhui Wang and Michael Bendersky
|
| 4 |
+
|
| 5 |
+
Google Research
|
| 6 |
+
|
| 7 |
+
{hlz,zhenqin,kaihuibj,junru,lyyanle,
|
| 8 |
+
|
| 9 |
+
xuanhui,bemike}@google.com
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Zero-shot text rankers powered by recent LLMs achieve remarkable ranking performance by simply prompting. Existing prompts for pointwise LLM rankers mostly ask the model to choose from binary relevance labels like "Yes" and "No". However, the lack of intermediate relevance label options may cause the LLM to provide noisy or biased answers for documents that are partially relevant to the query. We propose to incorporate fine-grained relevance labels into the prompt for LLM rankers, enabling them to better differentiate among documents with different levels of relevance to the query and thus derive a more accurate ranking. We study two variants of the prompt template, coupled with different numbers of relevance levels. Our experiments on 8 BEIR data sets show that adding fine-grained relevance labels significantly improves the performance of LLM rankers.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Large language models (LLMs) such as GPT-4 (OpenAI, 2023) and PaLM 2 (Google et al., 2023) have demonstrated impressive zero-shot performance on a variety of NLP tasks. Recently, there has been a growing interest in applying LLMs to zero-shot text ranking, with remarkably impressive results. The earliest zero-shot LLM rankers are pointwise (Liang et al., 2023; Sachan et al., 2022), which score one query and one document at each time and rank the documents based on the scores. Lately, pairwise (Qin et al., 2024) and listwise (Sun et al., 2023; Ma et al., 2023) LLM rankers also show strong performance, but they cannot scale to long lists and still largely rely on a high-quality first-stage ranking.
|
| 18 |
+
|
| 19 |
+
A typical category of pointwise LLM rankers is relevance generation (Liang et al., 2023). In this method, the LLM is prompted to answer whether a document is relevant to the query. Existing pointwise LLM rankers mostly ask the LLM to answer
|
| 20 |
+
|
| 21 |
+
"Yes" or "No" and use their likelihood to derive a ranking score. Nevertheless, some documents cannot always be accurately classified into these two categories as they may not directly answer the query but still contain helpful information.
|
| 22 |
+
|
| 23 |
+
Studies on human subjects show that using binary options sometimes leads to biased answers (Rivera-Garrido et al., 2022). Instead, providing reasonably fine-grained options can lead to more reliable results (Roitero et al., 2018; Birkett, 1986; Rivera-Garrido et al., 2022; Johnston et al., 2017). Actually, in information retrieval data sets, the annotation guidelines for human annotators often employ multiple relevance levels, like the 3-level scale used in TREC-COVID (Voorhees et al., 2021) and TREC-Robust (Voorhees, 2005), as well as the 4-level scale used in TREC-DL (Craswell et al., 2020b,a). We believe that a zero-shot LLM ranker might share the same behavior pattern with human annotators.
|
| 24 |
+
|
| 25 |
+
Therefore, we propose to explicitly provide fine-grained relevance labels in the prompt to zero-shot LLM rankers. Instead of asking the LLM to choose between two options, we provide the LLM with fine-grained relevance labels, such as "Highly Relevant", "Somewhat Relevant" and "Not Relevant" and collect their likelihood scores from LLM predictions to derive the ranking score. The intuition is that the intermediate relevance labels in the prompt serve as a "cue" to the LLM to distinguish partially relevant documents from fully relevant or fully irrelevant ones.
|
| 26 |
+
|
| 27 |
+
Our evaluation on 8 BEIR (Thakur et al., 2021) datasets demonstrates that simply adding intermediate relevance labels significantly boosts LLM ranking performance across different datasets, regardless of the actual ground-truth label granularity. An in-depth analysis reveals that the proposed new prompt enables LLM rankers to distinguish documents previously indistinguishable with the binary-option prompt.
|
| 28 |
+
|
| 29 |
+
# 2 Related Work
|
| 30 |
+
|
| 31 |
+
Zero-shot LLM rankers. Shifted from tuning-based learning to rank on textual and traditional tabular datasets (Nogueira et al., 2019; Han et al., 2020; Zhuang et al., 2021; Nogueira et al., 2020; Zhuang et al., 2023; Xian et al., 2023; Liu, 2009; Qin et al., 2021), there is an emerging thread of research exploring how to use general-purpose LLMs directly or indirectly (Jagerman et al., 2023; Li et al., 2024) for zero-shot text ranking.
|
| 32 |
+
|
| 33 |
+
Liang et al. (2023) and Sachan et al. (2022) adopt a pointwise approach which scores the relevance of one document at a time based on how likely the LLM would classify the document as relevant or how likely the LLM would generate the query from the document respectively. There are also explorations on pairwise (Qin et al., 2024) and listwise (Sun et al., 2023; Ma et al., 2023; Zhuang et al., 2024) LLM rankers which take multiple documents as input and return the ranking directly, but they are usually applied iteratively on smaller sets of documents. In this paper, we only focus on pointwise LLM rankers.
|
| 34 |
+
|
| 35 |
+
Zero-shot LLM assessors. Another related research area (Faggioli et al., 2023; Thomas et al., 2023) employs LLMs as assessors, where fine-grained relevance labels are also provided in the prompt. However, these methods do not use the likelihood scores of fine-grained relevance labels. The goal of LLM assessors is to provide a relevance label for every query-document pairs that aligns with the ground-truth relevance label, potentially created by human assessors. LLM assessors are usually used to create an evaluation data set, which can be used to reliably evaluate different ranking models. This is different from LLM rankers, which typically only need to ensure that the relative order of the top-ranked documents are accurate.
|
| 36 |
+
|
| 37 |
+
# 3 LLM Rankers
|
| 38 |
+
|
| 39 |
+
# 3.1 Preliminaries
|
| 40 |
+
|
| 41 |
+
Existing explorations using zero-shot LLMs as pointwise rankers can be broadly divided into two categories: relevance generation (Liang et al., 2023) and query generation (Sachan et al., 2022). We focus on relevance generation in this work.
|
| 42 |
+
|
| 43 |
+
Given a query $q$ and a list of candidate documents $\mathbf{d} = (d_1, \ldots, d_m)$ , an LLM ranker based on relevance generation takes each query-document pair $(q, d_i)$ as input and prompts the LLM to an-
|
| 44 |
+
|
| 45 |
+

|
| 46 |
+
(a) Yes-No relevance generation
|
| 47 |
+
|
| 48 |
+

|
| 49 |
+
(b) Fine-grained relevance label generation
|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
(c) Rating scale relevance generation
|
| 53 |
+
Figure 1: Illustration of different prompting strategies for relevance generation LLM rankers.
|
| 54 |
+
|
| 55 |
+
swer whether the document is relevant to the query by "Yes" or "No" (see Figure 1(a)). Then a ranking score $f(q, d_i) \in \mathbb{R}$ for each document is calculated based on LLM's log-likelihood score $s_{i,1} = \mathrm{LLM}(\mathrm{Yes}|q, d_i)$ and $s_{i,0} = \mathrm{LLM}(\mathrm{No}|q, d_i)$ by using a softmax function (Nogueira et al., 2020):
|
| 56 |
+
|
| 57 |
+
$$
|
| 58 |
+
f (q, d _ {i}) = \frac {\exp (s _ {i , 1})}{\exp (s _ {i , 1}) + \exp (s _ {i , 0})}
|
| 59 |
+
$$
|
| 60 |
+
|
| 61 |
+
The ranked list is obtained by sorting the documents based on their ranking scores.
|
| 62 |
+
|
| 63 |
+
# 3.2 Prompts
|
| 64 |
+
|
| 65 |
+
In many datasets, there exist documents that are only partially or marginally relevant to the query, which LLMs struggle to classify into two classes.
|
| 66 |
+
|
| 67 |
+
Fine-grained relevance labels. We extend the classical relevance generation methods by introducing fine-grained relevance labels. Without loss of generality, we use a set of 3-level graded relevance labels as example: ["Not Relevant", "Somewhat Relevant", "Highly Relevant"], denoted as $[l_0, l_1, l_2]$ . Then, for each query-document pair $(q, d_i)$ , we ask the LLM to evaluate their relevance by choosing from the given relevance labels. We can obtain the log-likelihood of the LLM generating each relevance label:
|
| 68 |
+
|
| 69 |
+
$$
|
| 70 |
+
s _ {i, k} = \operatorname {L L M} \left(l _ {k} \mid q, d _ {i}\right) \tag {1}
|
| 71 |
+
$$
|
| 72 |
+
|
| 73 |
+
This example is illustrated in Figure 1(b). The exact prompt can be found in Appendix G.
|
| 74 |
+
|
| 75 |
+
Rating scale. To avoid using relevance labels with potentially ambiguous order, we can also employ a rating scale. For example, we can prompt the LLM to rate the relevance between the query $q$ and the document $d_{i}$ on a scale from 0 to 4. We can then use the LLM to obtain the log-likelihood $[s_{i,0},\dots ,s_{i,4}]$ of generating each relevance scale value $[l_0,\ldots ,l_4]$ , which are "0" to "4" respectively. This method allows us to try arbitrarily fine-grained relevance levels in the prompt. Figure 1(c) illustrates an example of this prompt. The exact prompt can be found in Appendix G.
|
| 76 |
+
|
| 77 |
+
# 3.3 Ranking Scores
|
| 78 |
+
|
| 79 |
+
Once we obtain the log-likelihood of each relevance label, we can derive the ranking scores.
|
| 80 |
+
|
| 81 |
+
Expected relevance values (ER). The most straightforward way is to calculate the expected relevance value. First, we need to assign a series of relevance values $[y_0, y_1, y_2]$ to all the relevance labels $[l_0, l_1, l_2]$ , where $y_k \in \mathbb{R}$ . Then we can calculate the expected relevance value by:
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
f (q, d _ {i}) = \sum _ {k} p _ {i, k} \cdot y _ {k} \tag {2}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
\text {where } p _ {i, k} = \frac {\exp \left(s _ {i , k}\right)}{\sum_ {k ^ {\prime}} \exp \left(s _ {i , k ^ {\prime}}\right)}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
The relevance values $y_{k}$ can be provided by users or even tuned based on a training data set. We empirically find that naively assigning $y_{k} = k$ (with $l_{0}$ to $l_{k}$ ordered from least to most relevant) already yields excellent performance. Therefore, we simply adopt $y_{k} = k$ .
|
| 92 |
+
|
| 93 |
+
Peak relevance likelihood (PR). We can further simplify ranking score derivation by focusing on top-ranked items. We propose to only use the log-likelihood of the peak relevance label ("Highly Relevant" in this example). More formally, let $l_{k^*}$ denote the relevance label with the highest relevance. We can simply rank the documents by:
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
f (q, d _ {i}) = s _ {i, k ^ {*}} \tag {3}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
Note that $s_{i,k^*}$ is the log-likelihood directly obtained from the LLM, instead of the marginal probability $p_{i,k^*}$ in Equation (2). Hence, it is not necessary to score all relevance labels using the LLM and could potentially save some decoding cost when using this strategy to derive the ranking score. While this method is shown less effective on smaller models (Nogueira et al., 2020), it works well empirically with larger models in our experiments.
|
| 100 |
+
|
| 101 |
+
Table 1: Relevance labels used in RG-$k$L. The relevance label $l_{k^*}$ with the maximum relevance value is bolded.
|
| 102 |
+
|
| 103 |
+
<table><tr><td>Method</td><td>Relevance Labels</td></tr><tr><td>RG-2L</td><td>"Not Relevant", "Relevant"</td></tr><tr><td>RG-3L</td><td>"Not Relevant", "Somewhat Relevant", "Highly Relevant"</td></tr><tr><td>RG-4L</td><td>"Not Relevant", "Somewhat Relevant", "Highly Relevant", "Perfectly Relevant"</td></tr></table>
|
| 104 |
+
|
| 105 |
+
# 4 Experiment Setup
|
| 106 |
+
|
| 107 |
+
Data set. We conduct experiments on 8 chosen data sets (Sun et al., 2023) from BEIR (Thakur et al., 2021): Covid, Touche, DBPedia, SciFact, Signal, News, Robust04, and NFCorpus. Notice that our method is applicable regardless of the actual relevance granularity in each data set.
|
| 108 |
+
|
| 109 |
+
We use BM25 (Lin et al., 2021) to retrieve the top-100 documents for each data set, and then rank the retrieved documents using LLMs with our proposed methods. We use FLAN PaLM2 S (Google et al., 2023) as the LLM in our main experiments. Results of other LLMs can be found in Appendix D.
|
| 110 |
+
|
| 111 |
+
The ranking performance is measured by NDCG@10 (Järvelin and Kekäläinen, 2002).
|
| 112 |
+
|
| 113 |
+
Compared methods. We compared the following prompting strategies:
|
| 114 |
+
|
| 115 |
+
1. Query Generation (QG). Ranking documents based on the query likelihood from LLM given the document (Sachan et al., 2022).
|
| 116 |
+
2. Binary Relevance Generation (RG-YN). Prompting the LLM with a query-document pair and using "Yes/No" likelihood to calculate the ranking score (Liang et al., 2023).
|
| 117 |
+
3. $k$-Level Relevance Generation (RG-$k$L). Prompting the LLM to choose from $k$ relevance labels for each query-document pair. The relevance labels are listed in Table 1.
|
| 118 |
+
4. Rating Scale 0-to- $k$ Relevance Generation (RG-S(0, k)). Prompting the LLM to rate the relevance for each query-document pair using a scale from 0 to $k$ . Note that for RG-S(0, k), the LLM needs to score $(k + 1)$ labels.
|
| 119 |
+
|
| 120 |
+
By default, the ranking scores of our methods are derived using expected relevance (Equation (2)).
|
| 121 |
+
|
| 122 |
+
Table 2: Overall ranking performances measured by NDCG@10 on BEIR data sets. The best performances are bolded. Average results that are significantly (paired $t$ -test, $p < 0.05$ ) better than RG-2L are marked with *
|
| 123 |
+
|
| 124 |
+
<table><tr><td>Method</td><td>Covid</td><td>Touche</td><td>DBPedia</td><td>SciFact</td><td>Signal</td><td>News</td><td>Robust04</td><td>NFCorpus</td><td>Average</td></tr><tr><td>QG</td><td>0.7357</td><td>0.2408</td><td>0.3773</td><td>0.7495</td><td>0.2872</td><td>0.4156</td><td>0.4651</td><td>0.3673</td><td>0.4548</td></tr><tr><td>RG-YN</td><td>0.7897</td><td>0.2427</td><td>0.3696</td><td>0.6958</td><td>0.3196</td><td>0.4588</td><td>0.5656</td><td>0.3743</td><td>0.4770</td></tr><tr><td>RG-2L</td><td>0.7949</td><td>0.2411</td><td>0.3590</td><td>0.7290</td><td>0.2996</td><td>0.4623</td><td>0.5636</td><td>0.3814</td><td>0.4789</td></tr><tr><td>RG-3L</td><td>0.8065</td><td>0.2650</td><td>0.4013</td><td>0.7671</td><td>0.3142</td><td>0.4890</td><td>0.5660</td><td>0.3849</td><td>0.4992*</td></tr><tr><td>RG-4L</td><td>0.8063</td><td>0.2388</td><td>0.4033</td><td>0.7766</td><td>0.3184</td><td>0.4884</td><td>0.5635</td><td>0.3801</td><td>0.4969*</td></tr><tr><td>RG-S(0,2)</td><td>0.7760</td><td>0.2695</td><td>0.3709</td><td>0.6921</td><td>0.3034</td><td>0.4677</td><td>0.5557</td><td>0.3787</td><td>0.4768</td></tr><tr><td>RG-S(0,4)</td><td>0.8048</td><td>0.2757</td><td>0.4190</td><td>0.7521</td><td>0.3301</td><td>0.4790</td><td>0.5668</td><td>0.3901</td><td>0.5022*</td></tr></table>
|
| 125 |
+
|
| 126 |
+

|
| 127 |
+
Figure 2: Average NDCG@10 on 8 BEIR data sets with different $k$ in rating scale 0-to- $k$ .
|
| 128 |
+
|
| 129 |
+
# 5 Results
|
| 130 |
+
|
| 131 |
+
Overall performance. Table 2 summarizes the overall comparison results. It can be seen that prompting LLMs with fine-grained relevance labels achieves substantially higher performance than binary relevance labels (RG-YN, RG-2L). For example, RG-3L on average achieves $+2\%$ improvement in NDCG@10 compared with RG-2L and RG-YN. RG-S(0,4) which uses the rating scale 0 to 4 in the prompt also achieves similar improvement. Note that even on data sets with binary ground-truth labels (e.g., SciFact), using fine-grained relevance labels still achieves substantial improvement. This suggests that the improvement is not merely a result of matching the actual ground-truth relevance levels of the data set.
|
| 132 |
+
|
| 133 |
+
There are a few potential explanations for the observed improvement. One explanation is that the estimated relevance becomes more accurate as we aggregate more log-likelihood scores of multiple relevance labels. Another is that the fine-grained relevance labels in the prompt help the LLMs to develop a more nuanced understanding of relevance. We conduct more experiments to further explore these explanations.
|
| 134 |
+
|
| 135 |
+
Number of relevance labels. We first explore the effect of using different numbers of relevance labels. Table 2 demonstrates that when using RG-$k$L, RG-4L performance is on par with RG-3L,
|
| 136 |
+
|
| 137 |
+
Table 3: Comparing ranking score derivation strategies measured by average NDCG@10 on BEIR data sets.
|
| 138 |
+
|
| 139 |
+
<table><tr><td>Prompts</td><td>Generated</td><td>Likelihood-ER</td><td>Likelihood-PR</td></tr><tr><td>RG-3L</td><td>0.3989</td><td>0.4992</td><td>0.5005</td></tr><tr><td>RG-4L</td><td>0.4259</td><td>0.4969</td><td>0.4934</td></tr><tr><td>RG-S(0,4)</td><td>0.4445</td><td>0.5022</td><td>0.4988</td></tr></table>
|
| 140 |
+
|
| 141 |
+
suggesting that adding more relevance levels does not always improve the performance when using textual fine-grained relevance labels.
|
| 142 |
+
|
| 143 |
+
We also plot how the performance changes with regard to $k$ for the rating scale prompting method RG-S(0,k) in Figure 2. It shows that the performance from RG-S(0,4) to RG-S(0,8) remains similar. This again suggests that using more fine-grained relevance labels does not further improve the performance. Furthermore, performance declines for even larger $k$ such as RG-S(0,9) and RG-S(0,10). This potentially indicates that LLMs struggle to understand prompts with excessive granularity (Thawani et al., 2021).
|
| 144 |
+
|
| 145 |
+
Notably, the performance trend in Figure 2 remains consistent across datasets regardless of varying granularity of ground-truth label (Appendix E). This illustrates that, in practice, the performance gains are robust to a wide range of $k$ selections.
|
| 146 |
+
|
| 147 |
+
Ranking score derivation. We compare different strategies for deriving ranking scores.
|
| 148 |
+
|
| 149 |
+
Some existing work on LLM assessors (Faggioli et al., 2023; Thomas et al., 2023) directly use the generated labels or scores without using the likelihood. Technically, we can also rank documents directly based on the labels or scores parsed from the string outputs generated by LLMs. We include this method in our comparison, denoted as "Generated".
|
| 150 |
+
|
| 151 |
+
Additionally, we compare the two strategies proposed in Section 3.3: expected relevance values
|
| 152 |
+
|
| 153 |
+

|
| 154 |
+
(a) RG-2L vs. RG-S(0,4)
|
| 155 |
+
|
| 156 |
+

|
| 157 |
+
(b) RG-3L vs. RG-S(0,4)
|
| 158 |
+
Figure 3: Comparing ranking score distribution of different methods on the Covid data set.
|
| 159 |
+
|
| 160 |
+
(Likelihood-ER) and peak relevance likelihood (Likelihood-PR), both of which derive ranking scores from the predicted log-likelihood of LLMs.
|
| 161 |
+
|
| 162 |
+
The comparison results are presented in Table 3. It is clear that directly using the generated labels or scores results in lower ranking performance compared to deriving scores from the log-likelihood, as it tends to introduce ties between documents. On the other hand, peak relevance likelihood (Likelihood-PR) achieves very close performance to expected relevance values (Likelihood-ER) in most methods, despite only using the log-likelihood of one relevance label. This suggests that the improvement brought by scoring fine-grained relevance labels cannot be simply explained by improved accuracy of estimated relevance by using more samples. Instead, it is possible that including fine-grained relevance labels within the prompt may signal LLMs to attend to the subtle relevance differences.
|
| 163 |
+
|
| 164 |
+
Score distribution comparison. We compare the score distributions of different methods to gain deeper insight into how fine-grained relevance labels influence performance. Figure 3 presents a scatter plot of ranking scores (Likelihood-ER) from two methods for a random sample of query-document pairs in the Covid data set.
|
| 165 |
+
|
| 166 |
+
Figure 3(a) demonstrates that RG-2L's ranking scores are mostly positively correlated with RG-S(0,4)'s, but RG-2L struggles to distinguish query-document pairs that receive higher scores from RG-S(0,4), scoring them almost equally with values close to 1.0. This indicates that LLMs can differentiate better among higher-ranked relevant documents with fine-grained relevance labels. In contrast, the ranking scores from RG-3L and RG-S(0,4) (Figure 3(b)) exhibit strong correlation almost throughout the entire range. Correspondingly, RG-3L and RG-S(0,4) also achieve similar ranking performance on this data set.
|
| 167 |
+
|
| 168 |
+
# 6 Conclusion
|
| 169 |
+
|
| 170 |
+
We explore pointwise zero-shot LLM rankers which score fine-grained relevance labels (e.g., "Somewhat Relevant") instead of binary labels. We propose to either provide intermediate relevance labels such as "Somewhat Relevant" as additional choices for the LLM or ask the LLM to rate the relevance between query-document pairs using a rating scale. Then we aggregate the LLM likelihood scores of different relevance labels into ranking scores to rank the documents. Further experiments illustrate that the performance gains are not solely attributable to more precise relevance estimation by using more samples, as only using the log-likelihood of one relevance label can also achieve a similar performance gain. Instead, it is possible that the inclusion of fine-grained relevance labels in the prompt may guide LLMs to better differentiate documents, especially those ranked at the top.
|
| 171 |
+
|
| 172 |
+
We believe that this approach can be extended beyond information retrieval to many other applications (Liu et al., 2023). For example, the same method can be applied for recommendation (Fan et al., 2023; Wu et al., 2023), where the LLM is asked to rate how likely a user would buy an item.
|
| 173 |
+
|
| 174 |
+
# 7 Limitations
|
| 175 |
+
|
| 176 |
+
In this work, we assume that the predicted likelihood for any generated text can be accessed. However, we are aware that this might not always be true for many proprietary LLMs where users can only call with specific APIs.
|
| 177 |
+
|
| 178 |
+
Our study is also limited to ranking performance of LLMs, without further evaluation or analysis on whether our prompts can also improve LLM assessors. Higher ranking performance does not always translate to higher relevance calibration performance (Cohen et al., 2021; Faggioli et al., 2023; Thomas et al., 2023), as the metrics have different emphasis. It is possible that one needs to apply an appropriate transformation on the derived ranking scores from LLM likelihoods to achieve the best relevance calibration performance, which can be non-trivial. We believe this is an intriguing research direction as it can further broaden the application (Bahri et al., 2020; Shtok et al., 2012) of the proposed methods.
|
| 179 |
+
|
| 180 |
+
# References
|
| 181 |
+
|
| 182 |
+
Dara Bahri, Yi Tay, Che Zheng, Donald Metzler, and Andrew Tomkins. 2020. Choppy: Cut transformer for ranked list truncation. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 1513-1516.
|
| 183 |
+
Nicholas J Birkett. 1986. Selecting the number of response categories for a likert-type scale. In Proceedings of the American statistical association, volume 1, pages 488-492.
|
| 184 |
+
Daniel Cohen, Bhaskar Mitra, Oleg Lesota, Navid Rekabsaz, and Carsten Eickhoff. 2021. Not all relevance scores are equal: Efficient uncertainty and calibration modeling for deep retrieval models. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 654-664.
|
| 185 |
+
Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2020a. Overview of the TREC 2020 deep learning track. In Proceedings of the Twenty-Ninth Text Retrieval Conference, TREC 2020, Virtual Event [Gaithersburg, Maryland, USA], November 16-20, 2020, volume 1266 of NIST Special Publication. National Institute of Standards and Technology (NIST).
|
| 186 |
+
Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M. Voorhees. 2020b. Overview of the TREC 2019 deep learning track. CoRR, abs/2003.07820.
|
| 187 |
+
Guglielmo Faggioli, Laura Dietz, Charles LA Clarke, Gianluca Demartini, Matthias Hagen, Claudia Hauff, Noriko Kando, Evangelos Kanoulas, Martin Potthast, Benno Stein, et al. 2023. Perspectives on large language models for relevance judgment. In Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval, pages 39-50.
|
| 188 |
+
Wenqi Fan, Zihuai Zhao, Jiatong Li, Yunqing Liu, Xiaowei Mei, Yiqi Wang, Jiliang Tang, and Qing Li. 2023. Recommender systems in the era of large language models (LLMs). arXiv preprint arXiv:2307.02046.
|
| 189 |
+
Google, Rohan Anil, Andrew M. Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, Eric Chu, Jonathan H. Clark, Laurent El Shafey, Yanping Huang, Kathy Meier-Hellstern, Gaurav Mishra, Erica Moreira, Mark Omernick, Kevin Robinson, Sebastian Ruder, Yi Tay, Kefan Xiao, Yuanzhong Xu, Yujing Zhang, Gustavo Hernandez Abrego, Junwhan Ahn, Jacob Austin, Paul Barham, Jan Botha, James Bradbury, Siddhartha Brahma, Kevin Brooks, Michele Catasta, Yong Cheng, Colin Cherry, Christopher A. Choquette-Choo, Aakanksha Chowdhery, Clément Crepy, Shachi Dave, Mostafa Dehghani, Sunipa Dev, Jacob Devlin, Mark Diaz, Nan Du, Ethan Dyer, Vlad
|
| 190 |
+
|
| 191 |
+
Feinberg, Fangxiaoyu Feng, Vlad Fienber, Markus Freitag, Xavier Garcia, Sebastian Gehrmann, Lucas Gonzalez, Guy Gur-Ari, Steven Hand, Hadi Hashemi, Le Hou, Joshua Howland, Andrea Hu, Jeffrey Hui, Jeremy Hurwitz, Michael Isard, Abe Ittycheriah, Matthew Jagielski, Wenhao Jia, Kathleen Kenealy, Maxim Krikun, Sneha Kudugunta, Chang Lan, Katherine Lee, Benjamin Lee, Eric Li, Music Li, Wei Li, YaGuang Li, Jian Li, Hyeontaek Lim, Hanzhao Lin, Zhongtao Liu, Frederick Liu, Marcello Maggioni, Aroma Mahendru, Joshua Maynez, Vedant Misra, Maysam Moussalem, Zachary Nado, John Nham, Eric Ni, Andrew Nystrom, Alicia Parrish, Marie Pellat, Martin Polacek, Alex Polozov, Reiner Pope, Siyuan Qiao, Emily Reif, Bryan Richter, Parker Riley, Alex Castro Ros, Aurko Roy, Brennan Saeta, Rajkumar Samuel, Renee Shelby, Ambrose Slone, Daniel Smilkov, David R. So, Daniel Sohn, Simon Tokumine, Dasha Valter, Vijay Vasudevan, Kiran Vodrahalli, Xuezhi Wang, Pidong Wang, Zirui Wang, Tao Wang, John Wieting, Yuhuai Wu, Kelvin Xu, Yunhan Xu, Linting Xue, Pengcheng Yin, Jiahui Yu, Qiao Zhang, Steven Zheng, Ce Zheng, Weikang Zhou, Denny Zhou, Slav Petrov, and Yonghui Wu. 2023. PaLM 2 technical report.
|
| 192 |
+
Shuguang Han, Xuanhui Wang, Mike Bendersky, and Marc Najork. 2020. Learning-to-rank with BERT in TF-Ranking. arXiv preprint arXiv:2004.08476.
|
| 193 |
+
Rolf Jagerman, Honglei Zhuang, Zhen Qin, Xuanhui Wang, and Michael Bendersky. 2023. Query expansion by prompting large language models. In Gen-IR @ SIGIR 2023: The First Workshop on Generative Information Retrieval.
|
| 194 |
+
Kalervo Järvelin and Jaana Kekäläinen. 2002. Cumulated gain-based evaluation of IR techniques. ACM Transactions on Information Systems, 20(4):422-446.
|
| 195 |
+
Robert J Johnston, Kevin J Boyle, Wiktor Adamowicz, Jeff Bennett, Roy Brouwer, Trudy Ann Cameron, W Michael Hanemann, Nick Hanley, Mandy Ryan, Riccardo Scarpa, et al. 2017. Contemporary guidance for stated preference studies. Journal of the Association of Environmental and Resource Economists, 4(2):319-405.
|
| 196 |
+
Minghan Li, Honglei Zhuang, Kai Hui, Zhen Qin, Jimmy Lin, Rolf Jagerman, Xuanhui Wang, and Michael Bendersky. 2024. Can query expansion improve generalization of strong cross-encoder rankers? In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval.
|
| 197 |
+
Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, et al. 2023. Holistic evaluation of language models. Transactions on Machine Learning Research.
|
| 198 |
+
|
| 199 |
+
Jimmy Lin, Xueguang Ma, Sheng-Chieh Lin, Zheng-Hong Yang, Ronak Pradeep, and Rodrigo Nogueira. 2021. Pyserini: A Python toolkit for reproducible information retrieval research with sparse and dense representations. In Proceedings of the 44th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 2356-2362.
|
| 200 |
+
Tie-Yan Liu. 2009. Learning to Rank for Information Retrieval. Now Publishers Inc.
|
| 201 |
+
Yang Liu, Dan Iter, Yichong Xu, Shuohang Wang, Ruochen Xu, and Chenguang Zhu. 2023. G-eval: Nlg evaluation using gpt-4 with better human alignment. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2511-2522.
|
| 202 |
+
Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023. Zero-shot listwise document reranking with a large language model. arXiv preprint arXiv:2305.02156.
|
| 203 |
+
Rodrigo Nogueira, Zhiying Jiang, Ronak Pradeep, and Jimmy Lin. 2020. Document ranking with a pretrained sequence-to-sequence model. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 708-718.
|
| 204 |
+
Rodrigo Nogueira, Wei Yang, Kyunghyun Cho, and Jimmy Lin. 2019. Multi-stage document ranking with BERT. arXiv preprint arXiv:1910.14424.
|
| 205 |
+
OpenAI. 2023. GPT-4 technical report. arXiv preprint arXiv:2303.08774.
|
| 206 |
+
Zhen Qin, Rolf Jagerman, Kai Hui, Honglei Zhuang, Junru Wu, Jiaming Shen, Tianqi Liu, Jialu Liu, Donald Metzler, Xuanhui Wang, et al. 2024. Large language models are effective text rankers with pairwise ranking prompting. In Findings of the Association for Computational Linguistics: NAACL 2024.
|
| 207 |
+
Zhen Qin, Le Yan, Honglei Zhuang, Yi Tay, Rama Kumar Pasumarthi, Xuanhui Wang, Michael Bendersky, and Marc Najork. 2021. Are neural rankers still outperformed by gradient boosted decision trees? In International Conference on Learning Representations.
|
| 208 |
+
Noelia Rivera-Garrido, MP Ramos-Sosa, Michela Accerenzi, and Pablo Brañas-Garza. 2022. Continuous and binary sets of responses differ in the field. Scientific Reports, 12(1):14376.
|
| 209 |
+
Kevin Roitero, Eddy Maddalena, Gianluca Demartini, and Stefano Mizzaro. 2018. On fine-grained relevance scales. In Proceedings of the 41st International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 675-684.
|
| 210 |
+
Devendra Sachan, Mike Lewis, Mandar Joshi, Armen Aghajanyan, Wen-tau Yih, Joelle Pineau, and Luke Zettlemoyer. 2022. Improving passage retrieval
|
| 211 |
+
|
| 212 |
+
with zero-shot question generation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 3781-3797.
|
| 213 |
+
Anna Shtok, Oren Kurland, David Carmel, Fiona Raiber, and Gad Markovits. 2012. Predicting query performance by query-drift estimation. ACM Transactions on Information Systems (TOIS), 30(2):1-35.
|
| 214 |
+
Weiwei Sun, Lingyong Yan, Xinyu Ma, Shuaiqiang Wang, Pengjie Ren, Zhumin Chen, Dawei Yin, and Zhaochun Ren. 2023. Is ChatGPT good at search? investigating large language models as re-ranking agents. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 14918-14937.
|
| 215 |
+
Yi Tay, Mostafa Dehghani, Vinh Q Tran, Xavier Garcia, Jason Wei, Xuezhi Wang, Hyung Won Chung, Dara Bahri, Tal Schuster, Steven Zheng, et al. 2022. UL2: Unifying language learning paradigms. In The Eleventh International Conference on Learning Representations.
|
| 216 |
+
Nandan Thakur, Nils Reimers, Andreas Rücklé, Abhishek Srivastava, and Iryna Gurevych. 2021. BEIR: A heterogeneous benchmark for zero-shot evaluation of information retrieval models. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2).
|
| 217 |
+
Avijit Thawani, Jay Pujara, Filip Ilievski, and Pedro Szekely. 2021. Representing numbers in NLP: a survey and a vision. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 644-656, Online. Association for Computational Linguistics.
|
| 218 |
+
Paul Thomas, Seth Spielman, Nick Craswell, and Bhaskar Mitra. 2023. Large language models can accurately predict searcher preferences. arXiv preprint arXiv:2309.10621.
|
| 219 |
+
Ellen Voorhees, Tasmeer Alam, Steven Bedrick, Dina Demner-Fushman, William R Hersh, Kyle Lo, Kirk Roberts, Ian Soboroff, and Lucy Lu Wang. 2021. Trec-covid: constructing a pandemic information retrieval test collection. In ACM SIGIR Forum, volume 54, pages 1-12. ACM New York, NY, USA.
|
| 220 |
+
Ellen M Voorhees. 2005. The trec robust retrieval track. In ACM SIGIR Forum, volume 39, pages 11-20. ACM New York, NY, USA.
|
| 221 |
+
Likang Wu, Zhi Zheng, Zhaopeng Qiu, Hao Wang, Hongchao Gu, Tingjia Shen, Chuan Qin, Chen Zhu, Hengshu Zhu, Qi Liu, Hui Xiong, and Enhong Chen. 2023. A survey on large language models for recommendation. arXiv preprint arXiv:2305.19860.
|
| 222 |
+
Ruicheng Xian, Honglei Zhuang, Zhen Qin, Hamed Zamani, Jing Lu, Ji Ma, Kai Hui, Han Zhao, Xuanhui Wang, and Michael Bendersky. 2023. Learning list-level domain-invariant representations for ranking. In Advances in Neural Information Processing Systems.
|
| 223 |
+
|
| 224 |
+
Honglei Zhuang, Zhen Qin, Shuguang Han, Xuanhui Wang, Michael Bendersky, and Marc Najork. 2021. Ensemble distillation for BERT-based ranking models. In Proceedings of the 2021 ACM SIGIR International Conference on Theory of Information Retrieval, pages 131-136.
|
| 225 |
+
|
| 226 |
+
Honglei Zhuang, Zhen Qin, Rolf Jagerman, Kai Hui, Ji Ma, Jing Lu, Jianmo Ni, Xuanhui Wang, and Michael Bendersky. 2023. RankT5: Fine-tuning T5 for text ranking with ranking losses. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 2308-2313.
|
| 227 |
+
|
| 228 |
+
Shengyao Zhuang, Honglei Zhuang, Bevan Koopman, and Guido Zuccon. 2024. A setwise approach for effective and highly efficient zero-shot ranking with large language models. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval.
|
| 229 |
+
|
| 230 |
+
# A Alternative Relevance Labels
|
| 231 |
+
|
| 232 |
+
We replace the relevance labels with other phrases to examine how the performance changes. For RG-2L, we replace "Not Relevant" with "Irrelevant"; for RG-3L, we replace "Somewhat Relevant" with "Partially Relevant".
|
| 233 |
+
|
| 234 |
+
The results are shown in Table 4. Regardless of using different textual representations of relevance labels, RG-3L consistently outperforms RG-2L. This suggests that the discovery in this paper is generalizable to different choices of textual relevance labels. Another observation is that RG-2L performance varies slightly more than RG-3L performance. This might indicate that RG-3L is more robust to different wording of relevance labels.
|
| 235 |
+
|
| 236 |
+
Table 4: Comparing ranking performance with different textual relevance labels. Measured by average NDCG@10 on BEIR data sets.
|
| 237 |
+
|
| 238 |
+
<table><tr><td>Method</td><td>Relevance Labels</td><td>Average</td></tr><tr><td rowspan="2">RG-2L</td><td>“Irrelevant”, “Relevant”</td><td>0.4717</td></tr><tr><td>“Not Relevant”, “Relevant”</td><td>0.4789</td></tr><tr><td rowspan="2">RG-3L</td><td>“Not Relevant”, “Partially Relevant”, “Highly Relevant”</td><td>0.4975</td></tr><tr><td>“Not Relevant”, “Somewhat Relevant”, “Highly Relevant”</td><td>0.4992</td></tr></table>
|
| 239 |
+
|
| 240 |
+
We also experiment with different rating scale formulations. Instead of prompting the LLM to rate the relevance from 0 to $k$ , we also try to ask the LLM to rate the relevance from 1 to $k$ , denoted as RG-S(1, k). We plot the average NDCG@10 performance in Figure 4.
|
| 241 |
+
|
| 242 |
+
The performance of both methods does not differ much when $k$ is larger than 4. But not providing the "0" option substantially hurts the performance when $k$ is lower than or equal to 3. This might also suggest that using the rating scale from 0 to $k$ is slightly more robust.
|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
Figure 4: Comparing rating scale relevance generation with different prompts.
|
| 246 |
+
|
| 247 |
+
# B In-Depth Score Distribution
|
| 248 |
+
|
| 249 |
+
We plot the in-depth score distribution of our methods. Specifically, we group the query-document pairs in Covid data set by different ground-truth relevance. We then denote $p_k$ as the random variable of the marginal probability $p_{i,k}$ derived for different query-document pairs $(q, d_i)$ . We then plot the estimated distribution of $p_k$ for each relevance label $l_k$ respectively. Figures 5 and 6 show the results on Covid data set when we use RG-S(0,4) and RG-4L respectively. The ground-truth relevance of Covid data set is 0, 1 or 2.
|
| 250 |
+
|
| 251 |
+
In Figure 5, we observe that the distributions of marginal probability $p_k$ of relevance label "0", "1" and "2" shift down towards 0 as the ground-truth relevance increases. Meanwhile, the distributions of $p_k$ across relevance label "3" and "4" shift up towards 1. In Figure 6, we found a similar trend where the distributions of marginal probability $p_k$ of "Not Relevant" and "Somewhat Relevant" shift down towards 0 as the ground-truth relevance increases, while the distributions of $p_k$ across "Highly Relevant" and "Perfectly Relevant" shift up towards 1. This reveals how our expected relevance values (ER) method works in practice, and also gives us hints on how peak relevance likelihood (PR) alone works based on the distribution shift of the peak relevance label.
|
| 252 |
+
|
| 253 |
+
# C Varying Assigned Relevance Values
|
| 254 |
+
|
| 255 |
+
We also investigate how the user provided relevance values $y_{k}$ 's make a difference to the ranking performance. We use RG-3L as the example. We fix $y_{0} = 0$ for "Not Relevant" and $y_{2} = 2$ for "Highly Relevant", but vary the relevance value $y_{1}$ for "Somewhat Relevant" between $y_{0}$ and $y_{2}$ . We evaluate the average NDCG@10 on the 8 BEIR data sets and present the results in Table 5.
|
| 256 |
+
|
| 257 |
+
As $y_{1}$ varies, the average NDCG@10 does not change substantially when $y_{1}$ decreases. Even when $y_{1} = y_{0}$ , the NDCG@10 performance remains high. This is expected as NDCG@10 focuses on the top-ranked items, thus changing the relevance values of intermediate relevance labels may not significantly change the top-ranked items.
|
| 258 |
+
|
| 259 |
+
In contrast, when $y_{1} = y_{2}$ , the performance drops significantly to about the same level as RG-2L. This might indirectly explain why RG-2L performance is worse than RG-3L, as it might not be able to distinguish partially relevant and highly relevant documents.
|
| 260 |
+
|
| 261 |
+
Table 5: Comparing ranking performance with different relevance values $y_{k}$ 's. Measured by average NDCG@10 on BEIR data sets.
|
| 262 |
+
|
| 263 |
+
<table><tr><td>Method</td><td>[y0,y1,y2]</td><td>Average</td></tr><tr><td>RG-3L</td><td>[0.00, 0.00, 2.00]</td><td>0.5000</td></tr><tr><td>RG-3L</td><td>[0.00, 0.50, 2.00]</td><td>0.5000</td></tr><tr><td>RG-3L</td><td>[0.00, 1.00, 2.00]</td><td>0.4992</td></tr><tr><td>RG-3L</td><td>[0.00, 1.50, 2.00]</td><td>0.4990</td></tr><tr><td>RG-3L</td><td>[0.00, 2.00, 2.00]</td><td>0.4779</td></tr></table>
|
| 264 |
+
|
| 265 |
+
# D Experiments on Other LLMs
|
| 266 |
+
|
| 267 |
+
To verify the generalizability of our proposed method, we also conduct experiments on two other LLMs. We use FLAN PaLM2 XS, which is a smaller alternative of FLAN PaLM2 S. We also use FLAN UL2 (Tay et al., 2022), which is an open-sourced LLM with 20B parameters. The results are presented in Table 6. We observe similar results where scoring fine-grained relevance labels (RG-3L, RG-S(0,4)) can achieve better average performance than scoring binary labels (RG-2L). This shows that our method can generalize to different LLMs.
|
| 268 |
+
|
| 269 |
+
# E More Comparison Results
|
| 270 |
+
|
| 271 |
+
We also include a more thorough comparison with other methods including:
|
| 272 |
+
|
| 273 |
+
- BM25. The base retriever performance.
|
| 274 |
+
- monoT5 (Nogueira et al., 2020). A T5 XL model fine-tuned on MS MARCO data set for text ranking task and applied directly on the BEIR data sets.
|
| 275 |
+
- RankT5 (Zhuang et al., 2023). An encoder-only model initialized with T5 XL but fine-tuned on MS MARCO data set using listwise softmax cross-entropy ranking loss and applied directly on the BEIR data sets.
|
| 276 |
+
- Pairwise Ranking Prompts (PRP) (Qin et al., 2024). A zero-shot pairwise LLM ranker which takes a query and two documents as input, and outputs which one is more relevant to the query. We include the best results of PRP which uses UL2 as the LLM and a sliding window strategy.
|
| 277 |
+
- RankGPT (Sun et al., 2023). A zero-shot list-wise LLM ranker which takes a query and a list of documents as input, and outputs an
|
| 278 |
+
|
| 279 |
+

|
| 280 |
+
Figure 5: Distribution of marginal probability $p_k$ of each relevance label in RG-S(0,4) for query-document pairs with different ground-truth labels on Covid data set
|
| 281 |
+
|
| 282 |
+

|
| 283 |
+
|
| 284 |
+

|
| 285 |
+
|
| 286 |
+

|
| 287 |
+
Figure 6: Distribution of marginal probability $p_k$ of each relevance label in RG-4L for query-document pairs with different ground-truth labels on Covid data set
|
| 288 |
+
|
| 289 |
+

|
| 290 |
+
|
| 291 |
+

|
| 292 |
+
|
| 293 |
+
Table 6: Overall ranking performances of FLAN PaLM2 XS and FLAN UL2 measured by NDCG@10 on BEIR data sets. The best performances are bolded.
|
| 294 |
+
|
| 295 |
+
<table><tr><td>Model</td><td>Method</td><td>Covid</td><td>Touche</td><td>DBPedia</td><td>SciFact</td><td>Signal</td><td>News</td><td>Robust04</td><td>NFCorpus</td><td>Average</td></tr><tr><td rowspan="5">FLAN PaLM2 XS</td><td>RG-2L</td><td>0.7769</td><td>0.2549</td><td>0.4228</td><td>0.6826</td><td>0.2892</td><td>0.4229</td><td>0.4947</td><td>0.3756</td><td>0.4649</td></tr><tr><td>RG-3L</td><td>0.7936</td><td>0.2554</td><td>0.4235</td><td>0.6810</td><td>0.2931</td><td>0.4374</td><td>0.4933</td><td>0.3777</td><td>0.4694</td></tr><tr><td>RG-4L</td><td>0.7969</td><td>0.2598</td><td>0.4277</td><td>0.6681</td><td>0.3004</td><td>0.4326</td><td>0.4772</td><td>0.3773</td><td>0.4675</td></tr><tr><td>RG-S(0,2)</td><td>0.7819</td><td>0.2535</td><td>0.4141</td><td>0.7135</td><td>0.2791</td><td>0.4356</td><td>0.4579</td><td>0.3711</td><td>0.4633</td></tr><tr><td>RG-S(0,4)</td><td>0.8119</td><td>0.2885</td><td>0.4386</td><td>0.7102</td><td>0.3097</td><td>0.4341</td><td>0.4559</td><td>0.3763</td><td>0.4781</td></tr><tr><td rowspan="5">FLAN UL2</td><td>RG-2L</td><td>0.7769</td><td>0.2737</td><td>0.4047</td><td>0.5626</td><td>0.2822</td><td>0.4573</td><td>0.5421</td><td>0.3756</td><td>0.4594</td></tr><tr><td>RG-3L</td><td>0.7998</td><td>0.2555</td><td>0.4303</td><td>0.7007</td><td>0.2928</td><td>0.4698</td><td>0.5582</td><td>0.3757</td><td>0.4853</td></tr><tr><td>RG-4L</td><td>0.8030</td><td>0.2477</td><td>0.4336</td><td>0.7186</td><td>0.3047</td><td>0.4710</td><td>0.5575</td><td>0.3775</td><td>0.4892</td></tr><tr><td>RG-S(0,2)</td><td>0.7915</td><td>0.2546</td><td>0.4252</td><td>0.7341</td><td>0.2997</td><td>0.4700</td><td>0.5497</td><td>0.3702</td><td>0.4869</td></tr><tr><td>RG-S(0,4)</td><td>0.7969</td><td>0.2641</td><td>0.4325</td><td>0.7391</td><td>0.3129</td><td>0.4557</td><td>0.5454</td><td>0.3708</td><td>0.4897</td></tr></table>
|
| 296 |
+
|
| 297 |
+
ordered list of documents based on their relevance. The method is used jointly with a sliding window strategy. We do not include the GPT-4 reranking number as it involves a second-stage ranking.
|
| 298 |
+
|
| 299 |
+
We also include the detailed results of our proposed methods with different $k$ values, and different strategies to derive ranking scores. Table 7 illustrates the results.
|
| 300 |
+
|
| 301 |
+
It is not surprising that our methods perform slightly worse than monoT5 or RankT5 as they are fine-tuned for the text ranking task on MS MARCO
|
| 302 |
+
|
| 303 |
+
data set. However, it is encouraging to see our prompting method substantially shrinks the gap between zero-shot LLM rankers and RankT5.
|
| 304 |
+
|
| 305 |
+
Our methods can also perform slightly better than the single-stage RankGPT. However, note that the LLMs used in these experiments are different, so the difference might also be explained by the model difference.
|
| 306 |
+
|
| 307 |
+
Figure 7 also plots the performance of rating scale methods with different ranking score derivation methods. It can be observed that the ranking performance of using PR to derive ranking scores is more sensitive to the selection of $k$ than using ER.
|
| 308 |
+
|
| 309 |
+
Table 7: Overall ranking performances measured by NDCG@10 on BEIR data sets.
|
| 310 |
+
|
| 311 |
+
<table><tr><td>Method</td><td>Model</td><td>Covid</td><td>Touche</td><td>DBPedia</td><td>SciFact</td><td>Signal</td><td>News</td><td>Robust04</td><td>NFCorpus</td><td>Average</td></tr><tr><td>BM25</td><td>N/A</td><td>0.5947</td><td>0.4422</td><td>0.3180</td><td>0.6789</td><td>0.3305</td><td>0.3952</td><td>0.4070</td><td>0.3075</td><td>0.4342</td></tr><tr><td>QG</td><td>FLAN PaLM2 S</td><td>0.7357</td><td>0.2408</td><td>0.3773</td><td>0.7495</td><td>0.2872</td><td>0.4156</td><td>0.4651</td><td>0.3673</td><td>0.4548</td></tr><tr><td>RG-YN</td><td>FLAN PaLM2 S</td><td>0.7897</td><td>0.2427</td><td>0.3696</td><td>0.6958</td><td>0.3196</td><td>0.4588</td><td>0.5656</td><td>0.3743</td><td>0.4770</td></tr><tr><td>RG-2L-ER</td><td>FLAN PaLM2 S</td><td>0.7949</td><td>0.2411</td><td>0.3590</td><td>0.7290</td><td>0.2996</td><td>0.4623</td><td>0.5636</td><td>0.3814</td><td>0.4789</td></tr><tr><td>RG-3L-ER</td><td>FLAN PaLM2 S</td><td>0.8065</td><td>0.2650</td><td>0.4013</td><td>0.7671</td><td>0.3142</td><td>0.4890</td><td>0.5660</td><td>0.3849</td><td>0.4992</td></tr><tr><td>RG-4L-ER</td><td>FLAN PaLM2 S</td><td>0.8063</td><td>0.2388</td><td>0.4033</td><td>0.7766</td><td>0.3184</td><td>0.4884</td><td>0.5635</td><td>0.3801</td><td>0.4969</td></tr><tr><td>RG-2L-PR</td><td>FLAN PaLM2 S</td><td>0.7874</td><td>0.2482</td><td>0.3435</td><td>0.7230</td><td>0.2819</td><td>0.4619</td><td>0.5647</td><td>0.3706</td><td>0.4726</td></tr><tr><td>RG-3L-PR</td><td>FLAN PaLM2 S</td><td>0.8065</td><td>0.2634</td><td>0.4032</td><td>0.7745</td><td>0.3202</td><td>0.4816</td><td>0.5681</td><td>0.3860</td><td>0.5005</td></tr><tr><td>RG-4L-PR</td><td>FLAN PaLM2 S</td><td>0.8076</td><td>0.2354</td><td>0.4050</td><td>0.7772</td><td>0.3121</td><td>0.4712</td><td>0.5561</td><td>0.3824</td><td>0.4934</td></tr><tr><td>RG-S(0, 2)-ER</td><td>FLAN PaLM2 
S</td><td>0.7760</td><td>0.2695</td><td>0.3709</td><td>0.6921</td><td>0.3034</td><td>0.4677</td><td>0.5557</td><td>0.3787</td><td>0.4768</td></tr><tr><td>RG-S(0, 3)-ER</td><td>FLAN PaLM2 S</td><td>0.7936</td><td>0.2720</td><td>0.4092</td><td>0.7434</td><td>0.3240</td><td>0.4817</td><td>0.5662</td><td>0.3868</td><td>0.4971</td></tr><tr><td>RG-S(0, 4)-ER</td><td>FLAN PaLM2 S</td><td>0.8048</td><td>0.2757</td><td>0.4190</td><td>0.7521</td><td>0.3301</td><td>0.4790</td><td>0.5668</td><td>0.3901</td><td>0.5022</td></tr><tr><td>RG-S(0, 5)-ER</td><td>FLAN PaLM2 S</td><td>0.8088</td><td>0.2702</td><td>0.4217</td><td>0.7475</td><td>0.3266</td><td>0.4734</td><td>0.5666</td><td>0.3871</td><td>0.5002</td></tr><tr><td>RG-S(0, 6)-ER</td><td>FLAN PaLM2 S</td><td>0.7898</td><td>0.2720</td><td>0.4260</td><td>0.7529</td><td>0.3288</td><td>0.4734</td><td>0.5687</td><td>0.3864</td><td>0.4997</td></tr><tr><td>RG-S(0, 7)-ER</td><td>FLAN PaLM2 S</td><td>0.7873</td><td>0.2695</td><td>0.4225</td><td>0.7557</td><td>0.3263</td><td>0.4848</td><td>0.5659</td><td>0.3831</td><td>0.4994</td></tr><tr><td>RG-S(0, 8)-ER</td><td>FLAN PaLM2 S</td><td>0.7971</td><td>0.2730</td><td>0.4254</td><td>0.7463</td><td>0.3239</td><td>0.4722</td><td>0.5647</td><td>0.3853</td><td>0.4985</td></tr><tr><td>RG-S(0, 9)-ER</td><td>FLAN PaLM2 S</td><td>0.7910</td><td>0.2746</td><td>0.4160</td><td>0.7465</td><td>0.3017</td><td>0.4679</td><td>0.5644</td><td>0.3871</td><td>0.4936</td></tr><tr><td>RG-S(0, 10)-ER</td><td>FLAN PaLM2 S</td><td>0.7576</td><td>0.2496</td><td>0.3738</td><td>0.7310</td><td>0.2771</td><td>0.4779</td><td>0.5642</td><td>0.3655</td><td>0.4746</td></tr><tr><td>RG-S(0, 2)-PR</td><td>FLAN PaLM2 S</td><td>0.7821</td><td>0.2735</td><td>0.3469</td><td>0.6954</td><td>0.2597</td><td>0.4540</td><td>0.5409</td><td>0.3752</td><td>0.4659</td></tr><tr><td>RG-S(0, 4)-PR</td><td>FLAN PaLM2 
S</td><td>0.8036</td><td>0.2785</td><td>0.4221</td><td>0.7625</td><td>0.3168</td><td>0.4623</td><td>0.5559</td><td>0.3886</td><td>0.4988</td></tr><tr><td>monoT5</td><td>Fine-tuned T5 XL</td><td>0.8071</td><td>0.3241</td><td>0.4445</td><td>0.7657</td><td>0.3255</td><td>0.4849</td><td>0.5671</td><td>0.3897</td><td>0.5136</td></tr><tr><td>RankT5</td><td>Fine-tuned T5 XL</td><td>0.8200</td><td>0.3762</td><td>0.4419</td><td>0.7686</td><td>0.3180</td><td>0.4815</td><td>0.5276</td><td>0.3860</td><td>0.5150</td></tr><tr><td>RankGPT</td><td>GPT-3.5 Turbo</td><td>0.7667</td><td>0.3618</td><td>0.4447</td><td>0.7043</td><td>0.3212</td><td>0.4885</td><td>0.5062</td><td>0.3562</td><td>0.4937</td></tr><tr><td>PRP</td><td>UL2</td><td>0.7945</td><td>0.3789</td><td>0.4647</td><td>0.7333</td><td>0.3520</td><td>0.4911</td><td>0.5343</td><td>N/A</td><td>N/A</td></tr></table>
|
| 312 |
+
|
| 313 |
+

|
| 314 |
+
Figure 7: Comparing rating scale relevance generation with different strategies to derive ranking scores.
|
| 315 |
+
|
| 316 |
+
Table 8: Comparing ranking performance with instructions and in-context learning. Measured by average NDCG@10 on BEIR data sets.
|
| 317 |
+
|
| 318 |
+
<table><tr><td>Method</td><td>Average</td></tr><tr><td>RG-2L</td><td>0.4789</td></tr><tr><td>+ Instructions</td><td>0.4914</td></tr><tr><td>+ Instructions + 4-shot ICL</td><td>0.4914</td></tr><tr><td>RG-3L</td><td>0.4992</td></tr><tr><td>+ Instructions</td><td>0.5034</td></tr><tr><td>+ Instructions + 4-shot ICL</td><td>0.5046</td></tr></table>
|
| 319 |
+
|
| 320 |
+
# F Instructions and In-Context Learning
|
| 321 |
+
|
| 322 |
+
We also try adding instructions and few-shot exemplars into the prompt. For instructions, we directly add the definition of the relevance labels into the prompt. The relevance label definitions are directly
|
| 323 |
+
|
| 324 |
+
copied from TREC-DL 2020 (Craswell et al., 2020a). For RG-2L instructions we use the "Irrelevant" and "Relevant" labels; for RG-3L instructions we use the "Irrelevant", "Relevant" and "Highly Relevant" labels. We also change the relevance labels accordingly to align with the instructions.
|
| 325 |
+
|
| 326 |
+
In addition to instructions, we also try to include few-shot exemplars to leverage the model's in-context learning capabilities. We include 4-shot exemplars, which are randomly sampled from TREC-DL 2020 data sets. We sampled 2 "Irrelevant", 1 "Relevant" and 1 "Perfectly Relevant" query-document pairs. To align with the instructions, for RG-2L we label both "Relevant" and "Perfectly Relevant" exemplar query-document pairs as "Relevant"; for RG-3L we label the "Perfectly Relevant" pair as "Highly Relevant".
|
| 327 |
+
|
| 328 |
+
The results are shown in Table 8. Adding instructions improves both RG-2L and RG-3L, while RG-3L still remains $+1.2\%$ better than RG-2L. Further adding exemplars on top of the instructions does not improve much, possibly due to the distribution discrepancy between TREC-DL and BEIR.
|
| 329 |
+
|
| 330 |
+
# G Prompts
|
| 331 |
+
|
| 332 |
+
In this section, we provide the prompts we used for each method:
|
| 333 |
+
|
| 334 |
+
# G.1 Query Generation (QG)
|
| 335 |
+
|
| 336 |
+
We use the following prompt for our QG experiments. We find this prompt performs better empirically for zero-shot QG LLM rankers than the prompt used in existing works (Sachan et al., 2022).
|
| 337 |
+
|
| 338 |
+
I will check whether what you said could answer my question.
|
| 339 |
+
|
| 340 |
+
You said: {document}
|
| 341 |
+
|
| 342 |
+
I googled: {query}
|
| 343 |
+
|
| 344 |
+
# G.2 Binary Relevance Generation (RG-YN)
|
| 345 |
+
|
| 346 |
+
We use the following prompt for our RG-YN experiments. We find this prompt performs better empirically than the prompt used originally by Liang et al. (2023), Sun et al. (2023) and Qin et al. (2024).
|
| 347 |
+
|
| 348 |
+
For the following query and document, judge whether they are relevant. Output "Yes" or "No".
|
| 349 |
+
|
| 350 |
+
Query: {query}
|
| 351 |
+
|
| 352 |
+
Document: {document}
|
| 353 |
+
|
| 354 |
+
Output:
|
| 355 |
+
|
| 356 |
+
# G.3 2-Level Relevance Generation (RG-2L)
|
| 357 |
+
|
| 358 |
+
For the following query and document, judge whether they are "Relevant", or "Not Relevant".
|
| 359 |
+
|
| 360 |
+
Query: {query}
|
| 361 |
+
|
| 362 |
+
Document: {document}
|
| 363 |
+
|
| 364 |
+
Output:
|
| 365 |
+
|
| 366 |
+
# G.4 3-Level Relevance Generation (RG-3L)
|
| 367 |
+
|
| 368 |
+
For the following query and document, judge whether they are "Highly Relevant", "Somewhat Relevant", or "Not Relevant".
|
| 369 |
+
|
| 370 |
+
Query: {query}
|
| 371 |
+
|
| 372 |
+
Document: {document}
|
| 373 |
+
|
| 374 |
+
Output:
|
| 375 |
+
|
| 376 |
+
# G.5 4-Level Relevance Generation (RG-4L)
|
| 377 |
+
|
| 378 |
+
For the following query and document, judge whether they are "Perfectly Relevant", "Highly Relevant", "Somewhat Relevant", or "Not Relevant".
|
| 379 |
+
|
| 380 |
+
Query: {query}
|
| 381 |
+
|
| 382 |
+
Document: {document}
|
| 383 |
+
|
| 384 |
+
Output:
|
| 385 |
+
|
| 386 |
+
# G.6 Rating Scale Relevance Generation (RG-S(0, k))
|
| 387 |
+
|
| 388 |
+
From a scale of 0 to $\{\mathbf{k}\}$ , judge the relevance between the query and the document.
|
| 389 |
+
|
| 390 |
+
Query: {query}
|
| 391 |
+
|
| 392 |
+
Document: {document}
|
| 393 |
+
|
| 394 |
+
Output:
|
beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a6e73f3e5e735a192a4dbe4f5fc5a34275079f8f1b87eaa5eb649b2dea1d8ad6
|
| 3 |
+
size 694635
|
beyondyesandnoimprovingzeroshotllmrankersviascoringfinegrainedrelevancelabels/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c6cfd2b90a19f223eebc14619e58d8e9711019d38ff92c5f640e3f21ae8c1c2f
|
| 3 |
+
size 419341
|
breakingthelanguagebarriercandirectinferenceoutperformpretranslationinmultilingualllmapplications/22394956-5f70-4322-9984-6d8e991f143c_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d205b53e2aa6ce751392ee53eacb40578d23e4097edd1d2c6271ffbecac89300
|
| 3 |
+
size 92333
|
breakingthelanguagebarriercandirectinferenceoutperformpretranslationinmultilingualllmapplications/22394956-5f70-4322-9984-6d8e991f143c_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9af7c19fd1f38995af54c4366f889ca620b9a81f4d4aaf3e2e6e67f1ad8be47d
|
| 3 |
+
size 105373
|