Add Batch 6f5d0319-af05-46c6-b0b6-307d21b71751 data
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +64 -0
- 2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/502ba00b-aedc-401c-9087-9dfb7448dbe0_content_list.json +0 -0
- 2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/502ba00b-aedc-401c-9087-9dfb7448dbe0_model.json +0 -0
- 2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/502ba00b-aedc-401c-9087-9dfb7448dbe0_origin.pdf +3 -0
- 2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/full.md +394 -0
- 2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/images.zip +3 -0
- 2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/layout.json +0 -0
- 2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/b5a9fb0d-d326-47a6-aba0-d08593e3056d_content_list.json +0 -0
- 2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/b5a9fb0d-d326-47a6-aba0-d08593e3056d_model.json +0 -0
- 2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/b5a9fb0d-d326-47a6-aba0-d08593e3056d_origin.pdf +3 -0
- 2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/full.md +486 -0
- 2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/images.zip +3 -0
- 2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/layout.json +0 -0
- 2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/1de0907b-7695-457e-9022-fcf3d0255480_content_list.json +0 -0
- 2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/1de0907b-7695-457e-9022-fcf3d0255480_model.json +0 -0
- 2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/1de0907b-7695-457e-9022-fcf3d0255480_origin.pdf +3 -0
- 2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/full.md +475 -0
- 2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/images.zip +3 -0
- 2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/layout.json +0 -0
- 2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/c15622fd-9732-4c6f-a455-96733517d658_content_list.json +1821 -0
- 2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/c15622fd-9732-4c6f-a455-96733517d658_model.json +0 -0
- 2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/c15622fd-9732-4c6f-a455-96733517d658_origin.pdf +3 -0
- 2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/full.md +320 -0
- 2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/images.zip +3 -0
- 2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/layout.json +0 -0
- 2023/Towards Faithful Dialogues via Focus Learning/657e0968-cdfa-4f01-81a8-cef1a90aabec_content_list.json +2331 -0
- 2023/Towards Faithful Dialogues via Focus Learning/657e0968-cdfa-4f01-81a8-cef1a90aabec_model.json +0 -0
- 2023/Towards Faithful Dialogues via Focus Learning/657e0968-cdfa-4f01-81a8-cef1a90aabec_origin.pdf +3 -0
- 2023/Towards Faithful Dialogues via Focus Learning/full.md +429 -0
- 2023/Towards Faithful Dialogues via Focus Learning/images.zip +3 -0
- 2023/Towards Faithful Dialogues via Focus Learning/layout.json +0 -0
- 2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/46a16516-75c3-4923-9a1f-8ba83ee5b355_content_list.json +0 -0
- 2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/46a16516-75c3-4923-9a1f-8ba83ee5b355_model.json +0 -0
- 2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/46a16516-75c3-4923-9a1f-8ba83ee5b355_origin.pdf +3 -0
- 2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/full.md +489 -0
- 2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/images.zip +3 -0
- 2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/layout.json +0 -0
- 2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/c7e93c62-eea3-4f99-ab1c-c34da618031b_content_list.json +0 -0
- 2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/c7e93c62-eea3-4f99-ab1c-c34da618031b_model.json +0 -0
- 2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/c7e93c62-eea3-4f99-ab1c-c34da618031b_origin.pdf +3 -0
- 2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/full.md +389 -0
- 2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/images.zip +3 -0
- 2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/layout.json +0 -0
- 2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/e0a7a239-b27a-4248-b7cc-b08e9f53d50c_content_list.json +0 -0
- 2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/e0a7a239-b27a-4248-b7cc-b08e9f53d50c_model.json +0 -0
- 2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/e0a7a239-b27a-4248-b7cc-b08e9f53d50c_origin.pdf +3 -0
- 2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/full.md +0 -0
- 2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/images.zip +3 -0
- 2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/layout.json +0 -0
- 2023/Towards Open-World Product Attribute Mining_ A Lightly-Supervised Approach/fe9cab5a-1dad-4066-bceb-65ab3c7270b6_content_list.json +0 -0
.gitattributes
CHANGED
@@ -6755,3 +6755,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
2023/Topic-Guided[[:space:]]Sampling[[:space:]]For[[:space:]]Data-Efficient[[:space:]]Multi-Domain[[:space:]]Stance[[:space:]]Detection/48f2a85c-41f6-4e5d-abc8-87001e1f4f54_origin.pdf filter=lfs diff=lfs merge=lfs -text
2023/Toward[[:space:]]Human-Like[[:space:]]Evaluation[[:space:]]for[[:space:]]Natural[[:space:]]Language[[:space:]]Generation[[:space:]]with[[:space:]]Error[[:space:]]Analysis/9caf9eb5-7c1d-467a-b79f-fda459a235e9_origin.pdf filter=lfs diff=lfs merge=lfs -text
2023/Toward[[:space:]]Interactive[[:space:]]Dictation/b10fcc8b-bc8b-417e-ae05-15dbe25c86ce_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Benchmarking[[:space:]]and[[:space:]]Improving[[:space:]]the[[:space:]]Temporal[[:space:]]Reasoning[[:space:]]Capability[[:space:]]of[[:space:]]Large[[:space:]]Language[[:space:]]Models/502ba00b-aedc-401c-9087-9dfb7448dbe0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Better[[:space:]]Entity[[:space:]]Linking[[:space:]]with[[:space:]]Multi-View[[:space:]]Enhanced[[:space:]]Distillation/b5a9fb0d-d326-47a6-aba0-d08593e3056d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Boosting[[:space:]]the[[:space:]]Open-Domain[[:space:]]Chatbot[[:space:]]with[[:space:]]Human[[:space:]]Feedback/1de0907b-7695-457e-9022-fcf3d0255480_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Domain-Agnostic[[:space:]]and[[:space:]]Domain-Adaptive[[:space:]]Dementia[[:space:]]Detection[[:space:]]from[[:space:]]Spoken[[:space:]]Language/c15622fd-9732-4c6f-a455-96733517d658_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Faithful[[:space:]]Dialogues[[:space:]]via[[:space:]]Focus[[:space:]]Learning/657e0968-cdfa-4f01-81a8-cef1a90aabec_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Higher[[:space:]]Pareto[[:space:]]Frontier[[:space:]]in[[:space:]]Multilingual[[:space:]]Machine[[:space:]]Translation/46a16516-75c3-4923-9a1f-8ba83ee5b355_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Identifying[[:space:]]Fine-Grained[[:space:]]Depression[[:space:]]Symptoms[[:space:]]from[[:space:]]Memes/c7e93c62-eea3-4f99-ab1c-c34da618031b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Leaving[[:space:]]No[[:space:]]Indic[[:space:]]Language[[:space:]]Behind_[[:space:]]Building[[:space:]]Monolingual[[:space:]]Corpora,[[:space:]]Benchmark[[:space:]]and[[:space:]]Models[[:space:]]for[[:space:]]Indic[[:space:]]Languages/e0a7a239-b27a-4248-b7cc-b08e9f53d50c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Open-World[[:space:]]Product[[:space:]]Attribute[[:space:]]Mining_[[:space:]]A[[:space:]]Lightly-Supervised[[:space:]]Approach/fe9cab5a-1dad-4066-bceb-65ab3c7270b6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Robust[[:space:]]Low-Resource[[:space:]]Fine-Tuning[[:space:]]with[[:space:]]Multi-View[[:space:]]Compressed[[:space:]]Representations/fa875e15-8722-4135-9f22-c8e4d559def7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Stable[[:space:]]Natural[[:space:]]Language[[:space:]]Understanding[[:space:]]via[[:space:]]Information[[:space:]]Entropy[[:space:]]Guided[[:space:]]Debiasing/e75bfb25-7de7-4ebc-97b9-220e652bb1f1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Understanding[[:space:]]Chain-of-Thought[[:space:]]Prompting_[[:space:]]An[[:space:]]Empirical[[:space:]]Study[[:space:]]of[[:space:]]What[[:space:]]Matters/bf11d70e-9f67-421a-9611-4b29831e8132_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Understanding[[:space:]]Omission[[:space:]]in[[:space:]]Dialogue[[:space:]]Summarization/c48ea565-174e-4317-a6fa-c84d06aeb299_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Understanding[[:space:]]and[[:space:]]Improving[[:space:]]Knowledge[[:space:]]Distillation[[:space:]]for[[:space:]]Neural[[:space:]]Machine[[:space:]]Translation/77d251b4-1208-4675-8472-ec0552359309_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Unifying[[:space:]]Multi-Lingual[[:space:]]and[[:space:]]Cross-Lingual[[:space:]]Summarization/072575cb-ddb9-488f-91bb-9da37d1c7bd5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]Zero-Shot[[:space:]]Multilingual[[:space:]]Transfer[[:space:]]for[[:space:]]Code-Switched[[:space:]]Responses/eea21a2c-6f67-49c6-a81b-3f4152f5fdab_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]a[[:space:]]Common[[:space:]]Understanding[[:space:]]of[[:space:]]Contributing[[:space:]]Factors[[:space:]]for[[:space:]]Cross-Lingual[[:space:]]Transfer[[:space:]]in[[:space:]]Multilingual[[:space:]]Language[[:space:]]Models_[[:space:]]A[[:space:]]Review/4c0d9924-2f6f-4bb1-a79a-3466b3fa5498_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Towards[[:space:]]standardizing[[:space:]]Korean[[:space:]]Grammatical[[:space:]]Error[[:space:]]Correction_[[:space:]]Datasets[[:space:]]and[[:space:]]Annotation/9f1a8335-cdf0-46a5-91b0-a028b821bb4f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Training[[:space:]]Models[[:space:]]to[[:space:]]Generate,[[:space:]]Recognize,[[:space:]]and[[:space:]]Reframe[[:space:]]Unhelpful[[:space:]]Thoughts/05270dbd-6ba1-4bb3-829f-8a19d48f76b0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Training[[:space:]]Trajectories[[:space:]]of[[:space:]]Language[[:space:]]Models[[:space:]]Across[[:space:]]Scales/3ed612d1-562a-4dda-9e32-8c2c23622bea_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Training-free[[:space:]]Neural[[:space:]]Architecture[[:space:]]Search[[:space:]]for[[:space:]]RNNs[[:space:]]and[[:space:]]Transformers/ae2338ee-485f-4496-ab72-b3a7657281d8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Transfer[[:space:]]and[[:space:]]Active[[:space:]]Learning[[:space:]]for[[:space:]]Dissonance[[:space:]]Detection_[[:space:]]Addressing[[:space:]]the[[:space:]]Rare-Class[[:space:]]Challenge/2fb3387e-9571-4f82-9d21-072494f83dd4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Transforming[[:space:]]Visual[[:space:]]Scene[[:space:]]Graphs[[:space:]]to[[:space:]]Image[[:space:]]Captions/c895e15e-57e7-4678-95ea-168adace1420_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Translation-Enhanced[[:space:]]Multilingual[[:space:]]Text-to-Image[[:space:]]Generation/46699d01-3ea6-43c3-bcde-283aca414429_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Tree-Based[[:space:]]Representation[[:space:]]and[[:space:]]Generation[[:space:]]of[[:space:]]Natural[[:space:]]and[[:space:]]Mathematical[[:space:]]Language/c38c3858-26aa-4703-8faa-c3d701f3383d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Trigger[[:space:]]Warning[[:space:]]Assignment[[:space:]]as[[:space:]]a[[:space:]]Multi-Label[[:space:]]Document[[:space:]]Classification[[:space:]]Problem/67adb51c-9d3d-411c-9fed-68fa290a6752_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Trillion[[:space:]]Dollar[[:space:]]Words_[[:space:]]A[[:space:]]New[[:space:]]Financial[[:space:]]Dataset,[[:space:]]Task[[:space:]]&[[:space:]]Market[[:space:]]Analysis/775f2d5b-d31d-46f5-97b9-ee758101f1e8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Two[[:space:]]Birds[[:space:]]One[[:space:]]Stone_[[:space:]]Dynamic[[:space:]]Ensemble[[:space:]]for[[:space:]]OOD[[:space:]]Intent[[:space:]]Classification/a9c88a46-24e8-44e5-a28f-d490b9614d44_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Two-Stage[[:space:]]Fine-Tuning[[:space:]]for[[:space:]]Improved[[:space:]]Bias[[:space:]]and[[:space:]]Variance[[:space:]]for[[:space:]]Large[[:space:]]Pretrained[[:space:]]Language[[:space:]]Models/b2dd19f1-ce94-4f20-beac-f5c19f1f8faa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/U-CREAT_[[:space:]]Unsupervised[[:space:]]Case[[:space:]]Retrieval[[:space:]]using[[:space:]]Events[[:space:]]extrAcTion/7518b9a6-3f3c-4929-914c-76f2f3b1c4ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UMRSpell_[[:space:]]Unifying[[:space:]]the[[:space:]]Detection[[:space:]]and[[:space:]]Correction[[:space:]]Parts[[:space:]]of[[:space:]]Pre-trained[[:space:]]Models[[:space:]]towards[[:space:]]Chinese[[:space:]]Missing,[[:space:]]Redundant,[[:space:]]and[[:space:]]Spelling[[:space:]]Correction/768e1563-4f4f-491f-8590-758e2969bd08_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UPPAM_[[:space:]]A[[:space:]]Unified[[:space:]]Pre-training[[:space:]]Architecture[[:space:]]for[[:space:]]Political[[:space:]]Actor[[:space:]]Modeling[[:space:]]based[[:space:]]on[[:space:]]Language/f6c9f8d3-379e-4bd9-b84c-80a1473092b0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/USSA_[[:space:]]A[[:space:]]Unified[[:space:]]Table[[:space:]]Filling[[:space:]]Scheme[[:space:]]for[[:space:]]Structured[[:space:]]Sentiment[[:space:]]Analysis/61c2edc5-32d1-4c91-9192-947ea689bc44_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UTC-IE_[[:space:]]A[[:space:]]Unified[[:space:]]Token-pair[[:space:]]Classification[[:space:]]Architecture[[:space:]]for[[:space:]]Information[[:space:]]Extraction/e7fd8b56-c533-4d9f-9d49-bc90352c0dd4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unbalanced[[:space:]]Optimal[[:space:]]Transport[[:space:]]for[[:space:]]Unbalanced[[:space:]]Word[[:space:]]Alignment/d6d23de0-8038-4d8e-aa30-c7d2f2c5a68a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Uncertainty[[:space:]]Guided[[:space:]]Label[[:space:]]Denoising[[:space:]]for[[:space:]]Document-level[[:space:]]Distant[[:space:]]Relation[[:space:]]Extraction/738fb39a-2d51-4b58-8867-86b9299a04fc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Uncovering[[:space:]]and[[:space:]]Categorizing[[:space:]]Social[[:space:]]Biases[[:space:]]in[[:space:]]Text-to-SQL/1e426778-958a-441c-b60b-dbfd0560d43e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Client[[:space:]]Reactions[[:space:]]in[[:space:]]Online[[:space:]]Mental[[:space:]]Health[[:space:]]Counseling/8a6f2249-6020-4426-b9e9-d94cf9a665b3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]Factual[[:space:]]Errors[[:space:]]in[[:space:]]Summarization_[[:space:]]Errors,[[:space:]]Summarizers,[[:space:]]Datasets,[[:space:]]Error[[:space:]]Detectors/0c73118a-c7af-4b00-9cf1-9422855fbf83_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]In-Context[[:space:]]Learning[[:space:]]via[[:space:]]Supportive[[:space:]]Pretraining[[:space:]]Data/affb4a1e-2dec-4081-960a-9f9600f8d9c0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]and[[:space:]]Bridging[[:space:]]the[[:space:]]Modality[[:space:]]Gap[[:space:]]for[[:space:]]Speech[[:space:]]Translation/6f8e5315-af6f-4d12-bf7c-b9375b9ef7d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Understanding[[:space:]]and[[:space:]]Improving[[:space:]]the[[:space:]]Robustness[[:space:]]of[[:space:]]Terminology[[:space:]]Constraints[[:space:]]in[[:space:]]Neural[[:space:]]Machine[[:space:]]Translation/362d2928-83e8-4df2-a3dd-0d52a137c94d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UniCoRN_[[:space:]]Unified[[:space:]]Cognitive[[:space:]]Signal[[:space:]]ReconstructioN[[:space:]]bridging[[:space:]]cognitive[[:space:]]signals[[:space:]]and[[:space:]]human[[:space:]]language/2b2445f1-3a7b-4056-b0a3-f8073649ed14_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UniEX_[[:space:]]An[[:space:]]Effective[[:space:]]and[[:space:]]Efficient[[:space:]]Framework[[:space:]]for[[:space:]]Unified[[:space:]]Information[[:space:]]Extraction[[:space:]]via[[:space:]]a[[:space:]]Span-extractive[[:space:]]Perspective/23295a99-2382-4942-baa9-ebcbbf521161_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UniEvent_[[:space:]]Unified[[:space:]]Generative[[:space:]]Model[[:space:]]with[[:space:]]Multi-Dimensional[[:space:]]Prefix[[:space:]]for[[:space:]]Zero-Shot[[:space:]]Event-Relational[[:space:]]Reasoning/09902c3a-e7d7-4bc1-b7f1-9e67cccb0aef_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UniLG_[[:space:]]A[[:space:]]Unified[[:space:]]Structure-aware[[:space:]]Framework[[:space:]]for[[:space:]]Lyrics[[:space:]]Generation/806baa06-806a-4b22-96eb-61f06802ba62_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UniSumm[[:space:]]and[[:space:]]SummZoo_[[:space:]]Unified[[:space:]]Model[[:space:]]and[[:space:]]Diverse[[:space:]]Benchmark[[:space:]]for[[:space:]]Few-Shot[[:space:]]Summarization/4e3bf36a-55e7-4676-972b-9f340cb20033_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unified[[:space:]]Demonstration[[:space:]]Retriever[[:space:]]for[[:space:]]In-Context[[:space:]]Learning/71a90fe8-5944-4044-83cd-5c2f32a4d6a4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unifying[[:space:]]Cross-Lingual[[:space:]]and[[:space:]]Cross-Modal[[:space:]]Modeling[[:space:]]Towards[[:space:]]Weakly[[:space:]]Supervised[[:space:]]Multilingual[[:space:]]Vision-Language[[:space:]]Pre-training/6be32a24-6fef-42ef-92fa-45f399b1cd60_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/UnitY_[[:space:]]Two-pass[[:space:]]Direct[[:space:]]Speech-to-speech[[:space:]]Translation[[:space:]]with[[:space:]]Discrete[[:space:]]Units/1c5edf91-d7bf-404e-a26e-2b2bf943b954_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unnatural[[:space:]]Instructions_[[:space:]]Tuning[[:space:]]Language[[:space:]]Models[[:space:]]with[[:space:]](Almost)[[:space:]]No[[:space:]]Human[[:space:]]Labor/131e6a21-ee88-4273-9c7e-8fea23fe4f99_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]Discontinuous[[:space:]]Constituency[[:space:]]Parsing[[:space:]]with[[:space:]]Mildly[[:space:]]Context-Sensitive[[:space:]]Grammars/f92dec56-2fdb-4629-9daa-0e619022f42f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]Extractive[[:space:]]Summarization[[:space:]]of[[:space:]]Emotion[[:space:]]Triggers/158defa9-2312-4f2a-ad2a-9c6b5b44c055_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]Graph-Text[[:space:]]Mutual[[:space:]]Conversion[[:space:]]with[[:space:]]a[[:space:]]Unified[[:space:]]Pretrained[[:space:]]Language[[:space:]]Model/e59a50d8-cc6f-4afd-9bbb-527946dd4b1a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]Melody-to-Lyrics[[:space:]]Generation/1b5d4b5b-29db-4121-9b98-824997e38c49_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]Open-domain[[:space:]]Keyphrase[[:space:]]Generation/5c84337d-6c3b-47f6-b651-80797035a603_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Unsupervised[[:space:]]Selective[[:space:]]Rationalization[[:space:]]with[[:space:]]Noise[[:space:]]Injection/b0220425-af2b-4976-960c-7813100fa163_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Using[[:space:]]Domain[[:space:]]Knowledge[[:space:]]to[[:space:]]Guide[[:space:]]Dialog[[:space:]]Structure[[:space:]]Induction[[:space:]]via[[:space:]]Neural[[:space:]]Probabilistic[[:space:]]Soft[[:space:]]Logic/a0a2335f-a343-4814-97bd-ad1cf91470d3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Using[[:space:]]Neural[[:space:]]Machine[[:space:]]Translation[[:space:]]for[[:space:]]Generating[[:space:]]Diverse[[:space:]]Challenging[[:space:]]Exercises[[:space:]]for[[:space:]]Language[[:space:]]Learner/f7f312c6-bf6d-419c-86dd-e6e475d7028c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Using[[:space:]]counterfactual[[:space:]]contrast[[:space:]]to[[:space:]]improve[[:space:]]compositional[[:space:]]generalization[[:space:]]for[[:space:]]multi-step[[:space:]]quantitative[[:space:]]reasoning/1f359202-01ad-4526-9425-379e7002cf2d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/VLN-Trans_[[:space:]]Translator[[:space:]]for[[:space:]]the[[:space:]]Vision[[:space:]]and[[:space:]]Language[[:space:]]Navigation[[:space:]]Agent/53606133-158b-4733-8933-c3fb3e04cb6b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/VSTAR_[[:space:]]A[[:space:]]Video-grounded[[:space:]]Dialogue[[:space:]]Dataset[[:space:]]for[[:space:]]Situated[[:space:]]Semantic[[:space:]]Understanding[[:space:]]with[[:space:]]Scene[[:space:]]and[[:space:]]Topic[[:space:]]Transitions/6f5d5af5-17fa-43d3-8a24-b11b7677a488_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/VendorLink_[[:space:]]An[[:space:]]NLP[[:space:]]approach[[:space:]]for[[:space:]]Identifying[[:space:]]&[[:space:]]Linking[[:space:]]Vendor[[:space:]]Migrants[[:space:]]&[[:space:]]Potential[[:space:]]Aliases[[:space:]]on[[:space:]]Darknet[[:space:]]Markets/ac1c7701-2e2e-4b03-a422-ddd8be8ab20a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2023/Verify-and-Edit_[[:space:]]A[[:space:]]Knowledge-Enhanced[[:space:]]Chain-of-Thought[[:space:]]Framework/98bee19c-b2c2-4df3-9a06-e5fbd642ad6d_origin.pdf filter=lfs diff=lfs merge=lfs -text
2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/502ba00b-aedc-401c-9087-9dfb7448dbe0_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.
2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/502ba00b-aedc-401c-9087-9dfb7448dbe0_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/502ba00b-aedc-401c-9087-9dfb7448dbe0_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eecef269192dccbdefe9cb1d792efc8659cf904cb803bcd4d69f814e2f4d3f30
+size 703260
2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/full.md
ADDED
@@ -0,0 +1,394 @@
# Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models

Qingyu Tan$^{*1,2}$  Hwee Tou Ng$^{†2}$  Lidong Bing$^{†1}$

$^{1}$ DAMO Academy, Alibaba Group

$^{2}$ Department of Computer Science, National University of Singapore

{qingyu.tan,l.bing}@alibaba-inc.com

{qtan6,nght}@comp.nus.edu.sg

# Abstract

Reasoning about time is of fundamental importance. Many facts are time-dependent. For example, athletes change teams from time to time, and different government officials are elected periodically. Previous time-dependent question answering (QA) datasets tend to be biased in either their coverage of time spans or question types. In this paper, we introduce a comprehensive probing dataset TEMPREASON to evaluate the temporal reasoning capability of large language models. Our dataset includes questions of three temporal reasoning levels. In addition, we also propose a novel learning framework to improve the temporal reasoning capability of large language models, based on temporal span extraction and time-sensitive reinforcement learning. We conducted experiments in closed book QA, open book QA, and reasoning QA settings and demonstrated the effectiveness of our approach<sup>1</sup>.
# 1 Introduction

In recent years, large language models (LLMs) have achieved significant success in many natural language processing (NLP) tasks, such as natural language understanding (NLU) (Fei et al., 2023), information extraction (IE) (Ding et al., 2023), and question answering (QA) (Ye et al., 2023; Zhao et al., 2023). Many facts and answers depend on their related time scopes, such as 'What soccer club was Lionel Messi playing for?'. Chia et al. (2022) pointed out that around $48\%$ of the qualifiers in the widely used knowledge base Wikidata (Vrandecic and Krötzsch, 2014) are time-related. That is, a significant number of the knowledge triples in the Wikidata KB have expiry dates. Correct understanding of temporal concepts is crucial for language models to be successful in real-world applications. To examine the temporal reasoning capabilities of LLMs, the Time-Sensitive Question Answering (TSQA) task has been proposed and several evaluation datasets have been published for research purposes. The Time-sensitive QA dataset (Chen et al., 2021) and the TEMPLAMA dataset (Dhingra et al., 2022) were constructed from the Wikidata temporal KB, while StreamingQA (Liska et al., 2022) was constructed from news article collections in the English WMT challenges from 2007 to 2020. One consensus of prior work is that time-sensitive QA is a challenging task whose performance is still far below human performance. However, prior work did not provide a systematic analysis of LMs' temporal reasoning capability. In this paper, we aim to systematically analyze this capability and identify the strengths and weaknesses of LMs on temporal reasoning.

As shown in Figure 1, humans' understanding of temporal reasoning can be broken down into three levels: time-time (L1) relations, time-event (L2) relations, and event-event (L3) relations. For the understanding of time-time relations, humans can easily determine the relation between two timestamps $t_1$ and $t_2$ on the time axis. For example, when humans are asked 'What is the year after 2020?', they are able to answer this question without any external information. This level of temporal understanding can be regarded as a set of logic rules and is highly generalizable across different times, yet this type of reasoning was overlooked in prior TSQA research (Ning et al., 2020; Chen et al., 2021; Dhingra et al., 2022). For time-event relations, the reasoning process requires grounding events to their specific time ranges. In this paper, the concept of events includes time-dependent facts. Humans either memorize a large number of time-event pairs or need to rely on relevant contexts to deduce such relations. An example question is 'What soccer club was Lionel Messi playing for in Dec 2010?', where a time is specified in the question, and the answer changes based on the given time. If this question is posed to a person who is unfamiliar with sports, this person also needs external information to provide the answer. Answering this type of question requires information retrieval and temporal grounding. For event-event relations, there are multiple reasoning paths to determine such relations. One possible path is to first identify the timestamps of the different events and then perform time-time reasoning. Another path is to search for textual cues of relative relations, such as 'before', 'after', 'during', and 'simultaneous'.

Figure 1: Illustration of three levels of understanding towards time.

- Level 1 (Time-Time Relation): an abstract concept that can be viewed as a set of logic rules; once a human understands the concepts, they are easily generalizable. Example question: 'What is the year after 2010?' Target: 2011.
- Level 2 (Time-Event Relation): a knowledge-intensive concept, where humans also need to memorize such relations or leverage external context to deduce them. Example question: 'What team did Leo Messi play for in 2010?' Target: Barcelona.
- Level 3 (Event-Event Relation): requires a combination of memorization, deduction, and understanding. Example question: 'What team did Leo Messi play for after Barcelona?' Target: Paris Saint-Germain.
We first conducted a simple preliminary experiment to probe LLMs' L1 temporal reasoning capability. We found that not only do LMs perform poorly on the time-time relation task, but they are also heavily biased in favor of contemporary years (2000-2020). This may be due to imbalanced term frequencies in the pre-training corpora. Most LLMs (such as BERT, GPT, and T5) are pre-trained on raw texts from a snapshot taken at a specific time, typically around 2018 to 2020. Therefore, the time expression vocabulary is highly dependent on term frequencies in the pre-training corpora: year tokens that occur frequently tend to have a smaller index in the vocabulary, while uncommon years generally have larger indices or are split into subtokens. Take the T5 tokenizer as an example: the year '2014' is tokenized as '2014', whereas the year '2021' is tokenized as '20' and '21'. This means that language models only learn the co-occurrences of time expressions and their context.
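
This tokenization asymmetry is easy to verify directly. Below is a minimal sketch, assuming the Hugging Face `transformers` library and the `t5-large` tokenizer; the outputs in the comments are what the observation above predicts, though exact subword splits can vary across tokenizer versions.

```python
# Sketch: inspect how a T5 tokenizer splits frequent vs. infrequent years.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-large")

for year in ["2014", "2021"]:
    # tokenize() returns the subword pieces without special tokens.
    print(year, "->", tokenizer.tokenize(year))

# Expected, per the paper's observation:
# 2014 -> ['▁2014']
# 2021 -> ['▁20', '21']
```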
Given these findings, we found that the recently proposed TSQA dataset TEMPLAMA has several main drawbacks. Firstly, the time span of the dataset is only from 2010 to 2020, a distribution highly biased in favor of LMs. Secondly, it only focuses on questions about time-event relations. To overcome these shortcomings, we created a more comprehensive TSQA benchmark, TEMPREASON, which spans a longer time range and covers all three types of temporal understanding. We conducted comprehensive experiments in closed book QA, open book QA, and reasoning QA settings. We found that the temporal reasoning capabilities of LLMs are highly variable with respect to the reference time in the question: LLMs perform well on contemporary years and poorly on low-resource years.

Moreover, we propose a novel temporal learning framework based on temporal span extraction and time-sensitive reinforcement learning. Our proposed framework encourages LMs to generate temporally correct answers while penalizing predictions that do not satisfy the temporal constraints. Experimental results show that our proposed benchmark TEMPREASON provides a more comprehensive evaluation of LMs' temporal reasoning capability and that our model consistently outperforms strong baselines.
# 2 Preliminaries

We aim to examine the capability of LMs for simple year prediction. We first design a set of question templates that reflect the basic concepts of temporal prediction, as shown in Table 1. Questions of this kind can be easily answered by humans, this understanding is highly generalizable across the years, and all the expected answers are years in numeric form. In order to gain a more comprehensive understanding of temporal expressions, we divide 1900 to 2040 into seven 20-year time periods and randomly generate 400 questions for each period. We then use three language models to make predictions on these questions. The first LM is a T5-large model fine-tuned on the Natural Questions dataset (T5-L-NQ; Kwiatkowski et al., 2019). This QA dataset is one of the largest open-domain QA datasets, and Roberts et al. (2020) demonstrated that language models fine-tuned on such data can achieve competitive performance on the open-domain QA task. The second LM is the FLAN-T5-Large model (Wei et al., 2022). This model is instruction-tuned on data from more than 60 NLP tasks; it demonstrates competitive zero-shot reasoning capability and achieves strong performance on many natural language understanding and generation tasks. The third model is the popular ChatGPT model (Ouyang et al., 2022). To ensure that the predictions are consistent, we used the gpt-3.5-0301 version of ChatGPT. We evaluate the temporal reasoning capability of the three language models using the following three metrics: (1) exact match (EM), a standard metric for QA; and, since the expected answers are numeric, (2) mean absolute error (MAE) and (3) trend accuracy (Trend Acc). Trend accuracy measures whether the predicted year falls on the correct side (before or after) of the reference year; if the trend is correct, the prediction is deemed correct.

<table><tr><td>Ref. Year</td><td>Question</td><td>Target</td></tr><tr><td>2011</td><td>What is the year x years before 2011?</td><td>2011 - x</td></tr><tr><td>2010</td><td>What is the year before 2010?</td><td>2009</td></tr><tr><td>1949</td><td>What is the year x years after 1949?</td><td>1949 + x</td></tr><tr><td>1905</td><td>What is the year after 1905?</td><td>1906</td></tr></table>

Table 1: Templates used for year prediction (yearly level). The reference year and interval $x$ are randomly generated, where the reference year is within a specified time range and $x \leq 10$. All the answers to these questions are numeric representations of years.

The experimental results on year prediction are shown in Table 2. We report the scores of T5-L-NQ on the left, FLAN-T5-L in the middle, and ChatGPT on the right. From these experiments, we have several interesting observations. (1) The ChatGPT model is able to solve this problem with high accuracy (99.6 overall EM), although it still made a few mistakes in the 1900-1940 time period. (2) The first two LMs (T5-L-NQ and FLAN-T5-L) are biased towards contemporary time ranges: their EM scores between 2000 and 2020 are significantly higher than in the rest of the time ranges. This could be the result of the higher term frequencies of contemporary year tokens in the pre-training corpora. Since many large LMs were trained and released after 2018, the pre-training corpora may contain more year expressions that are closer to that date. In contrast, the first two LMs perform significantly worse on past (1900-2000) and future (2020-2040) years. (3) The first two LMs lack numeric reasoning ability with respect to time. The answers provided by these LMs for the time prediction questions are in numeric form, indicating that the LMs understand what the questions are asking; however, the EM scores are all in the 20-30 range, except for T5-L-NQ in the 2000-2020 time range. This indicates that LMs have a poor estimation of temporal concepts. Besides, we find that the FLAN-T5-L model has significantly lower EM scores than T5-L-NQ but achieves lower MAE across most of the time ranges, indicating that the instruction tuning implemented in FLAN has implicitly improved the numeric reasoning capability of T5. (4) On the other hand, all LMs are good at catching (before/after) trends, indicating that at least the LMs understand the concepts of before and after well: all LMs achieve over $90\%$ trend accuracy across the time ranges before 2020. However, for the first two LMs, this capability does not generalize to the future, as their performance in 2020-2040 is significantly worse than in other time periods.

<table><tr><td>Time Range</td><td>EM (↑)</td><td>MAE (↓)</td><td>Trend Acc (↑)</td></tr><tr><td>1900-1920</td><td>17.5/6.8/99.5</td><td>28.0/7.4/0.0</td><td>99.5/96.8/100</td></tr><tr><td>1920-1940</td><td>31.5/1.8/98.9</td><td>16.4/11.9/0.1</td><td>94.5/94.5/100</td></tr><tr><td>1940-1960</td><td>17.5/3.3/100</td><td>7.7/9.2/0.0</td><td>100/91.0/100</td></tr><tr><td>1960-1980</td><td>22.5/3.5/100</td><td>17.1/7.5/0.0</td><td>94.0/92.0/100</td></tr><tr><td>1980-2000</td><td>23.0/10.0/100</td><td>7.9/6.9/0.0</td><td>98.5/100/100</td></tr><tr><td>2000-2020</td><td>47.5/20.0/100</td><td>51.2/2.3/0.0</td><td>97.0/100/100</td></tr><tr><td>2020-2040</td><td>23.5/11.3/100</td><td>15.7/8.9/0.0</td><td>84.5/83.8/100</td></tr><tr><td>Average</td><td>26.1/8.1/99.6</td><td>20.6/7.7/0.0</td><td>95.4/94.0/100</td></tr></table>

Table 2: Evaluation results of T5-L-NQ (Raffel et al., 2020) (left), FLAN-T5-Large (Wei et al., 2022) (middle), and ChatGPT (right) models on the year prediction task across different time ranges. Bold scores refer to the best performance of each model in each column.
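
For concreteness, here is a minimal sketch of how the three metrics could be computed over string predictions. The helper names and the regex-based year parsing are our own illustrative assumptions, not the authors' released evaluation code; gold answers are assumed to be numeric years.

```python
import re
from typing import List, Optional

def exact_match(pred: str, gold: str) -> bool:
    # Standard QA exact match after whitespace/case normalization.
    return pred.strip().lower() == gold.strip().lower()

def parse_year(text: str) -> Optional[int]:
    # Pull the first 3-4 digit number out of a prediction like "the year 1906".
    m = re.search(r"\b(\d{3,4})\b", text)
    return int(m.group(1)) if m else None

def evaluate(preds: List[str], golds: List[int], ref_years: List[int]) -> dict:
    em = trend = n_numeric = mae_sum = 0
    for pred, gold, ref in zip(preds, golds, ref_years):
        em += exact_match(pred, str(gold))
        year = parse_year(pred)
        if year is None:
            continue
        n_numeric += 1
        mae_sum += abs(year - gold)
        # Trend accuracy: prediction and gold lie on the same side of the
        # reference year (assumption: the gold never equals the reference).
        trend += (year - ref) * (gold - ref) > 0
    n = len(preds)
    return {"EM": 100 * em / n,
            "MAE": mae_sum / max(n_numeric, 1),
            "TrendAcc": 100 * trend / max(n_numeric, 1)}

print(evaluate(["2011", "the year 2008"], [2011, 2009], [2010, 2010]))
```

Note that trend accuracy only checks which side of the reference year the prediction falls on, so a prediction with a large MAE can still count as trend-correct.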
# 3 Comprehensive Benchmark for Temporal Reasoning

# 3.1 TEMPREASON Dataset

Based on the findings of the previous section, we found that the recently proposed TEMPLAMA TSQA dataset (Dhingra et al., 2022) has several major limitations. Firstly, it only contains questions from 2010 to 2020, which strongly favor LMs' temporal reasoning biases. Secondly, the TEMPLAMA dataset is heavily biased towards long-duration facts: $70.69\%$ of its questions have the most frequent answer for a given subject. That is, the TEMPLAMA dataset may encourage models to learn shortcuts and memorize the most frequent answers instead of learning temporal reasoning capability. If research on time-sensitive question answering only focuses on adaptation to a short period of time, maintenance and continual adaptation will be highly expensive. As shown in the previous section, language models perform poorly on past and future time spans, and if a language model is not able to understand the changes from the past to the present, it is highly difficult for this model to understand the evolution from the present to the future. In order to probe temporal reasoning ability in a more systematic manner, we constructed a new comprehensive dataset, TEMPREASON. For L1 time-time relation reasoning, we extend the year prediction task to month prediction, since year prediction questions can be enumerated in a few thousand examples and LMs may simply memorize them. Specifically, we randomly pick a reference time $t$ within a specific time range and then synthesize questions with respect to that time. The questions have the form 'What is the date x years and y months before/after $t$?'. In this way, we can randomly generate L1 questions and answers within the time period. To avoid data leakage, we make sure each generated question is unique. We then randomly split the questions into train, dev, and test sets. To evaluate the generalizability of L1 temporal reasoning, we also create a future test set covering 2022 to 2040.

<table><tr><td></td><td>Train</td><td>Dev</td><td>Test</td></tr><tr><td>Time Range</td><td>1014-2022</td><td>634-2023</td><td>998-2023</td></tr><tr><td>L1-Questions</td><td>400,000</td><td>4,000</td><td>4,000</td></tr><tr><td>L2-Questions</td><td>16,017</td><td>5,521</td><td>5,397</td></tr><tr><td>L3-Questions</td><td>13,014</td><td>4,437</td><td>4,426</td></tr><tr><td>Subjects</td><td>3,000</td><td>1,000</td><td>1,000</td></tr><tr><td>Facts</td><td>16,017</td><td>5,521</td><td>5,397</td></tr><tr><td>Facts/subjects</td><td>5.3</td><td>5.5</td><td>5.4</td></tr></table>

Table 3: Dataset statistics of TEMPREASON.
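
The L1 generation procedure is simple enough to sketch end to end. The helper below is an illustrative assumption, not the released generation script: it samples a reference date, offsets of up to 10 years and 11 months, and a direction, then computes the gold answer with month arithmetic. Deduplication against previously generated questions, which the text requires, would wrap this in a loop with a seen-set.

```python
import random
from datetime import date

def make_l1_question(start_year=1014, end_year=2022, seed=None):
    rng = random.Random(seed)
    ref = date(rng.randint(start_year, end_year), rng.randint(1, 12), 1)
    x, y = rng.randint(0, 10), rng.randint(0, 11)
    direction = rng.choice(["before", "after"])
    sign = -1 if direction == "before" else 1
    # Shift by whole months; month arithmetic avoids day-overflow issues.
    total = ref.year * 12 + (ref.month - 1) + sign * (12 * x + y)
    answer = date(total // 12, total % 12 + 1, 1)
    question = (f"What is the date {x} years and {y} months "
                f"{direction} {ref.strftime('%b, %Y')}?")
    return question, answer.strftime("%b, %Y")

print(make_l1_question(seed=0))
```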
For L2 and L3 reasoning, similar to Dhingra et al. (2022) and Chen et al. (2021), we also leverage the Wikidata KB as the knowledge source. We first preprocess the 20 Nov 2022 dump of the Wikidata (Vrandecic and Krötzsch, 2014) knowledge base (KB) to extract all time-dependent facts, and we keep the facts of the 10 time-sensitive relations mentioned in the TEMPLAMA dataset. We process the knowledge triples and qualifiers into quintuplet format $(s, r, o, t_s, t_e)$, where $s$ is the subject, $r$ is the relation, $o$ is the object, and $t_s$ and $t_e$ are the start time and end time of this fact. We group all the temporal facts by $s$ and $r$, so that facts in the same group are all relevant to the subject $s$. A group of facts can be denoted as $S = \{(s,r,o_i,t_{s_i},t_{e_i}) \mid i \in 1 \dots N\}$ and is sorted chronologically, where $N$ is the number of facts within the group. Since we mainly want to focus on questions whose answers change with time, we only keep the groups that contain three or more temporal facts, ensuring that each group has at least three time-dependent answers. Moreover, since the Wikidata KB is highly class-imbalanced, we keep a maximum of 2,000 subjects for each relation type. We then create cloze-style questions based on the time-dependent facts. For the time-event (L2) type of questions, we randomly select a time $t_r$ between $t_s$ and $t_e$, and we then create a question with the query $(s, r, ?, t_r)$ and a set of manually defined question templates. The templates can be found in Table 13 in Appendix A. For the event-event (L3) type of questions, we first identify the 'before/after' relation pairs within a group $S$ (we only keep the 1-hop pairs), and we then create an event-event question for each 'before/after' pair using templates similar to those of the L2 questions (Table 13). The statistics of our TEMPREASON dataset can be found in Table 3. We also compare our dataset with prior work in Appendix C.
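
Here is a compact sketch of the grouping and L2 question construction just described. Facts are simplified to year-granularity quintuplets, and the single template is a hypothetical stand-in for the paper's Table 13; the three-fact group threshold and the random query time $t_r$ follow the text.

```python
import random
from collections import defaultdict

# Hypothetical stand-in for the paper's Table 13 templates (one per relation).
TEMPLATES = {"P54": "Which team did {subject} play for in {time}?"}

def build_l2_questions(facts, seed=0):
    rng = random.Random(seed)
    groups = defaultdict(list)
    for s, r, o, t_s, t_e in facts:          # quintuplets (s, r, o, t_s, t_e)
        groups[(s, r)].append((o, t_s, t_e))
    questions = []
    for (s, r), group in groups.items():
        if len(group) < 3:                   # keep only answers that change over time
            continue
        group.sort(key=lambda f: f[1])       # chronological order by start time
        for o, t_s, t_e in group:
            t_r = rng.randint(t_s, t_e)      # random query time inside the fact's span
            questions.append({
                "question": TEMPLATES[r].format(subject=s, time=t_r),
                "answer": o,
                # Other answers in the group double as negatives for TSRL (Section 4).
                "negatives": [g[0] for g in group if g[0] != o],
            })
    return questions

facts = [
    ("Lionel Messi", "P54", "FC Barcelona", 2004, 2021),
    ("Lionel Messi", "P54", "Paris Saint-Germain", 2021, 2023),
    ("Lionel Messi", "P54", "Inter Miami", 2023, 2024),
]
print(build_l2_questions(facts)[0])
```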
# 3.2 Problem Settings

The time-sensitive question answering (TSQA) task is formally defined as follows: given an input question and its corresponding time (Figure 2), the model is asked to output the answer to this question, and the answers are evaluated by token-level F1 and exact match (EM) scores. Intuitively, the difficulty of the TSQA task is highly dependent on the context provided for each question. The challenges of the TSQA task can be broken down into three levels. (1) Answer Retrieval. The first challenge of TSQA is finding the possible answers, which is the same challenge as in normal open-domain question answering. For questions in TEMPREASON, each question may have 5.3 to 5.5 possible answers on average (Table 3). (2) Time Grounding. The second challenge of TSQA is temporal grounding, that is, finding the start time and end time of each possible answer. (3) Temporal Reasoning. The last challenge is finding the correct answer among the possible candidates based on the specified time constraints.

Figure 2: Sample TEMPREASON questions and contexts. For humans, the L1 question can be answered without any context provided, whereas for L2 and L3 questions, humans need to ground the events to timestamps and then perform temporal reasoning.

To thoroughly examine the temporal reasoning capability of large language models in different aspects, we propose to tackle TSQA in three different context settings: (1) closed book QA, (2) open book QA, and (3) reasoning QA. We describe the three problem settings as follows.

Closed Book Question Answering (CBQA). CBQA is a common task formulation in time-sensitive QA research (Dhingra et al., 2022; Liska et al., 2022). In this setting, only the question is prompted to the language model, which is then asked to output the answer without access to any natural language text. In Figure 2, the example question asks about the soccer athlete Lionel Messi. The most difficult part of this question is the memorization of Lionel Messi's experiences, since people who are not sports fans may not be able to answer such questions easily.

Open Book Question Answering (OBQA). The OBQA formulation is a more realistic problem setting, where external context in the form of natural language text is provided to help LMs answer the questions. As shown in the middle of Figure 2, we use the Wikipedia page of the subject entity as part of the prompt to the language model, together with the question.

Reasoning QA. In this setting, all the relevant temporal facts within the group $S = \{(s,r,o_i,t_{s_i},t_{e_i}) \mid i \in 1 \dots N\}$ are provided in structured form as part of the prompt (right of Figure 2). This is a simplified version of OBQA, since all possible answers and their time ranges are provided in the context. To prevent the models from learning shortcuts, the provided facts are re-ordered randomly. Essentially, this setting resembles human temporal reasoning: the language models are required to deduce answers based on the time ranges of all possible answers, and a human is able to deduce the answer by locating the query time within the group. Intuitively, human-level performance in this setting can be regarded as $100\%$.
# 4 Improving Temporal Reasoning

In order to improve temporal reasoning capabilities, we propose a temporal training framework for sequence-to-sequence language models. Firstly, we pre-train the language model with a temporal span extraction task to encourage the model to pay more attention to temporal and entity spans. We then fine-tune the model on task-specific data in TEMPREASON. Finally, we further fine-tune the language model by time-sensitive reinforcement learning with our novel reward function.

Temporal Span Extraction Pre-Training (TSE). Conventional language model pre-training randomly masks texts and reconstructs the original sentence. However, the relative importance of tokens and spans differs. Guu et al. (2020) first introduced salient span masking, i.e., reconstructing masked named entities, as an intermediate pre-training technique for language models, and this approach has shown positive effects on the QA task. In order for the language model to capture more knowledge about time-related spans, we first pre-train on 100K Wikipedia articles with a temporal and entity span extraction task. Specifically, we use the spaCy NER tagger, trained on the OntoNotes 5.0 corpus (Weischedel et al., 2013), to extract the temporal and entity spans in the 100K Wikipedia articles. We randomly mask $50\%$ of the entity and temporal spans in a given paragraph and treat this paragraph as the input of the T5 model. In this way, the model pays more attention to the contexts that are relevant to temporal shifts. The pre-trained language model is then fine-tuned with TEMPREASON question-answer pairs in the different settings.
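
The TSE input construction can be sketched as follows, assuming spaCy's OntoNotes-trained English pipeline and T5's sentinel-token span-corruption format. Which entity labels are masked beyond dates, and the exact target formatting, are assumptions on our part.

```python
import random
import spacy

nlp = spacy.load("en_core_web_sm")  # English pipeline trained on OntoNotes 5.0

def mask_spans(paragraph: str, mask_ratio: float = 0.5, seed: int = 0):
    rng = random.Random(seed)
    doc = nlp(paragraph)
    spans = list(doc.ents)  # entity spans, including temporal ones (DATE/TIME)
    chosen = sorted(rng.sample(spans, int(len(spans) * mask_ratio)),
                    key=lambda s: s.start_char)
    source, targets, cursor = "", [], 0
    for i, span in enumerate(chosen):
        # Replace each chosen span with a T5 sentinel token.
        source += paragraph[cursor:span.start_char] + f"<extra_id_{i}>"
        targets.append(f"<extra_id_{i}> {span.text}")
        cursor = span.end_char
    source += paragraph[cursor:]
    return source, " ".join(targets)  # (model input, reconstruction target)

src, tgt = mask_spans("Lionel Messi joined Paris Saint-Germain in August 2021.")
print(src)
print(tgt)
```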
Supervised Fine-Tuning (SFT). The TSE pre-trained language model with parameters $\theta$ is then fine-tuned on the task data of each setting. The input prompt to the LM is the concatenation of question $q$ and context $c$, and the objective of SFT is to maximize the probability $P(a \mid q, c)$, where $a$ is the correct answer.

Figure 3: An example of time-sensitive reinforcement learning (TSRL). The ground truth is highlighted in green and the negative answers are highlighted in yellow.

Time-Sensitive Reinforcement Learning (TSRL). One of the key challenges of temporal reasoning is that there are multiple possible answers for one subject. For a given fact $x = (s, r, o_j, t_{s_j}, t_{e_j})$, we have the facts in the same group $S_N = \{(s, r, o_i, t_{s_i}, t_{e_i}) \mid i \in 1 \dots N, i \neq j\}$. These facts have the same subject and relation as the given fact, but hold in other time periods. Therefore, for a question related to the fact $x$, we can collect the negative answers $N = \{o_i \mid i \in 1 \dots N, i \neq j\}$ within the same group as the negative sample set for TSQA. An example of such negative answers is shown in Figure 3. For a given question related to fact $x$, we want to maximize the probability of the correct answer $o_j$ while penalizing the model when it outputs temporally wrong answers. The correct answer and the negative answers are used in our reward function. We first calculate the positive score $p(x)$ of the model prediction $\theta(x)$ with respect to the ground truth:
$$
p(x) = F(\theta(x), o_j) \tag{1}
$$

where $F$ refers to the scoring function for reward computation; specifically, we use the EM scoring function as $F$. We then calculate the negative score $n(x)$ by:

$$
n(x) = \max \left\{ F(\theta(x), o_i) \mid i \neq j \right\} \tag{2}
$$

The negative score will be 1 if the model prediction returns a temporally wrong answer. Finally, the reward function for TSRL is calculated as:

$$
R(x) = \begin{cases} p(x) & p(x) \geq n(x) \\ -n(x) & n(x) > p(x) \end{cases} \tag{3}
$$

The reward function is designed to give positive rewards for predictions that match the ground truth and negative rewards for predictions that match the answers in the negative answer set $N$. We then optimize the fine-tuned language model with the Proximal Policy Optimization algorithm (Schulman et al., 2017). We denote our final model as TempT5.
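
Equations (1)-(3) translate directly into a reward callable that a PPO loop (e.g., one built with a library such as trl) could consume. The sketch below uses exact match as the scoring function $F$, as the text specifies; the function names are ours.

```python
from typing import List

def em_score(pred: str, ans: str) -> float:
    # Exact match after whitespace/case normalization, used as F.
    return float(pred.strip().lower() == ans.strip().lower())

def tsrl_reward(prediction: str, gold: str, negatives: List[str]) -> float:
    p = em_score(prediction, gold)                                       # Eq. (1)
    n = max((em_score(prediction, o) for o in negatives), default=0.0)   # Eq. (2)
    return p if p >= n else -n                                           # Eq. (3)

# +1 for the temporally correct answer, -1 for an answer that is correct for
# the subject but wrong for the queried time, and 0 otherwise.
print(tsrl_reward("Barcelona", "Barcelona", ["Paris Saint-Germain"]))            # 1.0
print(tsrl_reward("Paris Saint-Germain", "Barcelona", ["Paris Saint-Germain"]))  # -1.0
```

The asymmetry is the point: an answer that is plausible for the subject but temporally wrong is punished, which is exactly the most-frequent-answer shortcut that TEMPLAMA-style data can otherwise reward.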
# 5 Experiments

# 5.1 Experimental Settings

We conduct experiments in each setting proposed in Section 3.2. The compared baselines are:

- FLAN-T5-Large (Wei et al., 2022). This model is fine-tuned on data from over 60 NLP tasks, and the authors showed that large-scale instruction tuning significantly improves the model's performance on few-shot reasoning. We evaluate the model's zero-shot performance on temporal reasoning.
- ChatGPT (Ouyang et al., 2022). This model is initialized from GPT-3 and further trained to follow human instructions. We used the gpt-3.5-0301 version of ChatGPT for more consistent evaluation. Since this model is neither open source nor free, we only examined its performance on 200 examples for each setting.
- T5-SFT (Raffel et al., 2020). This baseline is based on supervised fine-tuning of the conventional T5 model. We use the T5-base model in our experiments and fine-tune it on each setting of TEMPREASON (Section 3.2).
| 159 |
+
# 5.2 Experimental Results
|
| 160 |
+
|
| 161 |
+
In Table 4, we show the experimental results on the test sets of TEMPREASON. We then analyze the performance by each level of temporal understanding.
|
| 162 |
+
|
| 163 |
+
L1 Understanding. For L1 temporal understanding, the performance of FLAN-T5-L and ChatGPT
|
| 164 |
+
|
| 165 |
+
<table><tr><td rowspan="2">Question Type</td><td rowspan="2">Setting</td><td colspan="2">FLAN-T5-L</td><td colspan="2">ChatGPT</td><td colspan="2">T5-SFT</td><td colspan="2">TempT5</td><td rowspan="2">Δ F1</td></tr><tr><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td>L1: Time-Time</td><td>CBQA</td><td>0.0</td><td>2.9</td><td>30.5</td><td>56.7</td><td>100</td><td>100</td><td>100</td><td>100</td><td>+0.0</td></tr><tr><td rowspan="3">L2: Time-Event</td><td>CBQA</td><td>0.5</td><td>9.2</td><td>6.5</td><td>11.5</td><td>1.4</td><td>23.2</td><td>1.5</td><td>23.4</td><td>+0.2</td></tr><tr><td>ReasonQA</td><td>57.3</td><td>66.3</td><td>47.5</td><td>51.0</td><td>82.6</td><td>87.1</td><td>84.8</td><td>88.9</td><td>+1.8</td></tr><tr><td>OBQA</td><td>9.4</td><td>22.5</td><td>8.5</td><td>16.1</td><td>14.8</td><td>35.2</td><td>15.4</td><td>36.3</td><td>+1.1</td></tr><tr><td rowspan="3">L3: Event-Event</td><td>CBQA</td><td>0.4</td><td>10.5</td><td>12.0</td><td>21.8</td><td>12.1</td><td>25.3</td><td>12.3</td><td>25.4</td><td>+0.1</td></tr><tr><td>ReasonQA</td><td>36.3</td><td>47.5</td><td>49.5</td><td>52.3</td><td>78.2</td><td>83.0</td><td>81.1</td><td>86.1</td><td>+3.1</td></tr><tr><td>OBQA</td><td>8.1</td><td>19.2</td><td>17.0</td><td>25.3</td><td>19.7</td><td>31.2</td><td>21.1</td><td>32.4</td><td>+1.2</td></tr></table>
Table 4: Experimental results of each setting in TEMPREASON. $\Delta$ F1 refers to the F1 difference between TempT5 and T5-SFT. The reported results are the average scores of three runs.
<table><tr><td colspan="3">TempT5</td></tr><tr><td>Time Range</td><td>EM</td><td>F1</td></tr><tr><td>1000-2022</td><td>100</td><td>100</td></tr><tr><td>2022-2040</td><td>94.4</td><td>97.1</td></tr></table>

Table 5: L1 experimental results of TempT5 on the in-domain TEMPREASON test set and the future test set.
**L1 Understanding.** For L1 temporal understanding, the performance of FLAN-T5-L and ChatGPT significantly deteriorates compared to year prediction (Table 2). ChatGPT achieves 99.6 EM on year prediction, whereas it only achieves 30.5 EM on month prediction. The fine-tuned models T5-SFT and TempT5 achieve 100 EM/F1 on this task. This shows that even though the L1 logic rules were not explicitly encoded in the language models, we can teach a language model such rules by creating examples of them on a large scale. We further evaluate the trained L1-TempT5 model on an out-of-domain futuristic test set (Table 5). The questions of the futuristic test set have reference times from 2022 to 2040, which are disjoint from the time period of TEMPREASON. The TempT5 model performs decently on the future test set, achieving a 97.1 F1 score. However, this performance is still below the in-domain performance.
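To illustrate what creating L1 rule examples "on a large scale" could look like, here is a minimal sketch of generating time-time question-answer pairs; the template wording follows Table 13, while the sampling ranges and helper names are our own illustration.

```python
import random

MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

def shift(year: int, month: int, delta_months: int) -> tuple[int, int]:
    """Shift a (year, month) pair by a signed number of months."""
    idx = year * 12 + (month - 1) + delta_months
    return idx // 12, idx % 12 + 1

def l1_example() -> tuple[str, str]:
    """Generate one L1 time-time question-answer pair (cf. Table 13)."""
    year, month = random.randint(1000, 2022), random.randint(1, 12)
    x, y = random.randint(0, 10), random.randint(0, 11)
    direction = random.choice(["before", "after"])
    delta = -(12 * x + y) if direction == "before" else 12 * x + y
    ans_y, ans_m = shift(year, month, delta)
    question = (f"What is the time {x} year(s) and {y} month(s) "
                f"{direction} {MONTHS[month - 1]} {year}?")
    return question, f"{MONTHS[ans_m - 1]} {ans_y}"
```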
**L2 Understanding.** The time-event relation is the main question type of previous TSQA datasets. Comparing the three settings for L2, we can see that the problem setting plays a significant role. For all three models, CBQA performance is the lowest among the three settings. This shows that it is highly difficult for LMs to answer temporal questions without any context. Meanwhile, ReasonQA has significantly better performance than OBQA and CBQA. This shows that language models are able to perform temporal reasoning when the relevant facts are provided. That is, once the possible answers and the related timestamps are retrieved, fine-tuned language models (TempT5 and T5-SFT) can perform temporal reasoning relatively well.
<table><tr><td></td><td>Question Type</td><td>EM</td><td>F1</td></tr><tr><td rowspan="2">L2: CBQA</td><td>P39</td><td>1.6</td><td>21.1</td></tr><tr><td>Others</td><td>1.3</td><td>19.9</td></tr><tr><td rowspan="2">L3: CBQA</td><td>P39</td><td>51.4</td><td>68.2</td></tr><tr><td>Others</td><td>0.6</td><td>12.1</td></tr></table>
Table 6: Comparison of L2 and L3 performance of TempT5 in the CBQA setting.
It is worth noting that ChatGPT has the worst performance in the L2 ReasonQA setting, even though its performance is exceptionally high in the preliminary year-prediction experiments. This phenomenon shows that temporal understanding at different levels may not be easily transferable. Last but not least, our proposed TempT5 model achieves significant performance gains in OBQA and ReasonQA over T5-SFT, the strongest baseline in our experiments.
**L3 Understanding.** Similar to L2 understanding, all models perform best in ReasonQA, followed by OBQA, and worst in CBQA. Besides, compared to L2 questions, most models perform significantly worse on L3 questions in the ReasonQA setting (except for ChatGPT), showing that L3 temporal reasoning is more challenging than L2. For the FLAN-T5-L model, the performance deterioration from L2 to L3 is 18.8 F1 (L2: 66.3 vs L3: 47.5), whereas the performance gaps of T5-SFT and TempT5 are much lower. It is worth noting that for the T5-SFT model, the exact match scores on L3 questions are significantly higher than those on L2 in the CBQA (L2: 1.4 vs L3: 12.1) and OBQA (L2: 14.8 vs L3: 19.7) settings (the same holds for TempT5). We found that this counter-intuitive result is due to a reasoning shortcut for a specific question type, 'P39 position held' (Table 13). We further analyze the CBQA performance by question type in Table 6. For questions other than 'P39', L3 performance is significantly worse than L2 (L3: 12.1 F1 vs L2: 19.9 F1).
<table><tr><td rowspan="2">Model</td><td colspan="2">ReasonQA</td><td colspan="2">OBQA</td></tr><tr><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td>TempT5</td><td>84.8</td><td>88.9</td><td>15.4</td><td>36.3</td></tr><tr><td>-TSE</td><td>84.0</td><td>88.0</td><td>14.8</td><td>35.5</td></tr><tr><td>-TSRL</td><td>83.4</td><td>87.7</td><td>15.0</td><td>35.8</td></tr></table>
Table 7: Ablation analysis of TempT5 based on L2 questions.
<table><tr><td>Time Range</td><td>% Train</td><td>FLAN-T5-L F1</td><td>ChatGPT F1</td><td>TempT5 F1</td></tr><tr><td>before 1900</td><td>8.4</td><td>69.5</td><td>77.8</td><td>85.6</td></tr><tr><td>1900-1920</td><td>4.1</td><td>67.9</td><td>78.7</td><td>87.5</td></tr><tr><td>1920-1940</td><td>6.6</td><td>65.3</td><td>43.8</td><td>87.6</td></tr><tr><td>1940-1960</td><td>7.5</td><td>71.9</td><td>47.9</td><td>88.7</td></tr><tr><td>1960-1980</td><td>11.0</td><td>68.0</td><td>43.8</td><td>90.5</td></tr><tr><td>1980-2000</td><td>18.3</td><td>65.6</td><td>43.9</td><td>89.6</td></tr><tr><td>2000-2020</td><td>37.8</td><td>66.1</td><td>49.1</td><td>89.8</td></tr><tr><td>2020-2040</td><td>6.3</td><td>68.5</td><td>72.7</td><td>82.6</td></tr><tr><td>Overall</td><td>100</td><td>67.1</td><td>51.0</td><td>88.9</td></tr></table>
Table 8: Performance breakdown of different models in L2 ReasonQA across different time periods. We can see that ChatGPT has the worst performance among the three models and its performance is highly variable across different time periods.
However, the performance of L3 CBQA on 'P39' questions is much higher than on the other question types. This is because there are reasoning shortcuts for 'P39 position held' questions in the entity names. For example, for the question 'Which position did Nicholas Budgen hold before Member of the 46th Parliament of the United Kingdom?', the shortcut is to simply change '46th' to '45th'. This shows that L3 temporal reasoning can be achieved via different reasoning paths.
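To show how shallow this shortcut is, the sketch below answers such 'P39' questions with a pure string edit and no temporal reasoning at all; the ordinal-handling helpers are our own illustration, not part of the paper's models.

```python
import re

def ordinal(n: int) -> str:
    """Map an integer to its English ordinal form, e.g. 45 -> '45th'."""
    if 10 <= n % 100 <= 20:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{suffix}"

def p39_shortcut(position: str, direction: str) -> str:
    """Answer 'Which position did X hold before/after <position>?' for
    numbered positions by decrementing/incrementing the ordinal."""
    step = -1 if direction == "before" else 1
    return re.sub(r"(\d+)(st|nd|rd|th)",
                  lambda m: ordinal(int(m.group(1)) + step),
                  position, count=1)

# "before ... 46th Parliament" -> "... 45th Parliament"
print(p39_shortcut("Member of the 46th Parliament of the United Kingdom",
                   "before"))
```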
# 5.3 Ablation Study
In Table 7, we show the ablation study of TempT5 on the L2 questions in the OBQA and ReasonQA settings. We can see that TSE and TSRL have different effects in the two settings. Removing TSRL has a heavier impact on the ReasonQA setting, leading to a 1.2 F1 drop. On the other hand, TSE pre-training is more important in the OBQA setting, where removing it leads to a performance drop of 0.8 F1.
# 5.4 Further Analysis
In this section, we examine the model biases in TEMPREASON. We first analyze the L2 reasoning performance across different years in a similar manner as Section 2. The performance breakdown can be found in Table 8. We can see that for the FLAN-T5-L and ChatGPT models, the L2 reasoning performance fluctuates across different time periods.
<table><tr><td></td><td>Question Type</td><td>EM</td><td>F1</td></tr><tr><td rowspan="2">L2: ReasonQA</td><td>Intra-year</td><td>80.5</td><td>86.3</td></tr><tr><td>Inter-year</td><td>86.9</td><td>90.3</td></tr></table>
Table 9: Performance of TempT5 in L2 ReasonQA by question type. The intra-year question type refers to questions that have multiple possible answers within one year. In contrast, the inter-year question type only has one possible answer in that specific year.
<table><tr><td>Example 1 Error Type: Intra-Year Error
Error Cause: Lack of month-level understanding.
Question: Which position did Hirofumi Yoshimura hold in Jul 2019?
Context: Hirofumi Yoshimura holds the position of:
Governor of Osaka Prefecture from Apr 2019 to Dec 2022.
Member of the House of Representatives of Japan from Dec 2014 to Oct 2015.
Mayor of Osaka from Dec 2015 to Mar 2019.
Prediction: Mayor of Osaka
Ground Truth: Governor of Osaka Prefecture</td></tr></table>
Table 10: An example of an intra-year error made by TempT5 in L2 ReasonQA.
Compared to ChatGPT, FLAN-T5-L not only has higher overall performance but also lower variability across the different time periods. On the other hand, from the performance breakdown of our proposed TempT5, we can see that the temporal biases shown in the year prediction experiments (Table 2) were alleviated: the F1 scores from 1940 to 2020 are similar. However, the F1 scores before 1900 and after 2020 are still significantly worse than for the other time periods. This performance degradation is largely due to the lack of training data in those time periods.
The other major source of errors comes from the intra-year question type, i.e., questions that have multiple possible answers within one year and therefore require reasoning at the month level. As shown in Table 9, performance on intra-year questions is significantly worse than on inter-year questions, especially in EM (a 6.4-point gap: 86.9 vs. 80.5). In Table 10, we show an example of an intra-year reasoning error: the model fails to capture the intra-year position change of the subject.
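As a concrete reading of this distinction, the sketch below classifies a question as intra-year when more than one gold fact overlaps the queried year; the interval representation and helper names are our own illustration.

```python
from datetime import date

# Each fact is (answer, start, end), e.g. from a Wikidata P39 statement.
Fact = tuple[str, date, date]

def is_intra_year(facts: list[Fact], query_year: int) -> bool:
    """True if more than one answer is valid at some point in query_year,
    so disambiguating the question requires month-level reasoning."""
    year_start, year_end = date(query_year, 1, 1), date(query_year, 12, 31)
    overlapping = [a for a, s, e in facts if s <= year_end and e >= year_start]
    return len(overlapping) > 1

facts = [("Governor of Osaka Prefecture", date(2019, 4, 1), date(2022, 12, 31)),
         ("Mayor of Osaka", date(2015, 12, 1), date(2019, 3, 31))]
print(is_intra_year(facts, 2019))  # True: two positions held within 2019
```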
# 6 Related Work
**Temporal Information Extraction** Early efforts in temporal NLP research primarily focused on event temporal information extraction (TIE). Pustejovsky et al. (2003) constructed the TimeBank corpus, which annotates events, times, and the temporal relations between them (such as before/after).
The TIE task asks models to extract the events within a piece of text and to identify the temporal relations between event pairs. The TempEval challenge (Verhagen et al., 2010; Bethard et al., 2016) is a popular TIE challenge with an annotation scheme similar to TimeBank. However, it is costly to exhaustively annotate the temporal relations among all events. Cassidy et al. (2014) proposed a dense annotation scheme and constructed the TimeBank-Dense dataset, which has more complete annotation than TimeBank. Han et al. (2019) proposed a joint framework to extract events and times in an end-to-end manner. Rajaby Faghihi and Kordjamshidi (2021) proposed the time-stamped language model to understand the flow of events. However, prior works in this field focus on extracting events and temporal relations within a single document; models trained on this task cannot perform global event-to-time grounding.
**Temporal Reasoning over KGs** The temporal knowledge graph completion (TKGC) field studies temporal reasoning in knowledge graphs. The task aims to rank all entities in a knowledge graph given a temporal query. Many works in this field, such as TTransE (Jiang et al., 2016), HyTE (Dasgupta et al., 2018), and TNTComplEx (Lacroix et al., 2020), were proposed as extensions of prior knowledge-completion techniques such as TransE (Bordes et al., 2013), TransH (Wang et al., 2014), and ComplEx (Trouillon et al., 2016). Based on a similar concept as TKGC, several question answering datasets were proposed over temporal knowledge graphs, such as TEQUILA (Jia et al., 2018b), TimeQuestions (Jia et al., 2021), and CronQuestions (Saxena et al., 2021). These datasets include more complex questions in natural language format, and the task setting likewise asks models to rank all the entities of a given knowledge graph. Mavromatis et al. (2022) proposed a joint model that unifies temporal KG embeddings and pre-trained language models for this task. Shang et al. (2022) proposed a contrastive approach to improve QA performance over temporal KGs. Temporal reasoning in KGs is closely related to our problem of interest. However, the major difference is that KGQA presumes all the entities are known to the system and the task is to rank all the possible entities that satisfy the query. In contrast, our task aims to answer temporal questions based on natural text input only.
**Temporal Reasoning for LMs** Large language models (Devlin et al., 2019; Raffel et al., 2020; Liu et al., 2019) have demonstrated good performance on question answering tasks (Rajpurkar et al., 2016; Kwiatkowski et al., 2019). In recent years, several time-sensitive QA datasets have been proposed. Zhang and Choi (2021) proposed the SituatedQA dataset, which contains many time-dependent question-answer pairs. The TEMPLAMA dataset (Dhingra et al., 2022) was proposed to evaluate CBQA performance on time-dependent questions from 2010 to 2020. However, QA performance on TEMPLAMA may be overestimated, since it only covers a short time period and that period aligns with LMs' temporal bias. StreamingQA (Liska et al., 2022) has a similar disadvantage, since its time coverage is from 2007 to 2020. The Time-Sensitive QA dataset (Chen et al., 2021) covers a relatively longer timespan (from 1367 to 2018), but it only contains questions of the time-event relation. The common drawback of previously proposed TSQA datasets is the lack of coverage of temporal reasoning levels other than time-event questions.
# 7 Conclusions and Future Work
In this paper, we tackled the under-explored temporal reasoning problem for large language models. We found that large language models are highly susceptible to temporal biases, and that their temporal reasoning capability varies depending on the specific time given in the question. Besides, we proposed TEMPREASON, a comprehensive time-sensitive QA dataset, to evaluate LMs' temporal reasoning capability in diverse settings. Lastly, we proposed a novel training paradigm to improve language models' temporal reasoning capability through temporal span extraction pre-training and time-sensitive reinforcement learning. We conducted extensive experiments and demonstrated that our proposed model consistently outperforms strong baselines.
# 8 Limitations
The focus of the TEMPREASON dataset is to examine language models' temporal reasoning capability. However, the temporal expressions in TEMPREASON only take the form of textual months and numeric years. One limitation of the TEMPREASON benchmark is therefore the lack of adversarial attacks in other temporal formats, such as all-numeric dates and months.
The robustness of temporal reasoning is also important in real-world applications. Since the scope of this paper focuses only on the reasoning aspect, the robustness of TEMPREASON is left for future research. Besides, the knowledge triples of TEMPREASON come from the crowd-sourced Wikidata KB, and these triples are used to construct the question-answer pairs in this paper. Hence, it is possible that errors in the Wikidata KB propagate to the answers in TEMPREASON. However, such errors have minimal effect in the ReasonQA setting, since this setting only asks the models to infer from the factual knowledge provided from the Wikidata KB.
# 9 Ethics Statement
In this paper, we created a probing dataset, TEMPREASON, for temporal reasoning evaluation. The dataset is constructed by matching the Wikidata KB against Wikipedia articles, an approach commonly used for distantly supervised data construction. The Wikidata KB is in the public domain<sup>4</sup> and the Wikipedia articles are licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported License<sup>5</sup>; therefore, we are able to adapt these data to construct our dataset. We will also release our data under the same license as Wikidata. The scope of our dataset is purely the scientific study of language models' temporal reasoning capability. However, the contexts from the Wikipedia articles may contain improper content. The adoption of such content is not a decision of the authors, and the content in the dataset does not reflect the views or stances of the authors of this paper.

<sup>4</sup> https://www.wikidata.org/wiki/Wikidata:Licensing
<sup>5</sup> https://en.wikipedia.org/wiki/Wikipedia:Copyrights
# 10 Acknowledgements
We would like to thank all the reviewers for their insightful comments and constructive feedback.
# References
Steven Bethard, Guergana Savova, Wei-Te Chen, Leon Derczynski, James Pustejovsky, and Marc Verhagen. 2016. SemEval-2016 task 12: Clinical TempEval. In Proceedings of SemEval.

Antoine Bordes, Nicolas Usunier, Alberto Garcia-Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi-relational data. In Proceedings of NIPS.

Taylor Cassidy, Bill McDowell, Nathanael Chambers, and Steven Bethard. 2014. An annotation framework for dense event ordering. In Proceedings of ACL.

Wenhu Chen, Xinyi Wang, and William Yang Wang. 2021. A dataset for answering time-sensitive questions. In Proceedings of NIPS.

Yew Ken Chia, Lidong Bing, Sharifah Mahani Aljunied, Luo Si, and Soujanya Poria. 2022. A dataset for hyper-relational extraction and a cube-filling approach. In Proceedings of EMNLP.

Shib Sankar Dasgupta, Swayambhu Nath Ray, and Partha Talukdar. 2018. HyTE: Hyperplane-based temporally aware knowledge graph embedding. In Proceedings of EMNLP.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL.

Bhuwan Dhingra, Jeremy R. Cole, Julian Martin Eisenschlos, Daniel Gillick, Jacob Eisenstein, and William W. Cohen. 2022. Time-aware language models as temporal knowledge bases. Transactions of ACL.

Bosheng Ding, Chengwei Qin, Linlin Liu, Yew Ken Chia, Boyang Li, Shafiq Joty, and Lidong Bing. 2023. Are large language models good data annotators? A study on GPT-3, ChatGPT and GPT-4. In Proceedings of ACL.

Hao Fei, Bobo Li, Qian Liu, Lidong Bing, Fei Li, and Tat-Seng Chua. 2023. Reasoning implicit sentiment with chain-of-thought prompting. In Proceedings of ACL.

Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In Proceedings of ICML.

Rujun Han, Qiang Ning, and Nanyun Peng. 2019. Joint event and temporal relation extraction with shared representations and structured prediction. In Proceedings of EMNLP.

Zhen Jia, Abdalghani Abujabal, Rishiraj Saha Roy, Jannik Strötgen, and Gerhard Weikum. 2018a. TempQuestions: A benchmark for temporal question answering. In Proceedings of WWW.

Zhen Jia, Abdalghani Abujabal, Rishiraj Saha Roy, Jannik Strötgen, and Gerhard Weikum. 2018b. TEQUILA: Temporal question answering over knowledge bases. In Proceedings of CIKM.

Zhen Jia, Soumajit Pramanik, Rishiraj Saha Roy, and Gerhard Weikum. 2021. Complex temporal question answering on knowledge graphs. In Proceedings of CIKM.

Tingsong Jiang, Tianyu Liu, Tao Ge, Lei Sha, Baobao Chang, Sujian Li, and Zhifang Sui. 2016. Towards time-aware knowledge graph completion. In Proceedings of COLING.

Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. In Proceedings of EMNLP.

Jungo Kasai, Keisuke Sakaguchi, Yoichi Takahashi, Ronan Le Bras, Akari Asai, Xinyan Yu, Dragomir Radev, Noah A. Smith, Yejin Choi, and Kentaro Inui. 2022. RealTime QA: What's the answer right now? In Proceedings of EMNLP.

Thomas N. Kipf and Max Welling. 2017. Semi-supervised classification with graph convolutional networks. In Proceedings of ICLR.

Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural Questions: A benchmark for question answering research. Transactions of ACL.

Timothée Lacroix, Guillaume Obozinski, and Nicolas Usunier. 2020. Tensor decompositions for temporal knowledge base completion. In Proceedings of ICLR.

Adam Liska, Tomas Kocisky, Elena Gribovskaya, Tayfun Terzi, Eren Sezener, Devang Agrawal, Cyprien de Masson d'Autume, Tim Scholtes, Manzil Zaheer, Susannah Young, et al. 2022. StreamingQA: A benchmark for adaptation to new knowledge over time in question answering models. In Proceedings of ICML.

Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692.

Costas Mavromatis, Prasanna Lakkur Subramanyam, Vassilis N. Ioannidis, Adesoji Adeshina, Phillip R. Howard, Tetiana Grinberg, Nagib Hakim, and George Karypis. 2022. TempoQR: Temporal question reasoning over knowledge graphs. In Proceedings of AAAI.

Qiang Ning, Hao Wu, Rujun Han, Nanyun Peng, Matt Gardner, and Dan Roth. 2020. TORQUE: A reading comprehension dataset of temporal ordering questions. In Proceedings of EMNLP.

Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. arXiv preprint arXiv:2203.02155.

James Pustejovsky, Patrick Hanks, Roser Sauri, Andrew See, Robert Gaizauskas, Andrea Setzer, Dragomir Radev, Beth Sundheim, David Day, Lisa Ferro, et al. 2003. The TimeBank corpus. In Corpus Linguistics.

Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. JMLR.

Hossein Rajaby Faghihi and Parisa Kordjamshidi. 2021. Time-stamped language model: Teaching language models to understand the flow of events. In Proceedings of NAACL.

Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of EMNLP.

Adam Roberts, Colin Raffel, and Noam Shazeer. 2020. How much knowledge can you pack into the parameters of a language model? In Proceedings of EMNLP.

Apoorv Saxena, Soumen Chakrabarti, and Partha Talukdar. 2021. Question answering over temporal knowledge graphs. In Proceedings of ACL.

John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347.

Chao Shang, Guangtao Wang, Peng Qi, and Jing Huang. 2022. Improving time sensitivity for question answering over temporal knowledge graphs. In Proceedings of ACL.

Théo Trouillon, Johannes Welbl, Sebastian Riedel, Éric Gaussier, and Guillaume Bouchard. 2016. Complex embeddings for simple link prediction. In Proceedings of ICML.

Marc Verhagen, Roser Saurí, Tommaso Caselli, and James Pustejovsky. 2010. SemEval-2010 task 13: TempEval-2. In Proceedings of SemEval.

Denny Vrandečić and Markus Krötzsch. 2014. Wikidata: A free collaborative knowledgebase. Communications of the ACM.

Zhen Wang, Jianwen Zhang, Jianlin Feng, and Zheng Chen. 2014. Knowledge graph embedding by translating on hyperplanes. In Proceedings of AAAI.

Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. 2022. Finetuned language models are zero-shot learners. In Proceedings of ICLR.

Ralph Weischedel, Martha Palmer, Mitchell Marcus, Edward Hovy, Sameer Pradhan, Lance Ramshaw, Nianwen Xue, Ann Taylor, Jeff Kaufman, Michelle Franchini, et al. 2013. OntoNotes release 5.0 LDC2013T19. Linguistic Data Consortium.

Hai Ye, Qizhe Xie, and Hwee Tou Ng. 2023. Multi-source test-time adaptation as dueling bandits for extractive question answering. In Proceedings of ACL.

Michael Zhang and Eunsol Choi. 2021. SituatedQA: Incorporating extra-linguistic contexts into QA. In Proceedings of EMNLP.

Ruochen Zhao, Xingxuan Li, Shafiq Joty, Chengwei Qin, and Lidong Bing. 2023. Verify-and-edit: A knowledge-enhanced chain-of-thought framework. In Proceedings of ACL.
# A Realtime Adaptation of LMs
Besides the experiments on our proposed TEMPREASON dataset, we also evaluated our model on the RealtimeQA (Kasai et al., 2022) leaderboard. This leaderboard releases time-sensitive questions every week based on weekly quizzes from news websites (such as CNN and NBC). The RealtimeQA challenge has two tracks: (1) a multiple-choice track and (2) a generation track. The generation track of this challenge is the same as OBQA in this paper. We examined our model along with the two retrievers provided in the challenge: (1) Google custom search (GCS) and (2) Dense Passage Retrieval (DPR; Karpukhin et al., 2020). We adapt our L2 ReasonQA TempT5 model on the question-answer pairs of RealtimeQA from before December 2022. We then evaluate the adapted model on the questions released on December 16, 2022<sup>6</sup>. Experimental results (Table 12) show that our model performs competitively even when adapted to the most up-to-date TSQA challenge.
# B Implementation Details
This section describes the implementation details of our models and baselines. For temporal span extraction pre-training, we initialize from the T5-base model and train for 100K steps with a batch size of 8 and a learning rate of 2e-5, using a maximum input length of 512. For task-specific fine-tuning, we use the same batch size and learning rate, whereas the maximum input length differs by setting: 128 for CBQA, since only the question is given to the model; 512 for ReasonQA; and 1,024 for OBQA, since the context in this setting is the longest on average. For each setting, we fine-tune the language model for 3 epochs, and evaluation is conducted using the final checkpoint.
For time-sensitive reinforcement learning, we follow the proximal policy optimization (PPO; Schulman et al., 2017) algorithm. Instead of using a reward model, we use the reward function described in Section 4. For this stage, we set the initial KL penalty coefficient to 0.05 and the target KL value to 6. The discount factor $\gamma$ for PPO is set to 0.99.
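Expressed as a configuration, these RL hyperparameters could look as follows; this is a minimal sketch, and the field names are our own rather than those of any particular PPO library.

```python
from dataclasses import dataclass

@dataclass
class TSRLConfig:
    """PPO hyperparameters for time-sensitive reinforcement learning,
    using the values reported in this appendix."""
    init_kl_coef: float = 0.05  # initial coefficient of the KL penalty
    target_kl: float = 6.0      # target KL used to adapt the coefficient
    gamma: float = 0.99         # discount factor for PPO returns
    batch_size: int = 8
    learning_rate: float = 2e-5

config = TSRLConfig()
```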
# C Comparison of TEMPREASON and Prior Datasets
In Table 11, we show the detailed comparison of our TEMPREASON dataset and prior time-sensitive question answering datasets. Our dataset is the first to include all three temporal reasoning types and the ReasonQA setting.
# D TEMPREASON Templates
The templates that we used to create TEMPREASON are shown in Table 13.
<table><tr><td>Dataset</td><td>QA format</td><td>Knowledge Corpus</td><td>Closed/Open/Reason</td><td>Time Coverage</td><td>Size</td><td>L1</td><td>L2</td><td>L3</td></tr><tr><td>TEMPREASON</td><td>Language</td><td>Wikidata/Wikipedia</td><td>Closed/Open/Reason</td><td>634-2023</td><td>52.8K</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>TEMPLAMA (Dhingra et al., 2022)</td><td>Language</td><td>Wikidata</td><td>Closed</td><td>2010-2020</td><td>50k</td><td>✗</td><td>✓</td><td>✗</td></tr><tr><td>Time-Sensitive QA (Chen et al., 2021)</td><td>Language</td><td>Wikidata/Wikipedia</td><td>Open</td><td>1367-2018</td><td>41.2k</td><td>✗</td><td>✓</td><td>✗</td></tr><tr><td>StreamingQA (Liska et al., 2022)</td><td>Language</td><td>WMT</td><td>Closed/Open</td><td>2007-2020</td><td>147k</td><td>✗</td><td>✓</td><td>✗</td></tr><tr><td>SituatedQA (Zhang and Choi, 2021)</td><td>Language</td><td>Wikipedia/Human Annotation</td><td>Closed/Open</td><td>1270-2021</td><td>12.2k</td><td>✗</td><td>✓</td><td>✗</td></tr><tr><td>TempQuestions (Jia et al., 2018a)</td><td>KG</td><td>Wikipedia</td><td>KG</td><td>NA</td><td>1.2k</td><td>✗</td><td>✓</td><td>✗</td></tr><tr><td>TimeQuestions (Jia et al., 2021)</td><td>KG</td><td>Wikidata</td><td>KG</td><td>NA</td><td>16.1k</td><td>✗</td><td>✓</td><td>✗</td></tr><tr><td>CronQuestions (Saxena et al., 2021)</td><td>KG</td><td>Wikidata</td><td>KG</td><td>34-2021</td><td>410k</td><td>✗</td><td>✓</td><td>✓</td></tr></table>
Table 11: Dataset comparison of TEMPREASON and prior datasets.
<table><tr><td></td><td>EM</td><td>F1</td></tr><tr><td>GPT3+GCS†</td><td>55.0</td><td>63.6</td></tr><tr><td>TempT5-L+GCS</td><td>48.3</td><td>53.3</td></tr><tr><td>RAG+GCS†</td><td>35.0</td><td>45.9</td></tr><tr><td>GPT3+DPR†</td><td>17.2</td><td>23.0</td></tr><tr><td>TempT5-L+DPR</td><td>10.3</td><td>18.4</td></tr><tr><td>RAG+DPR†</td><td>0.0</td><td>3.1</td></tr></table>
Table 12: Experimental results on the generation track of RealtimeQA leaderboard based on December 16, 2022's questions. The task formulation of this track is the same as OBQA in this paper. Results with $\dagger$ are taken from the URL in the footnote.
<table><tr><td>WikiData ID</td><td>KB Relation</td><td># Queries</td><td>Template</td></tr><tr><td colspan="4">L1 Question Templates:</td></tr><tr><td>NA</td><td>NA</td><td>NA</td><td>What is the time x year(s) and y month(s) before/after t?</td></tr><tr><td>NA</td><td>NA</td><td>NA</td><td>What is the time x year(s) before/after t?</td></tr><tr><td>NA</td><td>NA</td><td>NA</td><td>What is the time y month(s) before/after t?</td></tr><tr><td colspan="4">L2 Question Templates:</td></tr><tr><td>P54</td><td>member of sports team</td><td>4,087</td><td>Which team did <subject> play for in t?</td></tr><tr><td>P39</td><td>position held</td><td>3,133</td><td>Which position did <subject> hold in t?</td></tr><tr><td>P108</td><td>employer</td><td>2,368</td><td>Which employer did <subject> work for in t?</td></tr><tr><td>P102</td><td>political party</td><td>500</td><td>Which political party did <subject> belong to in t?</td></tr><tr><td>P286</td><td>head coach</td><td>1,153</td><td>Who was the head coach of <subject> in t?</td></tr><tr><td>P69</td><td>educated at</td><td>750</td><td>Which school was <subject> attending in t?</td></tr><tr><td>P488</td><td>chairperson</td><td>1,904</td><td>Who was the chair of <subject> in t?</td></tr><tr><td>P6</td><td>head of government</td><td>1,627</td><td>Who was the head of the government of <subject> in t?</td></tr><tr><td>P35</td><td>head of state</td><td>250</td><td>Who was the head of the state of <subject> in t?</td></tr><tr><td>P127</td><td>owned by</td><td>245</td><td>Who was the owner of <subject> in t?</td></tr><tr><td colspan="4">L3 Question Templates:</td></tr><tr><td>P54</td><td>member of sports team</td><td>2,524</td><td>Which team did <subject> play for before/after oj?</td></tr><tr><td>P39</td><td>position held</td><td>2,538</td><td>Which position did <subject> hold before/after oj?</td></tr><tr><td>P108</td><td>employer</td><td>1,991</td><td>Which employer did <subject> work for before/after oj?</td></tr><tr><td>P102</td><td>political party</td><td>433</td><td>Which political party did <subject> belong to before/after oj?</td></tr><tr><td>P286</td><td>head coach</td><td>1,051</td><td>Who was the head coach of <subject> before/after oj?</td></tr><tr><td>P69</td><td>educated at</td><td>594</td><td>Which school was <subject> attending before/after oj?</td></tr><tr><td>P488</td><td>chairperson</td><td>1,881</td><td>Who was the chair of <subject> before/after oj?</td></tr><tr><td>P6</td><td>head of government</td><td>1,535</td><td>Who was the head of the government of <subject> before/after oj?</td></tr><tr><td>P35</td><td>head of state</td><td>268</td><td>Who was the head of the state of <subject> before/after oj?</td></tr><tr><td>P127</td><td>owned by</td><td>199</td><td>Who was the owner of <subject> before/after oj?</td></tr></table>
Table 13: Templates used for converting Wikidata facts into natural questions. For the L2 questions, $t$ is a randomly sampled time between the start time ${t}_{s}$ and end time ${t}_{e}$ of the given fact. The format of $t$ is month and year (examples shown in Figure 2). ${o}_{j}$ refers to the object entity name that is before or after the correct answer. The numbers of queries are from the L2 and L3 training sets of TEMPREASON.
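The sketch below shows how an L2 question could be instantiated from a Wikidata fact following Table 13; the fact tuple layout and the uniform sampling of $t$ are our own illustration of the caption's description.

```python
import random
from datetime import date, timedelta

L2_TEMPLATES = {
    "P54": "Which team did {subject} play for in {t}?",
    "P39": "Which position did {subject} hold in {t}?",
}

def l2_question(relation: str, subject: str, obj: str,
                t_s: date, t_e: date) -> tuple[str, str]:
    """Instantiate an L2 template with a time t sampled uniformly
    from the fact's validity interval [t_s, t_e]."""
    t = t_s + timedelta(days=random.randint(0, (t_e - t_s).days))
    question = L2_TEMPLATES[relation].format(
        subject=subject, t=t.strftime("%b %Y"))  # month-and-year format
    return question, obj

q, a = l2_question("P39", "Hirofumi Yoshimura", "Governor of Osaka Prefecture",
                   date(2019, 4, 1), date(2022, 12, 31))
```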
The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.

A. For every submission:

A1. Did you describe the limitations of your work? Section 8

A2. Did you discuss any potential risks of your work? Section 9

A3. Do the abstract and introduction summarize the paper's main claims? Section 1

A4. Have you used AI writing assistants when working on this paper? Left blank.

B. Did you use or create scientific artifacts? Section 3

B1. Did you cite the creators of artifacts you used? Section 3

B2. Did you discuss the license or terms for use and/or distribution of any artifacts? Section 9

B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)? Section 9

B4. Did you discuss the steps taken to check whether the data that was collected/used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect/anonymize it? Section 3, Section 9, and Appendix C

B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.? Section 3 and Appendix C

B6. Did you report relevant statistics like the number of examples, details of train/test/dev splits, etc. for the data that you used/created? Even for commonly-used benchmark datasets, include the number of examples in train/validation/test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be. Section 3

C. Did you run computational experiments? Section 5

C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used? Appendix B

C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? Section 5

C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run? Section 5

C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? Section 5

D. Did you use human annotators (e.g., crowdworkers) or research with human participants? Left blank.

D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? No response.

D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? No response.

D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? No response.

D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? No response.

D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? No response.
2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:56cf032f70aed3418478d132427662bc07a5c699b0aa89f45e7f3b7b1bbe684a
size 569088
2023/Towards Benchmarking and Improving the Temporal Reasoning Capability of Large Language Models/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/b5a9fb0d-d326-47a6-aba0-d08593e3056d_content_list.json
ADDED
The diff for this file is too large to render.
See raw diff
2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/b5a9fb0d-d326-47a6-aba0-d08593e3056d_model.json
ADDED
The diff for this file is too large to render.
See raw diff
2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/b5a9fb0d-d326-47a6-aba0-d08593e3056d_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71d7eeafd9bc17b151732307e7ef65e83769a36eff054a920e4af8bb64b57227
size 614891
2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/full.md
ADDED
@@ -0,0 +1,486 @@
# Towards Better Entity Linking with Multi-View Enhanced Distillation
Yi Liu$^{1,2,*}$, Yuan Tian$^{3}$, Jianxun Lian$^{3}$, Xinlong Wang$^{3}$, Yanan Cao$^{1,2,\dagger}$, Fang Fang$^{1,2}$, Wen Zhang$^{3}$, Haizhen Huang$^{3}$, Denvy Deng$^{3}$, Qi Zhang$^{3}$
$^{1}$Institute of Information Engineering, Chinese Academy of Sciences
$^{2}$School of Cyber Security, University of Chinese Academy of Sciences
$^{3}$Microsoft
{liuyi1999,caoyanan,fangfang0703}@iie.ac.cn
{yuantian,jialia,xinlongwang,zhangw,hhuang,dedeng,qizhang}@microsoft.com
# Abstract
Dense retrieval is widely used for entity linking to retrieve entities from large-scale knowledge bases. Mainstream techniques are based on a dual-encoder framework, which encodes mentions and entities independently and calculates their relevance via rough interaction metrics; this makes it difficult to explicitly model the multiple mention-relevant parts within entities that are needed to match divergent mentions. Aiming at learning entity representations that can match divergent mentions, this paper proposes a Multi-View Enhanced Distillation (MVD) framework, which can effectively transfer knowledge of multiple fine-grained and mention-relevant parts within entities from cross-encoders to dual-encoders. Each entity is split into multiple views to avoid irrelevant information being over-squashed into the mention-relevant view. We further design cross-alignment and self-alignment mechanisms for this framework to facilitate fine-grained knowledge distillation from the teacher model to the student model. Meanwhile, we reserve a global-view that embeds the entity as a whole to prevent dispersal of uniform information. Experiments show our method achieves state-of-the-art performance on several entity linking benchmarks<sup>1</sup>.
# 1 Introduction
Entity Linking (EL) serves as a fundamental task in Natural Language Processing (NLP), connecting mentions within unstructured contexts to their corresponding entities in a Knowledge Base (KB). EL usually provides the entity-related data foundation for various tasks, such as KBQA (Ye et al., 2022), knowledge-based language models (Liu et al., 2020), and information retrieval (Li et al., 2022). Most EL systems consist of two stages: entity retrieval (candidate generation), which retrieves a small set of candidate entities corresponding to mentions from a large-scale KB with low latency, and entity ranking (entity disambiguation), which ranks those candidates using a more accurate model to select the best match as the target entity.
<table><tr><td>Entity 1: 2014 UEFA Champions League final
Description: Real Madrid won the match 4-1 after extra time, with goals from Cristiano Ronaldo, Gareth Bale, Marcelo and Sergio Ramos. In doing so, Real Madrid secured a record 10th title in the competition. As the winners, Real Madrid earned the right to play against 2013–14 UEFA Europa League winners Sevilla in the 2014 UEFA Super Cup.</td></tr><tr><td>Mention: Ronaldo fired home the penalty as Real Madrid won Europe's biggest prize for the 10th time in its history.</td></tr></table>
<table><tr><td>Entity 2: Cristiano Ronaldo
Description: ... Ronaldo has won five Ballon d'Or awards and four European Golden Shoes, he has won 32 trophies in his career, including seven league titles, five UEFA Champions Leagues, the UEFA European Championship and the UEFA Nations League. ... Ronaldo was cautioned by police for smashing a phone out of a 14-year-old boy's hand following his team's 1-0 Premier League defeat to Everton in April.</td></tr><tr><td>Mention: Ronaldo has amassed an unrivalled collection of records in the Champions League and EURO finals.
Mention: Ronaldo has been charged with improper conduct by the FA for smashing a teenage fan's phone.</td></tr></table>
Figure 1: The illustration of two types of entities. Mentions in contexts are in bold; key information in entities is highlighted in color. The information in the first type of entity is relatively uniform and can be matched with a corresponding mention. In contrast, the second type of entity contains diverse and sparsely distributed information and can match divergent mentions.
This paper focuses on the entity retrieval task, which poses a significant challenge due to the need to retrieve targets from a large-scale KB. Moreover, the performance of entity retrieval is crucial for EL systems, as any recall errors in the initial stage can have a significant impact on the performance of the later ranking stage (Luan et al., 2021).
Recent advancements in pre-trained language models (PLMs) (Kenton and Toutanova, 2019) have led to the widespread use of dense retrieval for large-scale entity retrieval (Gillick et al., 2019; Wu et al., 2020). This approach typically adopts a dual-encoder architecture that independently embeds the textual content of mentions and entities into fixed-dimensional vectors (Karpukhin et al., 2020) and calculates their relevance scores using a lightweight interaction metric (e.g., dot product).
This allows the entity embeddings to be pre-computed, enabling entities to be retrieved through various fast nearest-neighbor search techniques (Johnson et al., 2019; Jayaram Subramanya et al., 2019).
The primary challenge in modeling relevance between an entity and its corresponding mentions lies in explicitly capturing the mention-relevant parts within the entity. By analyzing the diversity of intra-information within the textual contents of entities, we identify two distinct types of entities, as illustrated in Figure 1. Entities with uniform information can be effectively represented by the dual-encoder; however, due to its single-vector representation and coarse-grained interaction metric, this framework may struggle with entities containing divergent and sparsely distributed information. To alleviate the issue, existing methods construct multi-vector entity representations from different perspectives (Ma et al., 2021; Zhang and Stratos, 2021; Tang et al., 2021). Despite these efforts, all these methods rely on coarse-grained entity-level labels for training and lack the necessary supervised signals to select the most relevant representation for a specific mention from multiple entity vectors. As a result, their capability to effectively capture multiple fine-grained aspects of an entity and accurately match mentions with varying contexts is significantly hampered, ultimately leading to suboptimal performance in dense entity retrieval.
In order to obtain fine-grained entity representations capable of matching divergent mentions, we propose a novel Multi-View Enhanced Distillation (MVD) framework. MVD effectively transfers knowledge of multiple fine-grained and mention-relevant parts within entities from cross-encoders to dual-encoders. By jointly encoding the entity and its corresponding mentions, cross-encoders enable the explicit capture of mention-relevant components within the entity, thereby facilitating the learning of fine-grained elements of the entity through more accurate soft-labels. To achieve this, our framework constructs the same multi-view representation for both modules by splitting the textual information of entities into multiple fine-grained views. This approach prevents irrelevant information from being over-squashed into the mention-relevant view, which is selected based on the results of cross-encoders.
We further design cross-alignment and self-alignment mechanisms for our framework to separately align the original entity-level and fine-grained view-level scoring distributions, thereby facilitating fine-grained knowledge transfer from the teacher model to the student model. Motivated by prior works (Xiong et al., 2020; Zhan et al., 2021; Qu et al., 2021; Ren et al., 2021), MVD jointly optimizes both modules and employs an effective hard-negative mining technique to facilitate the transfer of hard-to-train knowledge during distillation. Meanwhile, we reserve a global-view that embeds the entity as a whole to prevent dispersal of uniform information and better represent the first type of entity in Figure 1.
Through extensive experiments on several entity linking benchmarks, including ZESHEL, AIDA-B, MSNBC, and WNED-CWEB, our method demonstrates superior performance over existing approaches. The results highlight the effectiveness of MVD in capturing fine-grained entity representations and matching divergent mentions, which significantly improves entity retrieval performance and facilitates overall EL performance by retrieving high-quality candidates for the ranking stage.
# 2 Related Work
To accurately and efficiently acquire target entities from large-scale KBs, the majority of EL systems are designed in two stages: entity retrieval and entity ranking. For entity retrieval, prior approaches typically rely on simple methods like frequency information (Yamada et al., 2016), alias tables (Fang et al., 2019) and sparse-based models (Robertson et al., 2009) to retrieve a small set of candidate entities with low latency. For the ranking stage, neural networks had been widely used for calculating the relevance score between mentions and entities (Yamada et al., 2016; Ganea and Hofmann, 2017; Fang et al., 2019; Kolitsas et al., 2018).
Recently, with the development of PLMs (Kenton and Toutanova, 2019; Lewis et al., 2020), PLM-based models have been widely used in both stages of EL. Logeswaran et al. (2019) and Yao et al. (2020) utilize the cross-encoder architecture, which jointly encodes mentions and entities, to rank candidates; Gillick et al. (2019) employs the dual-encoder architecture, which separately encodes mentions and entities into high-dimensional vectors, for entity retrieval. BLINK (Wu et al., 2020) improves overall EL performance by incorporating both architectures in its retrieve-then-rank pipeline, making it a strong baseline for the task. GENRE (De Cao et al., 2020) directly generates entity names in an auto-regressive manner.
To further improve retrieval performance, various methods have been proposed. Zhang and Stratos (2021) and Sun et al. (2022) demonstrate the effectiveness of hard negatives in enhancing retrieval performance. Agarwal et al. (2022) and GER (Wu et al., 2023) construct mention/entity-centralized graphs to learn fine-grained entity representations. However, limited to single-vector representations, these methods may struggle with entities that have multiple pieces of sparsely distributed information. Although Tang et al. (2021) and MuVER (Ma et al., 2021) construct multi-view entity representations and select the optimal view to calculate the relevance score with the mention, they still rely on the same entity-level supervised signal to optimize the scores of different views within the entity, which limits the capacity for matching divergent mentions.
In contrast to existing methods, MVD is primarily built on the knowledge distillation technique (Hinton et al., 2015), aiming to acquire fine-grained entity representations from cross-encoders to handle diverse mentions. To facilitate fine-grained knowledge transfer of multiple mention-relevant parts, MVD splits the entity into multiple views to avoid irrelevant information being squashed into the mention-relevant view, which is selected by the more accurate teacher model. The framework further incorporates cross-alignment and self-alignment mechanisms to learn mention-relevant view representations from both the original entity-level and the fine-grained view-level scoring distributions, derived from the soft-labels generated by the cross-encoder.
# 3 Methodology
# 3.1 Task Formulation
We first describe the task of entity linking as follows. Given a mention $m$ in a context sentence $s = \langle c_l, m, c_r \rangle$, where $c_l$ and $c_r$ are the words to the left/right of the mention, our goal is to efficiently obtain the entity corresponding to $m$ from a large-scale entity collection $\varepsilon = \{e_1, e_2, \dots, e_N\}$; each entity $e \in \varepsilon$ is defined by its title $t$ and description $d$, a generic setting in neural entity linking (Ganea and Hofmann, 2017). Here we follow the two-stage paradigm proposed by Wu et al. (2020): 1) retrieving a small set of candidate entities $\{e_1, e_2, \dots, e_K\}$ corresponding to mention $m$ from $\varepsilon$, where $K \ll N$; 2) ranking those candidates to obtain the best match as the target entity. In this work, we mainly focus on the first-stage retrieval.
# 3.2 Encoder Architecture
In this section, we describe the model architectures used for dense retrieval. Dual-encoder is the most adopted architecture for large-scale retrieval as it separately embeds mentions and entities into high-dimensional vectors, enabling offline entity embeddings and efficient nearest neighbor search. In contrast, the cross-encoder architecture performs better by computing deeply-contextualized representations of mention tokens and entity tokens, but is computationally expensive and impractical for first-stage large-scale retrieval (Reimers and Gurevych, 2019; Humeau et al., 2019). Therefore, in this work, we use the cross-encoder only during training, as the teacher model, to enhance the performance of the dual-encoder through the distillation of relevance scores.
# 3.2.1 Dual-Encoder Architecture
Similar to the work of (Wu et al., 2020) for entity retrieval, the retriever contains two-tower PLM-based encoders $\mathrm{Enc}_{\mathrm{m}}(\cdot)$ and $\mathrm{Enc}_{\mathrm{e}}(\cdot)$ that encode mention and entity into single fixed-dimension vectors independently, which can be formulated as:
$$
\begin{aligned}
E(m) &= \mathrm{Enc_m}([\mathrm{CLS}]\,\mathrm{c_l}\,[\mathrm{M_s}]\,\mathrm{m}\,[\mathrm{M_e}]\,\mathrm{c_r}\,[\mathrm{SEP}]) \\
E(e) &= \mathrm{Enc_e}([\mathrm{CLS}]\,\mathrm{t}\,[\mathrm{ENT}]\,\mathrm{d}\,[\mathrm{SEP}])
\end{aligned} \tag{1}
$$
|
| 79 |
+
|
| 80 |
+
where $\mathrm{m},\mathrm{c}_{\mathrm{l}},\mathrm{c}_{\mathrm{r}},\mathrm{t}$ , and $\mathrm{d}$ are the word-piece tokens of the mention, the context before and after the mention, the entity title, and the entity description. The special tokens $[\mathrm{M_s}]$ and $[\mathrm{M_e}]$ are separators to identify the mention, and [ENT] serves as the delimiter of titles and descriptions. [CLS] and [SEP] are special tokens in BERT. For simplicity, we directly take the [CLS] embeddings $E(m)$ and $E(e)$ as the representations for mention $m$ and entity $e$ , then the relevance score $s_{de}(m,e)$ can be calculated by a dot product $s_{de}(m,e) = E(m)\cdot E(e)$ .
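As a rough sketch (not the authors' released implementation), the dual-encoder scoring above could look as follows with Hugging Face `transformers`; the checkpoint name `bert-base-uncased` and the helper `embed` are illustrative, and the paper's special tokens are added to the vocabulary rather than being part of BERT:

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# [M_s]/[M_e] mark the mention span; [ENT] separates the title and description.
tokenizer.add_special_tokens({"additional_special_tokens": ["[M_s]", "[M_e]", "[ENT]"]})

enc_m = AutoModel.from_pretrained("bert-base-uncased")  # mention encoder Enc_m
enc_e = AutoModel.from_pretrained("bert-base-uncased")  # entity encoder Enc_e
enc_m.resize_token_embeddings(len(tokenizer))
enc_e.resize_token_embeddings(len(tokenizer))

def embed(encoder, text: str) -> torch.Tensor:
    # The tokenizer adds [CLS]/[SEP] automatically; we take the [CLS] vector
    # as the single fixed-size representation.
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    return encoder(**inputs).last_hidden_state[:, 0]

E_m = embed(enc_m, "left context [M_s] mention [M_e] right context")
E_e = embed(enc_e, "Entity Title [ENT] entity description ...")
score = (E_m * E_e).sum(-1)  # s_de(m, e) = E(m) . E(e)
```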
|
| 81 |
+
|
| 82 |
+
# 3.2.2 Cross-Encoder Architecture
|
| 83 |
+
|
| 84 |
+
The cross-encoder is built upon a PLM-based encoder $\mathrm{Enc}_{\mathrm{ce}}(\cdot)$, which concatenates and jointly encodes the mention $m$ and the entity $e$ (removing the [CLS] token from the entity tokens), takes the [CLS] vector as their relevance representation $\operatorname{E}(m, e)$, and finally feeds
|
| 85 |
+
|
| 86 |
+
it into a multi-layer perceptron (MLP) to compute the relevance score $s_{ce}(m, e)$ .
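A corresponding sketch of the cross-encoder scorer, again with illustrative names; the single linear layer standing in for the MLP head is an assumption:

```python
import torch
from torch import nn
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
enc_ce = AutoModel.from_pretrained("bert-base-uncased")  # joint encoder Enc_ce
mlp = nn.Linear(enc_ce.config.hidden_size, 1)            # scoring head (assumed single layer)

def score_ce(mention_ctx: str, entity_text: str) -> torch.Tensor:
    # Encode the pair as one sequence: [CLS] mention [SEP] entity [SEP],
    # so the entity side contributes no [CLS] token of its own.
    inputs = tokenizer(mention_ctx, entity_text, return_tensors="pt", truncation=True)
    cls = enc_ce(**inputs).last_hidden_state[:, 0]       # E(m, e)
    return mlp(cls).squeeze(-1)                          # s_ce(m, e)
```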
|
| 87 |
+
|
| 88 |
+
# 3.2.3 Multi-View Based Architecture
|
| 89 |
+
|
| 90 |
+
To prevent irrelevant information from being over-squashed into the entity representation, and to better represent the second type of entities in Figure 1, we construct multi-view entity representations for the entity encoder $\mathrm{Enc}_{\mathrm{e}}(\cdot)$. The textual information of the entity is split into multiple fine-grained local views to explicitly capture the key information in the entity and to match mentions with divergent contexts. Following the settings of MuVER (Ma et al., 2021), for each entity $e$ we segment its description $d$ into several sentences $d^{t}\,(t = 1,2,\dots,n)$ with the NLTK toolkit<sup>2</sup>, and then concatenate each sentence with the title $t$ to form the $t$-th view $e^{t}\,(t = 1,2,\dots,n)$:
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
E(e^{t}) = \mathrm{Enc_e}([\mathrm{CLS}]\,\mathrm{t}\,[\mathrm{ENT}]\,\mathrm{d^{t}}\,[\mathrm{SEP}]) \tag{2}
|
| 94 |
+
$$
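A small sketch of this view construction, mirroring MuVER's sentence-level splitting; `build_views` is an illustrative helper and assumes NLTK's `punkt` sentence tokenizer data is installed:

```python
from nltk.tokenize import sent_tokenize  # requires the NLTK "punkt" data package

def build_views(title: str, description: str, max_views: int = 10):
    """Split an entity description into sentence-level local views, each
    concatenated with the title as in Eq. (2): [CLS] t [ENT] d^t [SEP]."""
    sentences = sent_tokenize(description)[:max_views]  # max view num is 10 on ZESHEL (Appendix A.2)
    return [f"{title} [ENT] {sentence}" for sentence in sentences]

views = build_views(
    "Greater ironguard",
    "Greater ironguard was an arcane abjuration spell. It was an improved version of ironguard.",
)
# -> ["Greater ironguard [ENT] Greater ironguard was an arcane abjuration spell.",
#     "Greater ironguard [ENT] It was an improved version of ironguard."]
```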
|
| 95 |
+
|
| 96 |
+
Meanwhile, we retain the original entity representation $E(e)$ defined in Eq. (1) as the global view $e^0$ during inference, to avoid uniform information being dispersed across different views and to better represent the first type of entities in Figure 1. Finally, the relevance score $s(m, e_i)$ between mention $m$ and entity $e_i$ can be calculated from their multiple embeddings. Here we adopt a max-pooler that selects the view with the highest relevance score as the mention-relevant view:
|
| 97 |
+
|
| 98 |
+
$$
|
| 99 |
+
s(m, e_{i}) = \max_{t} \{ s(m, e_{i}^{t}) \} = \max_{t} \{ E(m) \cdot E(e_{i}^{t}) \} \tag{3}
|
| 100 |
+
$$
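In code, the max-pooling of Eq. (3) is one dot product per view followed by a max; a minimal sketch with random embeddings standing in for the encoders:

```python
import torch

def multiview_score(E_m: torch.Tensor, E_views: torch.Tensor) -> torch.Tensor:
    """E_m: (d,) mention embedding; E_views: (n, d) view embeddings of one entity.
    Returns s(m, e) = max_t E(m) . E(e^t), the score of the mention-relevant view."""
    view_scores = E_views @ E_m  # (n,) one dot product per view
    return view_scores.max()     # max-pooling over views, Eq. (3)

E_m = torch.randn(128)
E_views = torch.randn(4, 128)    # e.g., a global view plus three local views
print(multiview_score(E_m, E_views))
```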
|
| 101 |
+
|
| 102 |
+
# 3.3 Multi-View Enhanced Distillation
|
| 103 |
+
|
| 104 |
+
The basic intuition of MVD is to accurately transfer knowledge of multiple fine-grained views from the more powerful cross-encoder to the dual-encoder, so as to obtain mention-relevant entity representations. First, to provide more accurate relevance scores between mention $m$ and each view $e^t\,(t = 1,2,\dots,n)$ of entity $e$ as the supervised signal for distillation, we introduce a multi-view based cross-encoder following the formulation in Sec 3.2.3:
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
E(m, e^{t}) = \mathrm{Enc_{ce}}([\mathrm{CLS}]\,\mathrm{m_{enc}}\,[\mathrm{SEP}]\,\mathrm{e_{enc}^{t}}\,[\mathrm{SEP}]) \tag{4}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
where $\mathrm{m_{enc}}$ and $\mathrm{e}_{\mathrm{enc}}^{\mathrm{t}}\,(\mathrm{t} = 1,2,\dots,\mathrm{n})$ are the word-piece tokens of the mention and of the entity views, constructed as in Eq. (1) and (2), respectively.
|
| 111 |
+
|
| 112 |
+
We further design cross-alignment and self-alignment mechanisms to separately align the original entity-level scoring distribution and fine-grained view-level scoring distribution, in order to facilitate the fine-grained knowledge distillation from the teacher model to the student model.
|
| 113 |
+
|
| 114 |
+
Cross-alignment To learn the entity-level scoring distribution among candidate entities in the multi-view scenario, we calculate the relevance score $s(m, e_i)$ between mention $m$ and each candidate entity $e_i \in \{e_1, e_2, \dots, e_K\}$ over all of its views $\{e_i^1, e_i^2, \dots, e_i^n\}$. The indices of the relevant views, $i_{de}$ and $i_{ce}$, for the dual-encoder and the cross-encoder are:
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
\begin{aligned} i_{de} &= \underset{t}{\arg\max} \{ s_{de}(m, e_{i}^{t}) \} \\ i_{ce} &= \underset{t}{\arg\max} \{ s_{ce}(m, e_{i}^{t}) \} \end{aligned} \tag{5}
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
Here, to avoid a mismatch of relevant views (i.e., $i_{de} \neq i_{ce}$), we align the relevant views based on the index $i_{ce}$ of the max-score view in the cross-encoder. The loss is measured by the KL-divergence:
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\mathcal{L}_{cross} = \sum_{i=1}^{K} \tilde{s}_{ce}(m, e_{i}) \cdot \log \frac{\tilde{s}_{ce}(m, e_{i})}{\tilde{s}_{de}(m, e_{i})} \tag{6}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
where
|
| 127 |
+
|
| 128 |
+
$$
|
| 129 |
+
\begin{aligned} \tilde{s}_{de}(m, e_{i}) &= \frac{e^{s_{de}(m, e_{i}^{i_{ce}})}}{e^{s_{de}(m, e_{i}^{i_{ce}})} + \sum_{j \neq i} e^{s_{de}(m, e_{j}^{j_{ce}})}} \\ \tilde{s}_{ce}(m, e_{i}) &= \frac{e^{s_{ce}(m, e_{i}^{i_{ce}})}}{e^{s_{ce}(m, e_{i}^{i_{ce}})} + \sum_{j \neq i} e^{s_{ce}(m, e_{j}^{j_{ce}})}} \end{aligned} \tag{7}
|
| 130 |
+
$$
|
| 131 |
+
|
| 132 |
+
where $\tilde{s}_{de}(m,e_i)$ and $\tilde{s}_{ce}(m,e_i)$ denote the probability distributions of the entity-level scores, represented by the $i_{ce}$-th view, over all candidate entities.
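A sketch of Eqs. (5)-(7) in PyTorch; the score matrices below are random stand-ins for the two encoders' view-level scores, and detaching the teacher scores is our assumption about standard distillation practice:

```python
import torch
import torch.nn.functional as F

def cross_alignment_loss(s_de: torch.Tensor, s_ce: torch.Tensor) -> torch.Tensor:
    """s_de, s_ce: (K, n) view-level scores of K candidate entities with n views
    each, from the dual-encoder (student) and cross-encoder (teacher)."""
    i_ce = s_ce.argmax(dim=-1, keepdim=True)       # (K, 1) teacher-selected views, Eq. (5)
    de_ent = s_de.gather(-1, i_ce).squeeze(-1)     # (K,) student scores at i_ce
    ce_ent = s_ce.gather(-1, i_ce).squeeze(-1)     # (K,) teacher scores at i_ce
    log_p_de = F.log_softmax(de_ent, dim=-1)       # student entity distribution, Eq. (7)
    p_ce = F.softmax(ce_ent.detach(), dim=-1)      # teacher entity distribution
    return (p_ce * (p_ce.log() - log_p_de)).sum()  # KL(teacher || student), Eq. (6)

loss = cross_alignment_loss(torch.randn(16, 5, requires_grad=True), torch.randn(16, 5))
```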
|
| 133 |
+
|
| 134 |
+
Self-alignment To learn the view-level scoring distribution within each entity, so as to better distinguish the relevant view from the irrelevant ones, we calculate the relevance score $s(m, e_i^t)$ between mention $m$ and each view $e_i^t\,(t = 1,2,\dots,n)$ of entity $e_i$. The loss is measured by the KL-divergence:
|
| 135 |
+
|
| 136 |
+
$$
|
| 137 |
+
\mathcal{L}_{self} = \sum_{i=1}^{K} \sum_{t=1}^{n} \tilde{s}_{ce}(m, e_{i}^{t}) \cdot \log \frac{\tilde{s}_{ce}(m, e_{i}^{t})}{\tilde{s}_{de}(m, e_{i}^{t})} \tag{8}
|
| 138 |
+
$$
|
| 139 |
+
|
| 140 |
+

|
| 141 |
+
Figure 2: The general framework of Multi-View Enhanced Distillation (MVD). $V_{de}^{i}$ and $V_{ce}^{i}$ are the relevance scores between $m$ and $e^{i}$ calculated by the dual-encoder and the cross-encoder, respectively; $E_{de}$ and $E_{ce}$ are the entity-level relevance scores represented by $V_{de}^{i}$ and $V_{ce}^{i}$, based on the index $i$ of the max-score view in the cross-encoder.
|
| 142 |
+
|
| 143 |
+
where
|
| 144 |
+
|
| 145 |
+
$$
|
| 146 |
+
\tilde{s}_{de}(m,e_{i}^{t}) = \frac{e^{s_{de}(m,e_{i}^{t})}}{e^{s_{de}(m,e_{i}^{t})} + \sum_{j\neq t}e^{s_{de}(m,e_{i}^{j})}}
|
| 147 |
+
$$
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
\tilde{s}_{ce}(m, e_{i}^{t}) = \frac{e^{s_{ce}(m, e_{i}^{t})}}{e^{s_{ce}(m, e_{i}^{t})} + \sum_{j \neq t} e^{s_{ce}(m, e_{i}^{j})}} \tag{9}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
where $\tilde{s}_{de}(m,e_i^t)$ and $\tilde{s}_{ce}(m,e_i^t)$ denote the probability distributions of the view-level scores over all views within each entity.
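A matching sketch of Eqs. (8)-(9), under the same assumptions as the cross-alignment snippet:

```python
import torch
import torch.nn.functional as F

def self_alignment_loss(s_de: torch.Tensor, s_ce: torch.Tensor) -> torch.Tensor:
    """s_de, s_ce: (K, n) view-level scores as before. Within each entity,
    softmax over its n views (Eq. 9) and match the student's view-level
    distribution to the teacher's with KL-divergence (Eq. 8)."""
    log_p_de = F.log_softmax(s_de, dim=-1)   # (K, n) student view distributions
    p_ce = F.softmax(s_ce.detach(), dim=-1)  # (K, n) teacher view distributions
    kl_per_entity = (p_ce * (p_ce.log() - log_p_de)).sum(dim=-1)
    return kl_per_entity.sum()               # summed over the K candidates
```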
|
| 154 |
+
|
| 155 |
+
Joint training The overall joint training framework can be found in Figure 2. The final loss function is defined as
|
| 156 |
+
|
| 157 |
+
$$
|
| 158 |
+
\mathcal{L}_{total} = \mathcal{L}_{de} + \mathcal{L}_{ce} + \alpha \mathcal{L}_{cross} + \beta \mathcal{L}_{self} \tag{10}
|
| 159 |
+
$$
|
| 160 |
+
|
| 161 |
+
Here, $\mathcal{L}_{cross}$ and $\mathcal{L}_{self}$ are the knowledge distillation losses with the cross-encoder, defined in Eq. (6) and (8) respectively, and $\alpha$ and $\beta$ are their coefficients. Besides, $\mathcal{L}_{de}$ and $\mathcal{L}_{ce}$ are the supervised training losses of the dual-encoder and the cross-encoder on the labeled data, which maximize $s(m,e_k)$ for the golden entity $e_k$ among the candidates $\{e_1,e_2,\dots,e_K\}$:
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
\mathcal{L}_{de} = -s_{de}(m, e_{k}) + \log \sum_{j=1}^{K} \exp\left(s_{de}(m, e_{j})\right)
|
| 165 |
+
$$
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\mathcal{L}_{ce} = -s_{ce}(m, e_{k}) + \log \sum_{j=1}^{K} \exp\left(s_{ce}(m, e_{j})\right) \tag{11}
|
| 169 |
+
$$
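Putting Eqs. (10)-(11) together, a minimal sketch of the joint objective (coefficient defaults follow Appendix A.2; the distillation losses come from the two snippets above):

```python
import torch

def supervised_loss(scores: torch.Tensor, gold: int) -> torch.Tensor:
    """Eq. (11): -s(m, e_gold) + log sum_j exp(s(m, e_j)) over the K candidates."""
    return -scores[gold] + torch.logsumexp(scores, dim=-1)

def total_loss(s_de_ent, s_ce_ent, l_cross, l_self, gold, alpha=0.3, beta=0.1):
    """Eq. (10): supervised losses for both encoders plus the weighted
    cross-alignment and self-alignment distillation losses."""
    return (supervised_loss(s_de_ent, gold)
            + supervised_loss(s_ce_ent, gold)
            + alpha * l_cross + beta * l_self)
```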
|
| 170 |
+
|
| 171 |
+
Inference During inference, we only apply the mention-encoder to obtain the mention embedding, and then retrieve targets directly from the pre-computed view embeddings via efficient nearest-neighbor search. These view embeddings encompass both the global and local views and are generated by the entity-encoder after joint training. Although the number of views may increase the size of the entity index, the retrieval time can remain sub-linear in the index size thanks to mature nearest-neighbor search techniques (Zhang et al., 2022).
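As an illustration of the inference stage, the pre-computed view embeddings can be served with an off-the-shelf ANN library such as FAISS (Johnson et al., 2019); the dimensions and the flat inner-product index below are illustrative choices:

```python
import numpy as np
import faiss

d, n_views = 128, 100_000
view_embs = np.random.rand(n_views, d).astype("float32")  # global + local view embeddings

index = faiss.IndexFlatIP(d)  # inner product matches the dot-product scorer;
index.add(view_embs)          # an IVF/HNSW index would trade exactness for sub-linear speed

query = np.random.rand(1, d).astype("float32")  # E(m) from the mention-encoder
scores, view_ids = index.search(query, 64)      # top-64 views
# Map view ids back to entity ids and de-duplicate to obtain candidate entities.
```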
|
| 172 |
+
|
| 173 |
+
# 3.4 Hard Negative Sampling
|
| 174 |
+
|
| 175 |
+
Hard negatives are effective information carriers for difficult knowledge in distillation. Mainstream techniques for generating hard negatives include using static samples (Wu et al., 2020) or top-K dynamic samples retrieved by a recent iteration of the retriever (Xiong et al., 2020; Zhan et al., 2021), but such hard negatives may not suit the current model or may be pseudo-negatives (i.e., unlabeled positives) (Qu et al., 2021). To mitigate this issue, we adopt a simple negative sampling method that first retrieves the top-N candidates and then randomly samples K negatives from them, which reduces the probability of pseudo-negatives and improves the generalization of the retriever.
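The sampling scheme reduces to a few lines; `sample_negatives` is an illustrative helper, with N and K defaulting to the values reported in Appendix A.2:

```python
import random

def sample_negatives(candidate_ids, gold_id, n_top=100, k=15):
    """Retrieve the top-N candidates, drop the gold entity (a pseudo-positive
    rather than a negative), then randomly sample K negatives from the rest."""
    pool = [c for c in candidate_ids[:n_top] if c != gold_id]
    return random.sample(pool, min(k, len(pool)))

negatives = sample_negatives(list(range(300)), gold_id=7)
```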
|
| 176 |
+
|
| 177 |
+
# 4 Experiments
|
| 178 |
+
|
| 179 |
+
# 4.1 Datasets
|
| 180 |
+
|
| 181 |
+
We evaluate MVD on two distinct types of datasets: three standard EL datasets, AIDA-CoNLL (Hoffart et al., 2011), WNED-CWEB (Guo and Barbosa, 2018), and MSNBC (Cucerzan, 2007), which are all constructed on a uniform Wikipedia KB;
|
| 182 |
+
|
| 183 |
+
<table><tr><td>Method</td><td>R@1</td><td>R@2</td><td>R@4</td><td>R@8</td><td>R@16</td><td>R@32</td><td>R@50</td><td>R@64</td></tr><tr><td>BM25</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>69.26</td></tr><tr><td>BLINK (Wu et al., 2020)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>82.06</td></tr><tr><td>Partialidou et al. (2022)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>84.28</td><td>-</td></tr><tr><td>BLINK*</td><td>45.59</td><td>57.55</td><td>66.10</td><td>72.47</td><td>77.65</td><td>81.69</td><td>84.31</td><td>85.56</td></tr><tr><td>SOM (Zhang and Stratos, 2021)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>89.62</td></tr><tr><td>MuVER (Ma et al., 2021)</td><td>43.49</td><td>58.56</td><td>68.78</td><td>75.87</td><td>81.33</td><td>85.86</td><td>88.35</td><td>89.52</td></tr><tr><td>Agarwal et al. (2022)</td><td>50.31</td><td>61.04</td><td>68.34</td><td>74.26</td><td>78.40</td><td>82.02</td><td>-</td><td>85.11</td></tr><tr><td>GER (Wu et al., 2023)</td><td>42.86</td><td>-</td><td>66.48</td><td>73.00</td><td>78.11</td><td>82.15</td><td>84.41</td><td>85.65</td></tr><tr><td>MVD (ours)</td><td>52.51</td><td>64.77</td><td>73.43</td><td>79.74</td><td>84.35</td><td>88.17</td><td>90.43</td><td>91.55</td></tr></table>
|
| 184 |
+
|
| 185 |
+
Table 1: Recall@K (R@K) on the test set of ZESHEL. R@K measures the percentage of mentions for which the top-K retrieved entities include the golden entity. The best results are shown in bold; unavailable results are left blank. * is reproduced by Ma et al. (2021) with the context length expanded to 512.
|
| 186 |
+
|
| 187 |
+
<table><tr><td rowspan="2">Method</td><td colspan="3">AIDA-b</td><td colspan="3">MSNBC</td><td colspan="3">WNED-CWEB</td></tr><tr><td>R@10</td><td>R@30</td><td>R@100</td><td>R@10</td><td>R@30</td><td>R@100</td><td>R@10</td><td>R@30</td><td>R@100</td></tr><tr><td>BLINK</td><td>92.38</td><td>94.87</td><td>96.63</td><td>93.03</td><td>95.46</td><td>96.76</td><td>82.23</td><td>86.09</td><td>88.68</td></tr><tr><td>MuVER</td><td>94.53</td><td>95.25</td><td>98.11</td><td>95.02</td><td>96.62</td><td>97.75</td><td>79.31</td><td>83.94</td><td>88.15</td></tr><tr><td>MVD (ours)</td><td>97.05</td><td>98.15</td><td>98.80</td><td>96.74</td><td>97.71</td><td>98.04</td><td>85.01</td><td>88.18</td><td>91.11</td></tr></table>
|
| 188 |
+
|
| 189 |
+
Table 2: Recall@K (R@K) on the test sets of the Wikipedia-based datasets; the best results are shown in bold, and underlined results are reproduced by us.
|
| 190 |
+
|
| 191 |
+
and a more challenging Wikia-based dataset, ZESHEL (Logeswaran et al., 2019), which adopts a unique setup where the train, valid, and test sets correspond to different KBs. Statistics of these datasets are listed in Appendix A.1.
|
| 192 |
+
|
| 193 |
+
# 4.2 Training Procedure
|
| 194 |
+
|
| 195 |
+
The training pipeline of MVD consists of two stages: Warmup training and MVD training. In the Warmup training stage, we separately train the dual-encoder and the cross-encoder with in-batch negatives and static negatives, respectively. We then initialize the student model and the teacher model with the well-trained dual-encoder and cross-encoder, and perform multi-view enhanced distillation to jointly optimize the two modules following Section 3.3. Implementation details are listed in Appendix A.2.
|
| 196 |
+
|
| 197 |
+
# 4.3 Main Results
|
| 198 |
+
|
| 199 |
+
Compared Methods We compare MVD with previous state-of-the-art methods. These methods can be divided into several categories according to the representations of entities: BM25 (Robertson et al.,
|
| 200 |
+
|
| 201 |
+
2009) is a sparse retrieval model based on exact term matching. BLINK (Wu et al., 2020) adopts a typical dual-encoder architecture that independently embeds each entity into a single fixed-size vector. SOM (Zhang and Stratos, 2021) represents entities by their tokens and computes relevance scores via the sum-of-max operation (Khattab and Zaharia, 2020). Similar to our work, MuVER (Ma et al., 2021) constructs multi-view entity representations to match divergent mentions and achieves the best previous results, so we select MuVER as the main baseline for comparison. Besides, ARBORESCENCE (Agarwal et al., 2022) and GER (Wu et al., 2023) construct mention/entity-centralized graphs to learn fine-grained entity representations.
|
| 202 |
+
|
| 203 |
+
For the ZESHEL dataset, we compare MVD with all the above models. As shown in Table 1, MVD outperforms all existing methods. Compared to the previously best-performing method, MuVER, MVD surpasses it on all metrics by a significant margin, particularly on $\mathbf{R}@\mathbf{1}$, which indicates the ability to directly obtain the target entity. This demonstrates the effectiveness of MVD, which uses hard negatives as information carriers to explicitly transfer knowledge of multiple fine-grained views from the cross-encoder.
|
| 204 |
+
|
| 205 |
+
<table><tr><td>Model</td><td>R@1</td><td>R@64</td></tr><tr><td>MVD</td><td>51.69</td><td>89.78</td></tr><tr><td>- w/o multi-view cross-encoder</td><td>50.85</td><td>89.24</td></tr><tr><td>- w/o relevant-view alignment</td><td>51.02</td><td>89.55</td></tr><tr><td>- w/o self-alignment</td><td>51.21</td><td>89.43</td></tr><tr><td>- w/o cross-alignment</td><td>50.82</td><td>88.71</td></tr><tr><td>- w/o all components</td><td>51.40</td><td>84.16</td></tr></table>
|
| 206 |
+
|
| 207 |
+
Table 3: Ablation for fine-grained components in MVD on test set of ZESHEL. Results on Wikipedia-based datasets are similar and omitted due to limited space.
|
| 208 |
+
|
| 209 |
+
<table><tr><td>Method</td><td>R@1</td><td>R@64</td></tr><tr><td>MVD</td><td>51.69</td><td>89.78</td></tr><tr><td>- w/o dynamic distillation</td><td>51.11</td><td>88.50</td></tr><tr><td>- w/o dynamic negatives</td><td>50.26</td><td>88.46</td></tr><tr><td>- w/o all strategies</td><td>50.16</td><td>87.54</td></tr></table>

Table 4: Ablation for training strategies in MVD on test set of ZESHEL.
|
| 210 |
+
|
| 211 |
+
This yields entity representations better suited to matching multiple mentions, resulting in higher-quality candidates for the ranking stage.
|
| 212 |
+
|
| 213 |
+
For the Wikipedia-based datasets, we compare MVD with BLINK$^{3}$ and MuVER (Ma et al., 2021). As shown in Table 2, our MVD framework also outperforms the other methods and achieves state-of-the-art performance on the AIDA-b, MSNBC, and WNED-CWEB datasets, which again verifies the effectiveness of our method on standard EL datasets.
|
| 214 |
+
|
| 215 |
+
# 4.4 Ablation and Comparative Studies
|
| 216 |
+
|
| 217 |
+
# 4.4.1 Ablation Study
|
| 218 |
+
|
| 219 |
+
To conduct fair ablation studies and clearly evaluate the contribution of each fine-grained component and training strategy in MVD, we exclude the coarse-grained global view when evaluating the capability of transferring knowledge of multiple fine-grained views, and utilize top-K dynamic hard negatives without random sampling to mitigate the effect of randomness on training.
|
| 220 |
+
|
| 221 |
+
Fine-grained components Ablation results are presented in Table 3. When we replace the multi-view representations in the cross-encoder with the original single vector, or remove the relevant-view selection based on the results of the cross-encoder, the retrieval performance drops.
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
<table><tr><td>Method</td><td>View Type</td><td>R@1</td><td>R@64</td></tr><tr><td>BLINK</td><td>global</td><td>46.04</td><td>87.46</td></tr><tr><td>MuVER</td><td>global</td><td>36.90</td><td>80.65</td></tr><tr><td>MVD (ours)</td><td>global</td><td>47.11</td><td>87.04</td></tr><tr><td>BLINK</td><td>local</td><td>37.20</td><td>86.38</td></tr><tr><td>MuVER</td><td>local</td><td>41.99</td><td>89.25</td></tr><tr><td>MVD (ours)</td><td>local</td><td>51.27</td><td>90.25</td></tr><tr><td>MVD (ours)</td><td>global+local</td><td>52.51</td><td>91.55</td></tr></table>
|
| 226 |
+
|
| 227 |
+
Table 5: Comparison for representing entities from multi-grained views on test set of ZESHEL. Results of BLINK and MuVER are reproduced by us.
|
| 228 |
+
|
| 229 |
+
This indicates the importance of providing accurate supervised signals for each view of the entity during distillation. Additionally, the removal of cross-alignment and self-alignment results in a decrease in performance, highlighting the importance of these alignment mechanisms. Finally, when we exclude all fine-grained components in MVD and employ the traditional distillation paradigm based on single-vector entity representations and entity-level soft labels, performance decreases significantly, which further emphasizes the effectiveness of learning multiple fine-grained, mention-relevant views during distillation.
|
| 230 |
+
|
| 231 |
+
Training strategies We further explore the effectiveness of joint training and hard negative sampling in distillation; Table 4 shows the results. First, we examine the effect of joint training by freezing the teacher model's parameters and performing static distillation; the retrieval performance drops due to the teacher model's limitations. Similarly, performance drops considerably when we replace the dynamic hard negatives with static negatives, which demonstrates the importance of dynamic hard negatives in making the learning task more challenging. Furthermore, when both training strategies are excluded and the student model is trained independently with static negatives, a substantial decrease in retrieval performance is observed, which validates the effectiveness of both strategies in enhancing retrieval performance.
|
| 232 |
+
|
| 233 |
+
# 4.4.2 Comparative Study on Entity Representation
|
| 234 |
+
|
| 235 |
+
To demonstrate the capability of representing entities from multi-grained views, we carry out comparative analyses between MVD and BLINK (Wu et al., 2020), as well as MuVER (Ma et al., 2021).
|
| 236 |
+
|
| 237 |
+
<table><tr><td>Candidate Retriever</td><td>U.Acc.</td></tr><tr><td colspan="2">Base Version Ranker</td></tr><tr><td>BM25 (Logeswaran et al., 2019)</td><td>55.08</td></tr><tr><td>BLINK (Wu et al., 2020)</td><td>61.34</td></tr><tr><td>SOM (Zhang and Stratos, 2021)</td><td>65.39</td></tr><tr><td>Agarwal et al. (2022)</td><td>62.53</td></tr><tr><td>MVD (ours)</td><td>66.85</td></tr><tr><td colspan="2">Large Version Ranker</td></tr><tr><td>BLINK (Wu et al., 2020)</td><td>63.03</td></tr><tr><td>SOM (Zhang and Stratos, 2021)</td><td>67.14</td></tr><tr><td>MVD (ours)</td><td>67.84</td></tr></table>
|
| 238 |
+
|
| 239 |
+
Table 6: Performance of ranker based on different candidate retrievers on the test set of ZESHEL. U.Acc. means the unnormalized macro accuracy.
|
| 240 |
+
|
| 241 |
+
These systems are founded on the principles of coarse-grained global-views and fine-grained local-views, respectively.
|
| 242 |
+
|
| 243 |
+
We evaluate the retrieval performance of both entity representations and present the results in Table 5. The results clearly indicate that MVD surpasses both BLINK and MuVER in terms of entity representation performance, even exceeding BLINK's global-view performance in $\mathbf{R}@\mathbf{1}$ , despite being a fine-grained training framework. Unsurprisingly, the optimal retrieval performance is attained when MVD employs both entity representations concurrently during the inference process.
|
| 244 |
+
|
| 245 |
+
# 5 Further Analysis
|
| 246 |
+
|
| 247 |
+
# 5.1 Facilitating Ranker's Performance
|
| 248 |
+
|
| 249 |
+
To evaluate the impact of the quality of candidate entities on overall performance, we consider two aspects: candidates generated by different retrievers and the number of candidate entities used in inference. First, we separately train BERT-base and BERT-large based cross-encoders to rank the top-64 candidate entities retrieved by MVD. As shown in Table 6, the ranker based on our framework achieves the best results in the two-stage performance compared to other candidate retrievers, demonstrating its ability to generate high-quality candidate entities for the ranking stage.
|
| 250 |
+
|
| 251 |
+
Additionally, we study the impact of the number of candidate entities on overall performance. As shown in Figure 3, as the number of candidates $k$ increases, the retrieval performance grows steadily while the overall performance tends to plateau.
|
| 252 |
+
|
| 253 |
+

|
| 254 |
+
Figure 3: Recall and overall micro accuracy with different numbers of candidates $k$.
|
| 255 |
+
|
| 256 |
+
This indicates that an appropriate $k$ should be chosen to balance efficiency and efficacy; we observe that $k = 16$ is optimal on most of the existing EL benchmarks.
|
| 257 |
+
|
| 258 |
+
# 5.2 Qualitative Analysis
|
| 259 |
+
|
| 260 |
+
To better understand the practical implications of fine-grained knowledge transfer and global-view entity representation in MVD, we conduct a qualitative comparison between our method and MuVER (Ma et al., 2021) using retrieval examples from the test set of ZESHEL, as shown in Table 7.
|
| 261 |
+
|
| 262 |
+
In the first example, MVD clearly demonstrates its ability to accurately capture the mention-relevant information "Rekelen were members of this movement" and "professor Natima Lang" in the golden entity "Cardassian dissident movement". In contrast, MuVER exhibits limited ability to distinguish the golden entity from the hard negative entity "Romulan underground movement". In the second example, unlike MuVER, which solely focuses on local information within the entity, MVD can holistically model multiple mention-relevant parts of the golden entity "Greater ironguard" through the global-view entity representation, enabling it to match the corresponding mention "improved version of lesser ironguard".
|
| 263 |
+
|
| 264 |
+
# 6 Conclusion
|
| 265 |
+
|
| 266 |
+
In this paper, we propose a novel Multi-View Enhanced Distillation framework for dense entity retrieval. Our framework enables better representation of entities through multi-grained views.
|
| 267 |
+
|
| 268 |
+
<table><tr><td>Mention and Context</td><td>Entity retrieved by MVD</td><td>Entity retrieved by MuVER</td></tr><tr><td>Rekelen was a member of the <b>underground movement</b> and a student under professor Natima Lang. In 2370, Rekelen was forced to flee Cardassia Prime because of her political views.</td><td>Title: Cardassian dissident movement<br>The Cardassian dissident movement was a resistance movement formed to resist and oppose the Cardassian Central Command and restore the authority of the Detapa Council. They believed this change was critical for the future of their people. <b>Professor Natima Lang, Hogue, and Rekelen were members of this movement</b> in the late 2360s and 2370s. ...</td><td>Title: Romulan underground movement<br>The Romulan underground movement was formed sometime prior to the late 24th century on the planet Romulus by a group of Romulan citizens who opposed the Romulan High Command and who supported a Romulan-Vulcan reunification. Its methods and principles were similar to those of the Cardassian dissident movement which emerged in the Cardassian Union around the same time. ...</td></tr><tr><td>Known as the <b>improved version of lesser ironguard</b>, this spell granted the complete immunity from all common, unenchanted metals to the caster or one creature touched by the caster.</td><td>Title: Greater ironguard<br>Greater ironguard was an arcane abjuration spell that temporarily granted one creature immunity from all non-magical metals and some enchanted metals. <b>It was an improved version of ironguard.</b> The effects of this spell were the same as for "lesser ironguard" except that it also granted immunity and transparency to metals that had been enchanted up to a certain degree. ...</td><td>Title: Lesser ironguard<br>... after an improved version was developed, this spell became known as lesser ironguard. Upon casting this spell, the caster or one creature touched by the caster became completely immune to common, unenchanted metal. Metal weapons would pass through the individual without causing harm. Likewise, the target of this spell could pass through metal barriers such as iron bars, grates, or portcullises. ...</td></tr></table>
|
| 293 |
+
|
| 294 |
+
Table 7: Examples of entities retrieved by MVD and MuVER, mentions in contexts and mention-relevant information in entities are in bold.
|
| 295 |
+
|
| 296 |
+
It uses hard negatives as information carriers to effectively transfer knowledge of multiple fine-grained, mention-relevant views from the more powerful cross-encoder to the dual-encoder. We also design cross-alignment and self-alignment mechanisms for this framework to facilitate fine-grained knowledge distillation from the teacher model to the student model. Our experiments on several entity linking benchmarks show that our approach achieves state-of-the-art entity linking performance.
|
| 297 |
+
|
| 298 |
+
# Limitations
|
| 299 |
+
|
| 300 |
+
The limitations of our method are as follows:
|
| 301 |
+
|
| 302 |
+
- We find that utilizing multi-view representations in the cross-encoder is effective for MVD; however, the ranking performance of the cross-encoder itself may slightly decrease. It is therefore sub-optimal to directly use this cross-encoder for entity ranking.
|
| 303 |
+
- Mention detection is the predecessor task of our retrieval model, so our retrieval model is affected by mention detection errors. Designing a joint model of mention detection and entity retrieval is therefore a direction for improving our method.
|
| 304 |
+
|
| 305 |
+
# Acknowledgements
|
| 306 |
+
|
| 307 |
+
This work is supported by the National Key Research and Development Program of China (No. 2022YFB3102200) and the Strategic Priority Research Program of the Chinese Academy of Sciences (No. XDC02030400).
|
| 308 |
+
|
| 309 |
+
# References
|
| 310 |
+
|
| 311 |
+
Dhruv Agarwal, Rico Angell, Nicholas Monath, and Andrew McCallum. 2022. Entity linking via explicit mention-mention coreference modeling. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4644-4658.
|
| 312 |
+
Silviu Cucerzan. 2007. Large-scale named entity disambiguation based on wikipedia data. In Proceedings of the 2007 joint conference on empirical methods in natural language processing and computational natural language learning (EMNLP-CoNLL), pages 708-716.
|
| 313 |
+
Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. 2020. Autoregressive entity retrieval. In International Conference on Learning Representations.
|
| 314 |
+
Zheng Fang, Yanan Cao, Qian Li, Dongjie Zhang, Zhenyu Zhang, and Yanbing Liu. 2019. Joint entity linking with deep reinforcement learning. In The world wide web conference, pages 438-447.
|
| 315 |
+
Octavian-Eugen Ganea and Thomas Hofmann. 2017. Deep joint entity disambiguation with local neural
|
| 316 |
+
|
| 317 |
+
attention. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2619-2629.
|
| 318 |
+
Dan Gillick, Sayali Kulkarni, Larry Lansing, Alessandro Presta, Jason Baldridge, Eugene Ie, and Diego Garcia-Olano. 2019. Learning dense representations for entity retrieval. In Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 528-537.
|
| 319 |
+
Zhaochen Guo and Denilson Barbosa. 2018. Robust named entity disambiguation with random walks. Semantic Web, 9(4):459-479.
|
| 320 |
+
Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531.
|
| 321 |
+
Johannes Hoffart, Mohamed Amir Yosef, Ilaria Bordino, Hagen Fürstenau, Manfred Pinkal, Marc Spaniol, Bilyana Taneva, Stefan Thater, and Gerhard Weikum. 2011. Robust disambiguation of named entities in text. In Proceedings of the 2011 conference on empirical methods in natural language processing, pages 782-792.
|
| 322 |
+
Samuel Humeau, Kurt Shuster, Marie-Anne Lachaux, and Jason Weston. 2019. Poly-encoders: Architectures and pre-training strategies for fast and accurate multi-sentence scoring. In International Conference on Learning Representations.
|
| 323 |
+
Suhas Jayaram Subramanya, Fnu Devvrit, Harsha Vardhan Simhadri, Ravishankar Krishnawamy, and Rohan Kadekodi. 2019. Diskann: Fast accurate billion-point nearest neighbor search on a single node. Advances in Neural Information Processing Systems, 32.
|
| 324 |
+
Jeff Johnson, Matthijs Douze, and Herve Jégou. 2019. Billion-scale similarity search with gpus. IEEE Transactions on Big Data, 7(3):535-547.
|
| 325 |
+
Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6769-6781.
|
| 326 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT, pages 4171-4186.
|
| 327 |
+
Omar Khattab and Matei Zaharia. 2020. Colbert: Efficient and effective passage search via contextualized late interaction over bert. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval, pages 39-48.
|
| 328 |
+
Nikolaos Kolitsas, Octavian-Eugen Ganea, and Thomas Hofmann. 2018. End-to-end neural entity linking. In Proceedings of the 22nd Conference on Computational Natural Language Learning, pages 519-529.
|
| 329 |
+
|
| 330 |
+
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880.
|
| 331 |
+
Xiangsheng Li, Jiaxin Mao, Weizhi Ma, Zhijing Wu, Yiqun Liu, Min Zhang, Shaoping Ma, Zhaowei Wang, and Xiuqiang He. 2022. A cooperative neural information retrieval pipeline with knowledge enhanced automatic query reformulation. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining, pages 553-561.
|
| 332 |
+
Weijie Liu, Peng Zhou, Zhe Zhao, Zhiruo Wang, Qi Ju, Haotang Deng, and Ping Wang. 2020. K-bert: Enabling language representation with knowledge graph. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 2901-2908.
|
| 333 |
+
Lajanugen Logeswaran, Ming-Wei Chang, Kenton Lee, Kristina Toutanova, Jacob Devlin, and Honglak Lee. 2019. Zero-shot entity linking by reading entity descriptions. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3449-3460.
|
| 334 |
+
Yi Luan, Jacob Eisenstein, Kristina Toutanova, and Michael Collins. 2021. Sparse, dense, and attentional representations for text retrieval. Transactions of the Association for Computational Linguistics, 9:329-345.
|
| 335 |
+
Xinyin Ma, Yong Jiang, Nguyen Bach, Tao Wang, Zhongqiang Huang, Fei Huang, and Weiming Lu. 2021. MuVER: Improving first-stage entity retrieval with multi-view entity representations. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 2617-2624.
|
| 336 |
+
Eleni Partialidou, Despina Christou, and Grigorios Tsoumakas. 2022. Improving zero-shot entity retrieval through effective dense representations. In Proceedings of the 12th Hellenic Conference on Artificial Intelligence, pages 1-5.
|
| 337 |
+
Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al. 2021. Kilt: a benchmark for knowledge intensive language tasks. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2523-2544.
|
| 338 |
+
Yingqi Qu, Yuchen Ding, Jing Liu, Kai Liu, Ruiyang Ren, Wayne Xin Zhao, Daxiang Dong, Hua Wu, and Haifeng Wang. 2021. Rocketqa: An optimized training approach to dense passage retrieval for open-domain question answering. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5835-5847.
|
| 339 |
+
|
| 340 |
+
Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3982-3992.
|
| 341 |
+
Ruiyang Ren, Yingqi Qu, Jing Liu, Wayne Xin Zhao, Qiaoqiao She, Hua Wu, Haifeng Wang, and Ji-Rong Wen. 2021. Rocketqav2: A joint training method for dense passage retrieval and passage re-ranking. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 2825-2835.
|
| 342 |
+
Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389.
|
| 343 |
+
Kai Sun, Richong Zhang, Samuel Mensah, Yongyi Mao, and Xudong Liu. 2022. A transformational biencoder with in-domain negative sampling for zero-shot entity linking. In Findings of the Association for Computational Linguistics: ACL 2022, pages 1449-1458.
|
| 344 |
+
Hongyin Tang, Xingwu Sun, Beihong Jin, and Fuzheng Zhang. 2021. A bidirectional multi-paragraph reading model for zero-shot entity linking. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 13889-13897.
|
| 345 |
+
Ledell Wu, Fabio Petroni, Martin Josifoski, Sebastian Riedel, and Luke Zettlemoyer. 2020. Scalable zero-shot entity linking with dense entity retrieval. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6397-6407.
|
| 346 |
+
Taiqiang Wu, Xingyu Bai, Weigang Guo, Weijie Liu, Siheng Li, and Yujiu Yang. 2023. Modeling fine-grained information via knowledge-aware hierarchical graph for zero-shot entity retrieval. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining, pages 1021-1029.
|
| 347 |
+
Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul N Bennett, Junaid Ahmed, and Arnold Overwijk. 2020. Approximate nearest neighbor negative contrastive learning for dense text retrieval. In International Conference on Learning Representations.
|
| 348 |
+
Ikuya Yamada, Hiroyuki Shindo, Hideaki Takeda, and Yoshiyasu Takefuji. 2016. Joint learning of the embedding of words and entities for named entity disambiguation. In Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning, pages 250-259.
|
| 349 |
+
Zonghai Yao, Liangliang Cao, and Huapu Pan. 2020. Zero-shot entity linking with efficient long range sequence modeling. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 2517-2522.
|
| 350 |
+
|
| 351 |
+
Xi Ye, Semih Yavuz, Kazuma Hashimoto, Yingbo Zhou, and Caiming Xiong. 2022. RNG-KBQA: Generation augmented iterative ranking for knowledge base question answering. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6032-6043, Dublin, Ireland. Association for Computational Linguistics.
|
| 352 |
+
Jingtao Zhan, Jiaxin Mao, Yiqun Liu, Jiafeng Guo, Min Zhang, and Shaoping Ma. 2021. Optimizing dense retrieval model training with hard negatives. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 1503-1512.
|
| 353 |
+
Shunyu Zhang, Yaobo Liang, Ming Gong, Daxin Jiang, and Nan Duan. 2022. Multi-view document representation learning for open-domain dense retrieval. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5990-6000.
|
| 354 |
+
Wenzheng Zhang and Karl Stratos. 2021. Understanding hard negatives in noise contrastive estimation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1090-1101.
|
| 355 |
+
|
| 356 |
+
# A Appendix
|
| 357 |
+
|
| 358 |
+
# A.1 Statistics of Datasets
|
| 359 |
+
|
| 360 |
+
Table 8 shows statistics of the ZESHEL dataset, which was constructed from Wikia documents across 16 domains: 8 for training, 4 for validation, and 4 for testing.
|
| 361 |
+
|
| 362 |
+
<table><tr><td>Domain</td><td>#Entity</td><td>#Mention</td></tr><tr><td colspan="3">Training</td></tr><tr><td>American Football</td><td>31929</td><td>3898</td></tr><tr><td>Doctor Who</td><td>40281</td><td>8334</td></tr><tr><td>Fallout</td><td>16992</td><td>3286</td></tr><tr><td>Final Fantasy</td><td>14044</td><td>6041</td></tr><tr><td>Military</td><td>104520</td><td>13063</td></tr><tr><td>Pro Wrestling</td><td>10133</td><td>1392</td></tr><tr><td>Star Wars</td><td>87056</td><td>11824</td></tr><tr><td>World of Warcraft</td><td>27677</td><td>1437</td></tr><tr><td>Training</td><td>332632</td><td>49275</td></tr><tr><td colspan="3">Validation</td></tr><tr><td>Coronation Street</td><td>17809</td><td>1464</td></tr><tr><td>Muppets</td><td>21344</td><td>2028</td></tr><tr><td>Ice Hockey</td><td>28684</td><td>2233</td></tr><tr><td>Elder Scrolls</td><td>21712</td><td>4275</td></tr><tr><td>Validation</td><td>89549</td><td>10000</td></tr><tr><td colspan="3">Testing</td></tr><tr><td>Forgotten Realms</td><td>15603</td><td>1200</td></tr><tr><td>Lego</td><td>10076</td><td>1199</td></tr><tr><td>Star Trek</td><td>34430</td><td>4227</td></tr><tr><td>YuGiOh</td><td>10031</td><td>3374</td></tr><tr><td>Testing</td><td>70140</td><td>10000</td></tr></table>
|
| 363 |
+
|
| 364 |
+
Table 8: Statistics of ZESHEL dataset.
|
| 365 |
+
|
| 366 |
+
Table 9 shows statistics for the three Wikipedia-based datasets: AIDA, MSNBC, and WNED-CWEB. MSNBC and WNED-CWEB are two out-of-domain test sets, evaluated with the model trained on AIDA-train; we test them on the version of the Wikipedia dump provided in KILT (Petroni et al., 2021), which contains 5.9M entities.
|
| 367 |
+
|
| 368 |
+
<table><tr><td>Dataset</td><td>#Mention</td><td>#Entity</td></tr><tr><td>AIDA-train</td><td>18448</td><td></td></tr><tr><td>AIDA-valid</td><td>4791</td><td></td></tr><tr><td>AIDA-test</td><td>4485</td><td>5903530</td></tr><tr><td>MSNBC</td><td>678</td><td></td></tr><tr><td>WNED-CWEB</td><td>10392</td><td></td></tr></table>

Table 9: Statistics of three Wikipedia-based datasets.
|
| 369 |
+
|
| 370 |
+
# A.2 Implementation Details
|
| 371 |
+
|
| 372 |
+
For ZESHEL, we use BERT-base to initialize both the student dual-encoder and the teacher cross-encoder. For the Wikipedia-based datasets, we fine-tune our model based on the model released by BLINK, which is pre-trained on 9M annotated mention-entity pairs with BERT-large. All experiments are performed on 4 A6000 GPUs, and the results are the average of 5 runs with different random seeds.
|
| 373 |
+
|
| 374 |
+
Warmup training We initially train a dual-encoder using in-batch negatives, followed by training a cross-encoder as the teacher model with the top-k static hard negatives generated by the dual-encoder. Both models utilize multi-view entity representations and are optimized using the loss defined in Eq. (11); training details are listed in Table 10.
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
<table><tr><td>Hyperparameter</td><td>ZESHEL</td><td>Wikipedia</td></tr><tr><td colspan="3">Dual-encoder</td></tr><tr><td>Max mention length</td><td>128</td><td>32</td></tr><tr><td>Max view num</td><td>10</td><td>5</td></tr><tr><td>Max view length</td><td>40</td><td>40</td></tr><tr><td>Learning rate</td><td>1e-5</td><td>1e-5</td></tr><tr><td>Negative num</td><td>63</td><td>63</td></tr><tr><td>Batch size</td><td>64</td><td>64</td></tr><tr><td>Training epoch</td><td>40</td><td>40</td></tr><tr><td>Training time</td><td>4h</td><td>2h</td></tr><tr><td colspan="3">Cross-encoder</td></tr><tr><td>Max input length</td><td>168</td><td>72</td></tr><tr><td>Learning rate</td><td>2e-5</td><td>2e-5</td></tr><tr><td>Negative num</td><td>15</td><td>15</td></tr><tr><td>Batch size</td><td>1</td><td>1</td></tr><tr><td>Training epoch</td><td>3</td><td>3</td></tr><tr><td>Training time</td><td>7h</td><td>5h</td></tr></table>
|
| 379 |
+
|
| 380 |
+
Table 10: Hyperparameters for Warmup training.
|
| 381 |
+
|
| 382 |
+
MVD training Next, we initialize the student model and the teacher model with the well-trained dual-encoder and cross-encoder obtained from the Warmup training stage. We then employ multi-view enhanced distillation to jointly optimize both modules, as described in Section 3.3. To determine the values of $\alpha$ and $\beta$ in Eq. (10), we conduct a grid search and find that setting $\alpha = 0.3$ and $\beta = 0.1$ yields the best performance. We further adopt the simple negative sampling method of Sec 3.4 that first retrieves the top-N candidates and then samples K of them as negatives. Based on the analysis in Sec 5.1 that
|
| 383 |
+
|
| 384 |
+
16 is the optimal candidate number to cover most hard negatives while balancing efficiency, we set K to 16; then, to ensure high recall and sample high-quality negatives, we search over the candidate list [50, 100, 150, 200, 300] and eventually determine that $N = 100$ is the most suitable value. The training details are listed in Table 11.
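For illustration, the two hyperparameter searches can be written as below; `evaluate` is a hypothetical stand-in for a full train-and-validate run, and the grid values for $\alpha$ and $\beta$ are assumptions (the paper only reports the selected values):

```python
import itertools
import random

def evaluate(alpha: float, beta: float, n_top: int) -> float:
    """Hypothetical stand-in: train MVD with these settings, return validation R@1."""
    return random.random()

# Step 1: grid search the loss coefficients of Eq. (10).
alpha, beta = max(itertools.product([0.1, 0.3, 0.5, 1.0], repeat=2),
                  key=lambda ab: evaluate(*ab, n_top=100))
# The paper reports alpha = 0.3, beta = 0.1 as the best setting.

# Step 2: with K = 16 fixed, pick N from the candidate list (paper: N = 100).
n_top = max([50, 100, 150, 200, 300], key=lambda n: evaluate(alpha, beta, n))
```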
|
| 385 |
+
|
| 386 |
+
<table><tr><td>Hyperparameter</td><td>ZESHEL</td><td>Wikipedia</td></tr><tr><td>Max mention length</td><td>128</td><td>32</td></tr><tr><td>Max view num</td><td>10</td><td>5</td></tr><tr><td>Max view length</td><td>40</td><td>40</td></tr><tr><td>Max cross length</td><td>168</td><td>72</td></tr><tr><td>Learning rate</td><td>2e-5</td><td>2e-5</td></tr><tr><td>Negative num</td><td>15</td><td>15</td></tr><tr><td>Batch size</td><td>1</td><td>1</td></tr><tr><td>Training epoch</td><td>5</td><td>5</td></tr><tr><td>Training time</td><td>15h</td><td>6h</td></tr></table>
|
| 387 |
+
|
| 388 |
+
Table 11: Hyperparameters for MVD training.
|
| 389 |
+
|
| 390 |
+
Inference MVD employs both local-view and global-view entity representations concurrently during inference; details are listed in Table 12.
|
| 391 |
+
|
| 392 |
+
<table><tr><td>Hyperparameter</td><td>ZESHEL</td><td>Wikipedia</td></tr><tr><td>Local-view length</td><td>40</td><td>40</td></tr><tr><td>Global-view length</td><td>512</td><td>128</td></tr><tr><td>Avg view num</td><td>16</td><td>6</td></tr></table>
|
| 393 |
+
|
| 394 |
+
Table 12: Hyperparameters for Inference.
|
| 395 |
+
|
| 396 |
+
A For every submission:
|
| 397 |
+
|
| 398 |
+
A1. Did you describe the limitations of your work?
|
| 399 |
+
|
| 400 |
+
Section Limitations
|
| 401 |
+
|
| 402 |
+
A2. Did you discuss any potential risks of your work?
|
| 403 |
+
|
| 404 |
+
Our work mainly focuses on the general entity linking task and does not introduce additional potential risks.
|
| 405 |
+
|
| 406 |
+
A3. Do the abstract and introduction summarize the paper's main claims?
|
| 407 |
+
|
| 408 |
+
Section 1
|
| 409 |
+
|
| 410 |
+
A4. Have you used AI writing assistants when working on this paper?
|
| 411 |
+
|
| 412 |
+
Left blank.
|
| 413 |
+
|
| 414 |
+
B Did you use or create scientific artifacts?
|
| 415 |
+
|
| 416 |
+
Left blank.
|
| 417 |
+
|
| 418 |
+
B1. Did you cite the creators of artifacts you used?
|
| 419 |
+
|
| 420 |
+
No response.
|
| 421 |
+
|
| 422 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts?
|
| 423 |
+
|
| 424 |
+
No response.
|
| 425 |
+
|
| 426 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)?
|
| 427 |
+
|
| 428 |
+
No response.
|
| 429 |
+
|
| 430 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?
|
| 431 |
+
|
| 432 |
+
No response.
|
| 433 |
+
|
| 434 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.?
|
| 435 |
+
|
| 436 |
+
No response.
|
| 437 |
+
|
| 438 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.
|
| 439 |
+
|
| 440 |
+
No response.
|
| 441 |
+
|
| 442 |
+
C Did you run computational experiments?
|
| 443 |
+
|
| 444 |
+
Section 4
|
| 445 |
+
|
| 446 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used?
|
| 447 |
+
|
| 448 |
+
Section 4
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values?
|
| 453 |
+
|
| 454 |
+
Section 4
|
| 455 |
+
|
| 456 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?
|
| 457 |
+
|
| 458 |
+
Section 4
|
| 459 |
+
|
| 460 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)?
|
| 461 |
+
|
| 462 |
+
Section 4
|
| 463 |
+
|
| 464 |
+
D Did you use human annotators (e.g., crowdworkers) or research with human participants?
|
| 465 |
+
|
| 466 |
+
Left blank.
|
| 467 |
+
|
| 468 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.?
|
| 469 |
+
|
| 470 |
+
No response.
|
| 471 |
+
|
| 472 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)?
|
| 473 |
+
|
| 474 |
+
No response.
|
| 475 |
+
|
| 476 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used?
|
| 477 |
+
|
| 478 |
+
No response.
|
| 479 |
+
|
| 480 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board?
|
| 481 |
+
|
| 482 |
+
No response.
|
| 483 |
+
|
| 484 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data?
|
| 485 |
+
|
| 486 |
+
No response.
|
2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:229c4d848fffc750c45f05f98f334b2c94d664259ec1f116a8202413f4591000
|
| 3 |
+
size 827785
|
2023/Towards Better Entity Linking with Multi-View Enhanced Distillation/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/1de0907b-7695-457e-9022-fcf3d0255480_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/1de0907b-7695-457e-9022-fcf3d0255480_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/1de0907b-7695-457e-9022-fcf3d0255480_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:673f7f3210a7ed10936e0220cfe401e03d9e6f548c7d2f9534d646dc987b89d5
|
| 3 |
+
size 913263
|
2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/full.md
ADDED
|
@@ -0,0 +1,475 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Towards Boosting the Open-Domain Chatbot with Human Feedback
|
| 2 |
+
|
| 3 |
+
Hua Lu* Siqi Bao*† Huang He Fan Wang Hua Wu Haifeng Wang Baidu Inc., China
|
| 4 |
+
|
| 5 |
+
{luhua05, baosiqi, hehuang, wang.fan, wu_hua, wanghaifeng}@baidu.com
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Many open-domain dialogue models pretrained with social media comments can generate coherent replies but have difficulties producing engaging responses. This phenomenon might mainly result from the deficiency of annotated human-human conversations and the misalignment with human preference. In this paper, we propose a novel and efficient framework Diamante to boost the open-domain chatbot, where two kinds of human feedback (including explicit demonstration and implicit preference) are collected and leveraged. By asking annotators to select or amend the model-generated candidate responses, Diamante efficiently collects the human demonstrated responses and constructs a Chinese chit-chat dataset. To enhance the alignment with human preference, Diamante leverages the implicit preference in the data collection process and introduces the generation-evaluation joint training. Comprehensive experiments indicate that the Diamante dataset and joint training paradigm can significantly boost the performance of pre-trained dialogue models. The overall engagingness of the previous state-of-the-art model has been improved remarkably by $50\%$ in Chinese open-domain conversations.

# 1 Introduction

In recent years, self-supervised pre-training on tremendous amounts of unlabeled data has brought great success to many natural language processing tasks (Brown et al., 2020; Chowdhery et al., 2022). In dialogue generation, pre-training is usually carried out on massive social media comments, which act as proxies for human-like conversations (Adiwardana et al., 2020; Bao et al., 2021; Thoppilan et al., 2022). Although these pre-trained dialogue models are capable of generating coherent replies, they have difficulty producing engaging responses. The main reasons for this phenomenon might be twofold. Firstly, there is a considerable gap in data distribution between the proxy human-like conversations (public group discussion) and real human-human conversations (private two-way messaging). Secondly, the dialogue model usually outputs the response with the highest generation probability, which reflects the probability mass over all the training data but might not align well with human preference (e.g., some biased or unsafe statements).

One straightforward way to narrow the data distribution gap is to fine-tune the pre-trained dialogue model with annotated human-human conversations. For instance, Blender (Roller et al., 2021) employs four annotated datasets (Zhang et al., 2018; Dinan et al., 2019; Rashkin et al., 2019; Smith et al., 2020) to emphasize the conversational skills of personality, knowledge, empathy, and engagingness. As for the alignment with human preference, LaMDA (Thoppilan et al., 2022) defines and quantifies some critical metrics for dialogue evaluation, including safety, interestingness, and so on. By filtering out candidate responses that perform poorly on these metrics, the human preference towards the dialogue model has increased significantly. However, compared with English, high-quality annotations of human-human conversations or dialogue evaluation samples are relatively scarce in other languages. As a result, even the state-of-the-art Chinese chatbot, PLATO-XL (Bao et al., 2021), is only pre-trained with social media comments and does not involve advanced response evaluation.

In this paper, we propose a novel and efficient framework, namely Diamante, consisting of a data collection strategy and a learning method to boost the performance of pre-trained dialogue models. Two kinds of human feedback are collected and leveraged in Diamante, including explicit demonstration and implicit preference. Firstly, to bridge the gap in data distribution, Diamante collects an open-domain chit-chat dataset in Chinese with the assistance of PLATO-XL. Based on model-generated candidate responses, human annotators can efficiently produce an engaging response to continue the conversation. Secondly, we propose to leverage the implicit human preference revealed in the data collection process, i.e., the annotator's selected or amended response is preferred over the other candidates. To this end, Diamante introduces a novel generation-evaluation joint training paradigm, where high-quality response generation and human preference estimation are learned simultaneously. During inference, the candidate response with the highest preference score is selected as the final response and returned to the user.




Figure 1: Illustration of Diamante's annotation interface.

Extensive and intensive experiments have been carried out to evaluate the effectiveness of the Diamante framework, including the collected dataset and the joint training paradigm. Experimental results reveal that Diamante significantly boosts PLATO-XL's performance and establishes a new state-of-the-art result in Chinese open-domain conversation. Notably, Diamante even achieves performance competitive with or slightly better than the human reference. In addition to PLATO-XL, Diamante brings remarkable improvements to other pre-trained dialogue models. The Diamante dataset is publicly available and can be accessed and downloaded under the license agreement at the data platform<sup>1</sup>. We have also released all source code<sup>2</sup>, hoping to facilitate future research in dialogue generation.

# 2 Diamante Dataset

In this paper, we collect an open-domain chit-chat dataset in Chinese with the assistance of a pre-trained dialogue model. In the following, we describe the creation of the Diamante dataset.

# 2.1 Data Collection

Diamante aims to explore an efficient way to collect a batch of high-quality chit-chat conversations that align well with human values. The data annotation interface is shown in Figure 1 (the original interface is in Chinese and displayed in Figure 5 of the Appendix). The data collection process is carried out as follows.

Step 1: Crafting the Dialogue Opening. Firstly, the annotator is encouraged to craft a start utterance on any topic of interest, as an informative and engaging dialogue opening is critical to a good conversation. As shown in Figure 1, the start utterance is "My cat started shedding everywhere in the spring. How to deal with it?" We also provide various topics and examples in the guidelines to inspire annotators to write dialogue openings.

Step 2: Generating Candidate Responses with the Dialogue Model. Given the dialogue context, a dialogue model (PLATO-XL in the Diamante dataset) is employed to generate multiple candidate responses. To ensure the diversity of response content and conversation flow, we adopt top-$k$ sampling as the decoding strategy and select seven candidates for demonstration to the annotator.
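
To make the decoding strategy concrete, below is a minimal top-$k$ sampling sketch in NumPy. It illustrates the general technique rather than PLATO-XL's actual decoder, and the logits are assumed to come from the dialogue model at each decoding step; repeating the full decoding loop several times yields diverse candidates such as the seven shown to annotators.

```python
import numpy as np

def top_k_sample(logits: np.ndarray, k: int, rng: np.random.Generator) -> int:
    """Sample one token id from the k highest-scoring logits."""
    top_ids = np.argpartition(logits, -k)[-k:]            # indices of the k largest logits
    weights = np.exp(logits[top_ids] - logits[top_ids].max())
    return int(rng.choice(top_ids, p=weights / weights.sum()))

# Running the whole decoding loop N times with independent sampling produces
# N distinct candidate responses (the per-step model call is omitted here).
```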

Step 3: Producing the Response with Human Feedback. We then ask the annotator to select, revise, or rewrite a candidate to produce an appropriate response.

- Select. As large-scale dialogue models can generate coherent and occasionally interesting responses, the annotator is allowed to select one response directly from the candidates where appropriate.
- Revise. Given possible defects in the candidate responses, such as a lack of consistency or attractiveness, the annotator can choose the preferred candidate and further revise it for better quality.
- Rewrite. If no appropriate candidate exists, the annotator writes a suitable and engaging response from scratch.

Iterating Step 2 & Step 3 to Continue the Dialogue. After collecting the response with human feedback, the conversation continues by iterating steps 2 and 3. This collection loop, with both the human and the model in the loop, continues for at least seven rounds per dialogue. To ensure the annotation quality of the Diamante dataset, we also designed and followed a rigorous quality control process, with details discussed in the Appendix. A sketch of the overall loop is given below.
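
The following sketch summarizes Steps 1-3; `generate_candidates` and `annotate` are hypothetical stand-ins for the dialogue model's decoder and the human annotation interface, respectively.

```python
from typing import Callable, List

def collect_dialogue(opening: str,
                     generate_candidates: Callable[[List[str]], List[str]],
                     annotate: Callable[[List[str], List[str]], str],
                     min_rounds: int = 7) -> List[str]:
    """Human-and-model-in-the-loop collection of one Diamante dialogue."""
    context = [opening]                               # Step 1: human-crafted opening
    for _ in range(min_rounds):
        candidates = generate_candidates(context)     # Step 2: e.g., 7 top-k samples
        response = annotate(context, candidates)      # Step 3: select / revise / rewrite
        context.append(response)
    return context
```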

The above data collection strategy works well in terms of both efficiency and quality. The annotator can produce the final response efficiently by directly selecting or amending the model-generated candidates, and the conversation quality is guaranteed or enhanced by the annotator's verification or embellishment. Moreover, the implicit human preference revealed in the data collection process allows a preference estimation model to be trained without additional annotation.

# 2.2 Data Analysis

Corpus Statistics. In total, 147 annotators participated in the dataset collection. The detailed statistics of the Diamante dataset are summarized in Table 1. The dataset consists of 6,838 dialogues with 98,115 utterances, and the average utterance length is about 14.25. We split the collected data into train, validation, and test sets. As for the annotator operation proportions, 18% of the utterances are produced by Select, 41% by Revise, and 41% by Rewrite.

<table><tr><td>Diamante</td><td>Train</td><td>Valid</td><td>Test</td><td>Total</td></tr><tr><td>Number of Dialogues</td><td>5,838</td><td>500</td><td>500</td><td>6,838</td></tr><tr><td>Number of Utterances</td><td>83,765</td><td>7,166</td><td>7,184</td><td>98,115</td></tr><tr><td>Average Utterance Length</td><td>14.26</td><td>14.20</td><td>14.29</td><td>14.25</td></tr><tr><td>Select / Revise / Rewrite</td><td>18% / 41% / 41%</td><td>19% / 40% / 41%</td><td>19% / 40% / 41%</td><td>18% / 41% / 41%</td></tr></table>

Table 1: Statistics of the Diamante dataset.

Dialogue Topics. The Diamante dataset is about open-domain chit-chat and is not limited to any topic. For further quantitative analysis, we employ the topic tagger on the Baidu AI platform<sup>3</sup> to categorize the dialogues. (The topic visualization of the Diamante dataset is displayed in Figure 4 of the Appendix.) The results show that the Diamante dataset covers all 26 main categories. The top five topics are Society (23%), Entertainment (11%), People (10%), Education (8%), and Food & Drink (8%), which are in line with our daily life.
# 3 Generation-Evaluation Joint Training
|
| 67 |
+
|
| 68 |
+
In this paper, we propose to leverage not only the explicit human demonstrations but also the implicit human preference that appeared in the data collection to boost the open-domain chatbot comprehensively. A novel generation-evaluation joint training paradigm is introduced and illustrated in Figure 2, where the high-quality response generation and human preference estimation are optimized simultaneously. The classical training objective of dialogue generation is to minimize the negative log-likelihood (NLL) loss:
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
\mathcal {L} _ {N L L} = - \log p _ {\theta} \left(r _ {\mathcal {H}} | c\right) \tag {1}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
where $c$ refers to the dialogue context and $r_{\mathcal{H}}$ is the human annotator's selected or amended response.


Figure 2: Overview of the generation-evaluation joint training in Diamante. The high-quality response generation and human preference estimation are optimized simultaneously. The three input pairs share the same network, which is unfolded for illustration.

Besides generation, Diamante encodes evaluation into the joint optimization to enhance the alignment with human preference. Recall that implicit human preference exists in the data collection process: given the dialogue context $c$, the final response $r_{\mathcal{H}}$ is preferred by human annotators over any model-generated candidate $r_{\mathcal{M}} \in R_{\mathcal{M}}$ (displayed during annotation). Moreover, either $r_{\mathcal{H}}$ or $r_{\mathcal{M}}$ is better than a randomly selected response $r_{\mathcal{R}}$ in most cases. As such, we have the preference ranking $r_{\mathcal{H}} > r_{\mathcal{M}} > r_{\mathcal{R}}$. The preference estimation (PE) loss is then defined as:

$$
\begin{aligned}
\mathcal{L}_{PE} = -\frac{1}{3} \Big[ & \log\big(\sigma(s(c, r_{\mathcal{H}}) - s(c, r_{\mathcal{M}}))\big) \\
& + \log\big(\sigma(s(c, r_{\mathcal{H}}) - s(c, r_{\mathcal{R}}))\big) \\
& + \log\big(\sigma(s(c, r_{\mathcal{M}}) - s(c, r_{\mathcal{R}}))\big) \Big]
\end{aligned} \tag{2}
$$

where the input is a quadruple $(c, r_{\mathcal{H}}, r_{\mathcal{M}}, r_{\mathcal{R}})$, $\sigma(\cdot)$ is the sigmoid function, and $s(\cdot)$ is the scalar preference score output by the model.
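
A minimal PyTorch-style sketch of Eq. (2) follows, assuming the scalar scores $s(c, r)$ have already been computed for a batch; the NLL term of Eq. (1) would be added on top to obtain the total loss defined next.

```python
import torch
import torch.nn.functional as F

def preference_estimation_loss(s_h: torch.Tensor,
                               s_m: torch.Tensor,
                               s_r: torch.Tensor) -> torch.Tensor:
    """Eq. (2): pairwise ranking loss enforcing r_H > r_M > r_R.

    s_h, s_m, s_r hold the scalar preference scores s(c, r) of the human,
    model-generated, and random responses for each example in the batch.
    Note that log(sigmoid(x)) is computed stably via F.logsigmoid.
    """
    loss = -(F.logsigmoid(s_h - s_m)
             + F.logsigmoid(s_h - s_r)
             + F.logsigmoid(s_m - s_r)) / 3.0
    return loss.mean()
```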

The total objective of the generation-evaluation joint training is to minimize the following integrated loss:

$$
\mathcal{L} = \mathcal{L}_{NLL} + \mathcal{L}_{PE} \tag{3}
$$

The first term helps the model learn to mimic human demonstrations and generate high-quality candidate responses, while the second term helps the model learn the nuanced distinctions of human preference. During inference, we adopt top-$k$ sampling to produce multiple candidate responses and then rank them by their preference estimation scores. The candidate with the highest preference score is selected as the final response and returned to the user. Notably, the preference estimation follows the candidate response decoding and only involves processing one more token, which incurs negligible computational cost. A sketch of this inference procedure is given below.
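
In the following sketch, `sample_response` and `preference_score` are hypothetical wrappers around the model's top-$k$ decoder and evaluation head; the candidate count of 20 matches the setting reported in Section 4.1.1.

```python
from typing import Callable, List

def respond(context: str,
            sample_response: Callable[[str], str],
            preference_score: Callable[[str, str], float],
            num_candidates: int = 20) -> str:
    """Generate-then-rank inference: sample candidate responses with top-k
    decoding and return the one the preference head scores highest."""
    candidates: List[str] = [sample_response(context) for _ in range(num_candidates)]
    return max(candidates, key=lambda r: preference_score(context, r))
```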

One work similar to Diamante's joint training is LaMDA (Thoppilan et al., 2022), where a single model functions as both generator and discriminator. Several critical differences exist between Diamante and LaMDA. Firstly, LaMDA learns the discriminator and the generator sequentially. By contrast, Diamante optimizes generation and evaluation simultaneously, aiming to avoid the catastrophic forgetting issue of two-stage training (Kirkpatrick et al., 2017; Liu et al., 2022b). Secondly, LaMDA defines fine-grained dialogue evaluation metrics and collects corresponding discriminator training samples. Considering the expensive cost of data collection and the difficulty of reaching agreement in fine-grained dialogue evaluation (Smith et al., 2022), Diamante leverages the implicit human preference as the overall evaluation and avoids additional annotation. Thirdly, as suggested in work on human alignment (Askell et al., 2021), the ranked preference evaluation adopted in Diamante performs better than the binary discrimination used in LaMDA.

# 4 Experiments

# 4.1 Settings

# 4.1.1 Implementation Details

We apply the Diamante dataset and joint training paradigm to boost PLATO-XL's performance. In the generation-evaluation joint training, the input samples are formulated as quadruples $(c, r_{\mathcal{H}}, r_{\mathcal{M}}, r_{\mathcal{R}})$, where $c$ is the dialogue context, $r_{\mathcal{H}}$ is the human annotator's selected or amended response, $r_{\mathcal{M}}$ is one candidate response displayed during annotation, and $r_{\mathcal{R}}$ is one randomly selected response from the dataset. During the construction of joint training samples, if the sampled model-generated candidate $r_{\mathcal{M}}$ is the same as the human-generated response $r_{\mathcal{H}}$, $r_{\mathcal{M}}$ is re-sampled to guarantee the preference ranking $r_{\mathcal{H}} > r_{\mathcal{M}}$. In addition, $r_{\mathcal{M}}$ and $r_{\mathcal{R}}$ are re-sampled at each training epoch, as sketched below.
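
The construction of one quadruple can be sketched as follows; the field names (`context`, `human_response`, `candidates`) are illustrative rather than the released data schema, and the sketch assumes at least one candidate differs from the human response.

```python
import random
from typing import Dict, List, Tuple

def build_quadruple(example: Dict, corpus: List[Dict],
                    rng: random.Random) -> Tuple[str, str, str, str]:
    """Construct one (c, r_H, r_M, r_R) joint-training sample."""
    c, r_h = example["context"], example["human_response"]
    # Re-sample r_M until it differs from r_H, so that r_H > r_M holds.
    r_m = rng.choice(example["candidates"])
    while r_m == r_h:
        r_m = rng.choice(example["candidates"])
    # r_R: a response drawn from an unrelated dialogue in the dataset.
    r_r = rng.choice(corpus)["human_response"]
    return c, r_h, r_m, r_r
```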

The model is initialized with the 11B-parameter PLATO-XL, which adopts the PrefixLM transformer architecture (Radford et al., 2018; Dong et al., 2019): 72 transformer blocks and 32 attention heads, with an embedding dimension of 3072 and a feed-forward hidden dimension of 18432. The preference score $s(\cdot)$ is obtained through one fully-connected layer that converts the transformer output into a scalar. The hyper-parameter settings used in training are as follows. The maximum sequence length of context and response is set to 384 and 128, respectively. We use Adam (Kingma and Ba, 2015) as the optimizer, with a learning rate schedule consisting of a linear warmup followed by an inverse-sqrt decay (Vaswani et al., 2017). The peak learning rate is set to 2e-6, and the warmup step is set to 500. The model is trained for five epochs with a batch size of 168. The implementation is based on the PaddlePaddle framework, and the experiments are carried out on 8 Nvidia A100 GPUs (40GB RAM). During inference, we adopt top-$k$ sampling ($k=10$) to produce 20 candidate responses and select the one with the highest preference estimation score as the final response.
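
For illustration, a warmup-then-inverse-sqrt schedule can be written as below. The paper names the schedule family but not the exact decay constant, so the $\sqrt{\text{warmup}/\text{step}}$ form here is an assumption.

```python
def learning_rate(step: int, peak_lr: float = 2e-6, warmup: int = 500) -> float:
    """Linear warmup to peak_lr over `warmup` steps, then inverse-sqrt decay.

    The decay branch reaches peak_lr exactly at step == warmup and decays
    proportionally to 1/sqrt(step) afterwards (Vaswani et al., 2017 style).
    """
    if step < warmup:
        return peak_lr * step / warmup
    return peak_lr * (warmup / step) ** 0.5
```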

# 4.1.2 Compared Approaches

The following Chinese dialogue models are considered in the experiments:

- CDial-GPT (Wang et al., 2020) is a 104M-parameter model trained on LCCC conversations.
- EVA2.0 (Gu et al., 2022) is a 2.8B-parameter model pre-trained on the cleaned WDC-Dialogue corpus.
- PLATO-XL (Bao et al., 2021) is the largest Chinese dialogue model, with up to 11B parameters, pre-trained on social media conversations.

In addition to the above dialogue models, the following Chinese commercial chatbots are included: Microsoft XiaoIce (Zhou et al., 2020), Xiao AI, Tmall Genie, and Apple Siri.

# 4.1.3 Evaluation Metrics

We employ crowd-sourcing workers to evaluate the dialogue quality in four aspects: coherence, informativeness, safety, and engagingness. We discuss these criteria below and provide scoring details in Appendix C.

- Coherence assesses whether the response is relevant to and consistent with the context.
- Informativeness evaluates whether the response includes appropriate information.
- Safety evaluates whether the response contains harmful, biased, or misleading content.
- Engagingness measures the willingness to have a long conversation with the partner.

Coherence, informativeness, and safety are utterance-level metrics, while engagingness is a dialogue-level metric. Each metric is rated on a scale of {0, 1, 2}, with higher scores being better. Each sample is distributed to three crowd-sourcing workers with model names hidden, and the final score is determined through majority voting to mitigate bias and variance among evaluators. A sketch of this aggregation is given below.
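
The paper specifies majority voting over three workers but not its tie-breaking rule; falling back to the median on a three-way tie is an assumption made in this sketch.

```python
from collections import Counter
from typing import List

def majority_vote(scores: List[int]) -> int:
    """Aggregate three workers' ratings on the {0, 1, 2} scale."""
    score, freq = Counter(scores).most_common(1)[0]
    if freq > 1:
        return score                         # at least two workers agree
    return sorted(scores)[len(scores) // 2]  # tie, e.g., [0, 1, 2] -> 1 (assumed rule)
```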

# 4.2 Experimental Results

Considering the limitations of automatic dialogue evaluation (Liu et al., 2016), we employ crowd-sourcing workers to evaluate the dialogue quality in three settings: static evaluation, self-chat evaluation, and human-bot chat evaluation.

# 4.2.1 Static Evaluation

In the static evaluation, we randomly select 100 samples from the test set and employ the models to generate responses given the multi-turn dialogue context. In addition to PLATO-XL and Diamante, we also report the performance of the ground truth for reference. The evaluation results are summarized in Table 2. Diamante significantly improves the response quality on all criteria compared to PLATO-XL, and even achieves performance competitive with or slightly better than the human reference. For a detailed analysis, we further reviewed the 14/100 cases where Diamante achieved a higher engagingness score than the human reference. The possible reasons for this phenomenon could be twofold. Firstly, it is difficult for annotators to keep producing attractive and engaging responses at every round of a multi-turn conversation, which is consistent with our daily conversations. Secondly, Diamante encodes the preference estimation in the joint training to enhance the alignment with human preference, which helps it select the human-preferred response among the candidates.

<table><tr><td></td><td>Coherence</td><td>Informativeness</td><td>Safety</td><td>Engagingness</td></tr><tr><td>PLATO-XL</td><td>1.73</td><td>1.61</td><td>1.87</td><td>1.56</td></tr><tr><td>Human Reference</td><td>1.88</td><td>1.87</td><td>1.92</td><td>1.83</td></tr><tr><td>PLATO-XL (Diamante)</td><td>1.90</td><td>1.91</td><td>1.96</td><td>1.93</td></tr></table>

Table 2: Static evaluation results, with statistically significant improvements over PLATO-XL (independent two-sample $t$-test, $p < 0.005$) written in bold.

<table><tr><td></td><td>Coherence</td><td>Informativeness</td><td>Safety</td><td>Engagingness</td></tr><tr><td>CDial-GPT</td><td>0.484</td><td>0.400</td><td>0.660</td><td>0.140</td></tr><tr><td>EVA2.0</td><td>1.508</td><td>1.352</td><td>1.764</td><td>0.960</td></tr><tr><td>PLATO-XL</td><td>1.788</td><td>1.624</td><td>1.788</td><td>1.240</td></tr><tr><td>PLATO-XL (Diamante)</td><td>1.948</td><td>1.920</td><td>1.988</td><td>1.860</td></tr></table>

Table 3: Self-chat evaluation results, with statistically significant improvements over all other methods (independent two-sample $t$-test, $p < 0.005$) written in bold.

<table><tr><td></td><td>Coherence</td><td>Informativeness</td><td>Safety</td><td>Engagingness</td></tr><tr><td>XiaoIce</td><td>1.54</td><td>1.49</td><td>1.79</td><td>1.15</td></tr><tr><td>Xiao AI</td><td>1.57</td><td>1.54</td><td>1.88</td><td>1.20</td></tr><tr><td>Tmall Genie</td><td>1.58</td><td>1.51</td><td>1.78</td><td>1.25</td></tr><tr><td>Siri</td><td>1.17</td><td>1.13</td><td>1.42</td><td>0.75</td></tr><tr><td>PLATO-XL (Diamante)</td><td>1.92</td><td>1.91</td><td>1.98</td><td>1.90</td></tr></table>

Table 4: Human-bot chat evaluation results, with statistically significant improvements over all other methods (independent two-sample $t$-test, $p < 0.005$) written in bold.

# 4.2.2 Self-Chat Evaluation

As suggested by Adiwardana et al. (2020), the static evaluation can be biased by the construction of the dialogue context. Therefore, we also include interactive evaluation in the experiments, covering both self-chat and human-bot chat. Following the settings in PLATO-XL, 50 open-domain utterances are selected as dialogue openings, and each model plays the roles of both partners to continue the conversation for 5 rounds. These conversations are then distributed to crowd-sourcing workers for evaluation. The self-chat evaluation results are summarized in Table 3. Diamante outperforms the other models in all evaluation aspects and establishes a new state-of-the-art result in Chinese open-domain conversation. In particular, Diamante achieves a remarkable 50% improvement in engagingness compared to PLATO-XL. These results verify the effectiveness of the Diamante dataset and the generation-evaluation joint training paradigm.

# 4.2.3 Human-Bot Chat Evaluation

In addition to the above dialogue models, Diamante is compared to common Chinese commercial chatbots through human-bot chat evaluations. We select 20 high-frequency topics from a deployed chatbot and ask in-house data specialists to interact with these chatbots for 7-14 rounds. The human-bot chat evaluation results are summarized in Table 4. Diamante consistently outperforms the commercial chatbots by a large margin across all human evaluation metrics. These results indicate that Diamante can produce high-quality responses when interacting with real users.

The Fleiss' kappa (Fleiss, 1971) scores for the static, self-chat, and human-bot chat evaluations are 0.433, 0.468, and 0.424, respectively, suggesting that the crowd-sourcing workers reached moderate agreement in human evaluation.
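
For reference, Fleiss' kappa can be computed from the raw rating matrix with statsmodels; the ratings below are made-up toy values for illustration, not the study's data.

```python
import numpy as np
from statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa

# Toy ratings: one row per evaluated sample, one column per worker,
# entries on the {0, 1, 2} scale used in this paper.
ratings = np.array([
    [2, 2, 1],
    [1, 1, 1],
    [0, 1, 0],
    [2, 2, 2],
])

table, _ = aggregate_raters(ratings)   # per-sample counts for each category
print(fleiss_kappa(table, method="fleiss"))
```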

# 4.3 Discussions

# 4.3.1 Ablation Study on Joint Training

As discussed in previous sections, the improvements of Diamante over PLATO-XL come from two aspects: the Diamante dataset bridges the distribution gap towards human-human conversations, and the joint training paradigm enhances the alignment with human preference. For further dissection, we carry out ablation studies on joint training. Without joint training, PLATO-XL is trained on the Diamante dataset to minimize the NLL loss, and the final response is selected based on generation probability during inference. With joint training, PLATO-XL is trained on the Diamante dataset to minimize the generation-evaluation integrated loss, and the final response is selected based on preference estimation during inference.

<table><tr><td></td><td>Coherence</td><td>Informativeness</td><td>Safety</td><td>Engagingness</td></tr><tr><td>PLATO-XL (Diamante)</td><td>1.948</td><td>1.920</td><td>1.988</td><td>1.860</td></tr><tr><td>- Joint Training</td><td>1.912</td><td>1.820</td><td>1.908</td><td>1.600</td></tr><tr><td>- Joint Training & Dataset</td><td>1.788</td><td>1.624</td><td>1.788</td><td>1.240</td></tr></table>

Table 5: Self-chat evaluation results in the ablation of joint training, with statistically significant improvements over all other methods (independent two-sample $t$-test, $p < 0.005$) written in bold.

<table><tr><td></td><td>Coherence</td><td>Informativeness</td><td>Safety</td><td>Engagingness</td></tr><tr><td>CDial-GPT</td><td>0.484</td><td>0.400</td><td>0.660</td><td>0.140</td></tr><tr><td>CDial-GPT (Diamante)</td><td>0.968</td><td>0.960</td><td>1.368</td><td>0.480</td></tr></table>

Table 6: Exploration of applying Diamante to CDial-GPT, with statistically significant improvements (independent two-sample $t$-test, $p < 0.005$) written in bold.


Figure 3: Automatic evaluation in the ablation of joint training.

Firstly, we conduct automatic evaluation of response selection on the test set to compare these two approaches. Each dialogue context has one human-annotated response and seven model-generated candidates (those displayed during annotation). The experiments evaluate the ranking of the reference response among these candidates, reported in terms of mean average precision (MAP), mean reciprocal rank (MRR), and precision at position 1 (P@1), as summarized in Figure 3. The preference estimation of the joint training is adept at selecting the response that aligns well with human preference. By contrast, the generation probability has difficulty capturing the nuanced distinctions and delivers almost random performance in response ranking. (With a single reference among eight candidates, these three metrics can be computed as sketched below.)
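
In the following sketch, `ranks[i]` is the 1-based position of the reference response among the eight candidates for context `i`; with exactly one relevant item per query, MAP reduces to MRR.

```python
from typing import Dict, List

def ranking_metrics(ranks: List[int]) -> Dict[str, float]:
    """MAP, MRR, and P@1 for the single-reference response-ranking setup."""
    mrr = sum(1.0 / r for r in ranks) / len(ranks)
    return {
        "MAP": mrr,   # equals MRR when each query has one relevant item
        "MRR": mrr,
        "P@1": sum(r == 1 for r in ranks) / len(ranks),
    }
```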

Secondly, we conduct human evaluations to compare these two approaches, with self-chat evaluation results summarized in Table 5. As the comparison shows, the absence of joint training leads to a substantial performance decrease in engagingness, informativeness, and safety. These results validate that the joint training paradigm improves the alignment with human preference and plays a critical role in boosting the open-domain chatbot.

# 4.3.2 Applying Diamante to Other Dialogue Models

Although the Diamante dataset is collected with the assistance of PLATO-XL and the main experiments evaluate Diamante's improvements over PLATO-XL, the framework is universal and not limited to any particular dialogue model. We further explore applying Diamante to other dialogue models, taking CDial-GPT as an example. The self-chat evaluation results are summarized in Table 6. Compared to the original model, applying Diamante to CDial-GPT brings remarkable improvements across all evaluation metrics, verifying the effectiveness of Diamante in boosting the performance of Chinese pre-trained dialogue models.

# 5 Related Work

# 5.1 Human Feedback

With the rapid development of large language models, it becomes critical to build helpful, honest, and harmless language assistants, keeping in mind the alignment with human values (Askell et al., 2021; Bai et al., 2022; Glaese et al., 2022). Given the misalignment between the conventional training objective and the ultimate human preference, some works, such as WebGPT (Nakano et al., 2021) and InstructGPT (Ouyang et al., 2022), leverage human feedback to train a reward model and optimize towards this proxy objective using reinforcement learning. There are similar works in dialogue generation (Yi et al., 2019; Jaques et al., 2020), where the reward combines multifaceted evaluation scores, including sentiment, repetition, coherence, etc. When using these reinforcement learning-based approaches, one needs to be careful with the "alignment tax" and not optimize too much (Liu et al., 2022a).

In addition to the above reinforcement learning approaches, some works in dialogue generation (Hancock et al., 2019; Shuster et al., 2020; Xu et al., 2022) continue supervised training with human feedback, with the primary motivation of lifelong learning. The dialogue agent iterates the following steps: deploy the dialogue model, collect the human-model conversations, and update the model with the newly collected samples. During this process, only the human responses are used to update the model, and special attention is required to avoid low-quality responses from trolls (Ju et al., 2022). In comparison, Diamante involves human workers during the development phase rather than after deployment, which brings several benefits. Firstly, human annotators in Diamante have access to model-generated candidate responses and can efficiently formulate a high-quality conversation, whereas approaches that collect indirect demonstrations from human workers conversing with canned responses inevitably interrupt the conversation flow and decrease quality. Besides, the Diamante dataset is collected with recruited annotators, eliminating the adverse impact of trolls. Secondly, in addition to the explicit human demonstration, implicit human preference exists in Diamante's data collection process, which allows a preference estimation model to be trained without additional annotation.

# 5.2 Open-Domain Dialogue Datasets

Given the limited number of annotated human-human conversations, open-domain dialogue models are typically pre-trained with human-like conversations collected from social media, such as Twitter, Reddit, Weibo, and Douban. To alleviate the problems brought by the data distribution gap, it has become common to fine-tune these dialogue models with annotated human-human conversations. Representative English datasets include DailyDialog (Li et al., 2017), ConvAI2 (Zhang et al., 2018), Empathetic Dialogues (Rashkin et al., 2019), Wizard of Wikipedia (Dinan et al., 2019), Blended Skill Talk (Smith et al., 2020), etc. In comparison, high-quality annotations of human-human conversations are much scarcer in other languages. Most Chinese chit-chat datasets are constructed from social media comments, including LCCC (Wang et al., 2020), WDC-Dialogue (Zhou et al., 2021), and so on. To our knowledge, the Diamante dataset is the first Chinese chit-chat dataset with annotated human-human conversations. It is worth noting that Diamante is not merely a fix for this limitation in Chinese: it provides a systematic data collection strategy that is applicable to any language with high efficiency.

# 6 Conclusion

In this paper, we propose to collect and leverage human feedback to boost the open-domain chatbot. By asking annotators to select or amend model-generated candidate responses, Diamante efficiently collects a high-quality Chinese chit-chat dataset. Diamante introduces a novel generation-evaluation joint training paradigm, which leverages both the explicit human demonstrations and the implicit human preference revealed in the data collection process. Experimental results indicate that the Diamante dataset and joint training paradigm significantly improve pre-trained dialogue models.

# 7 Ethics Statement

In the dataset collection, annotators need to select or amend the model-generated candidate responses, some of which may contain potentially unsafe content. We ask annotators to produce safe and engaging responses. (As the model is pre-trained with social media comments, it may occasionally generate biased or harmful statements. During annotation, we monitored the proportion of potentially unsafe candidates, which was less than 1%.) After annotation, we further employ data experts to review the collected data and remove ineligible conversations.

Diamante's dataset and joint training paradigm help boost the open-domain chatbot and align it well with human values. In practical deployments, it is desirable to employ additional strategies to guarantee dialogue safety (Dinan et al., 2021), such as sensitive topic detection and response safety classification.

# 8 Reproducibility Statement

We describe the collection of the Diamante dataset in Section 2 and Appendix A, including the annotation interface, annotation procedures, and quality control process. The Diamante dataset is publicly available and can be accessed and downloaded under the license agreement at the data platform. We introduce the model designs in Section 3 and discuss the training configurations in Section 4.1.1. We have included the Diamante source code in the supplementary materials to facilitate reproducibility.

# Limitations

Diamante collects and leverages human feedback to enhance the alignment with human preference. Although we executed strict quality control during data collection, the model might still inherit some biases of the population who provided the feedback. Besides, Diamante provides a systematic and efficient data collection strategy that is applicable to multiple languages and conversational tasks. In this paper, we collected a Chinese chit-chat dataset as an initial attempt and will verify the effectiveness of Diamante in more scenarios in future work.

# Acknowledgement

We would like to thank the anonymous reviewers for their constructive comments. We thank Mingzhu Cai and Xin Tian for helpful discussions, and Jingzhou He, Shiwei Huang, and Dou Hong for their help with resource coordination.

# References

Daniel Adiwardana, Minh-Thang Luong, David R So, Jamie Hall, Noah Fiedel, Romal Thoppilan, Zi Yang, Apoorv Kulshreshtha, Gaurav Nemade, Yifeng Lu, and Quoc V. Le. 2020. Towards a human-like open-domain chatbot. arXiv preprint arXiv:2001.09977.

Amanda Askell, Yuntao Bai, Anna Chen, Dawn Drain, Deep Ganguli, Tom Henighan, Andy Jones, Nicholas Joseph, Ben Mann, Nova DasSarma, Nelson Elhage, Zac Hatfield-Dodds, Danny Hernandez, Jackson Kernion, Kamal Ndousse, Catherine Olsson, Dario Amodei, Tom Brown, Jack Clark, Sam McCandlish, Chris Olah, and Jared Kaplan. 2021. A general language assistant as a laboratory for alignment. arXiv preprint arXiv:2112.00861.

Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, Nicholas Joseph, Saurav Kadavath, Jackson Kernion, Tom Conerly, Sheer El-Showk, Nelson Elhage, Zac Hatfield-Dodds, Danny Hernandez, Tristan Hume, Scott Johnston, Shauna Kravec, Liane Lovitt, Neel Nanda, Catherine Olsson, Dario Amodei, Tom Brown, Jack Clark, Sam McCandlish, Chris Olah, Ben Mann, and Jared Kaplan. 2022. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862.

Siqi Bao, Huang He, Fan Wang, Hua Wu, Haifeng Wang, Wenquan Wu, Zhihua Wu, Zhen Guo, Hua Lu, Xinxian Huang, Xin Tian, Xinchao Xu, Yingzhan Lin, and Zheng-Yu Niu. 2021. PLATO-XL: Exploring the large-scale pre-training of dialogue generation. arXiv preprint arXiv:2109.09519.

Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems, pages 1877-1901.

Boxing Chen and Colin Cherry. 2014. A systematic comparison of smoothing techniques for sentence-level BLEU. In Proceedings of the 9th Workshop on Statistical Machine Translation, pages 362-367.

Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. PaLM: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311.

Emily Dinan, Gavin Abercrombie, A Stevie Bergman, Shannon Spruit, Dirk Hovy, Y-Lan Boureau, and Verena Rieser. 2021. Anticipating safety issues in E2E conversational AI: Framework and tooling. arXiv preprint arXiv:2107.03451.

Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2019. Wizard of Wikipedia: Knowledge-powered conversational agents. In International Conference on Learning Representations.

Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pre-training for natural language understanding and generation. In Advances in Neural Information Processing Systems, pages 13063-13075.

Joseph L Fleiss. 1971. Measuring nominal scale agreement among many raters. Psychological Bulletin, pages 378-382.

Amelia Glaese, Nat McAleese, Maja Trebacz, John Aslanides, Vlad Firoiu, Timo Ewalds, Maribeth Rauh, Laura Weidinger, Martin Chadwick, Phoebe Thacker, et al. 2022. Improving alignment of dialogue agents via targeted human judgements. arXiv preprint arXiv:2209.14375.

Yuxian Gu, Jiaxin Wen, Hao Sun, Yi Song, Pei Ke, Chujie Zheng, Zheng Zhang, Jianzhu Yao, Xiaoyan Zhu, Jie Tang, and Minlie Huang. 2022. EVA2.0: Investigating open-domain Chinese dialogue systems with large-scale pre-training. arXiv preprint arXiv:2203.09313.

Braden Hancock, Antoine Bordes, Pierre-Emmanuel Mazare, and Jason Weston. 2019. Learning from dialogue after deployment: Feed yourself, chatbot! In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3667-3684.

Natasha Jaques, Judy Hanwen Shen, Asma Ghandeharioun, Craig Ferguson, Agata Lapedriza, Noah Jones, Shixiang Gu, and Rosalind Picard. 2020. Human-centric dialog training via offline reinforcement learning. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, pages 3985-4003.

Da Ju, Jing Xu, Y-Lan Boureau, and Jason Weston. 2022. Learning from data in the mixed adversarial non-adversarial case: Finding the helpers and ignoring the trolls. arXiv preprint arXiv:2208.03295.

Diederik P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In International Conference on Learning Representations.

James Kirkpatrick, Razvan Pascanu, Neil Rabinowitz, Joel Veness, Guillaume Desjardins, Andrei A Rusu, Kieran Milan, John Quan, Tiago Ramalho, Agnieszka Grabska-Barwinska, et al. 2017. Overcoming catastrophic forgetting in neural networks. Proceedings of the National Academy of Sciences, 114(13):3521-3526.

Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity-promoting objective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 110-119.

Yanran Li, Hui Su, Xiaoyu Shen, Wenjie Li, Ziqiang Cao, and Shuzi Niu. 2017. DailyDialog: A manually labelled multi-turn dialogue dataset. In Proceedings of the 8th International Joint Conference on Natural Language Processing, pages 986-995.

Chia-Wei Liu, Ryan Lowe, Iulian Vlad Serban, Mike Noseworthy, Laurent Charlin, and Joelle Pineau. 2016. How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2122-2132.

Ruibo Liu, Ge Zhang, Xinyu Feng, and Soroush Vosoughi. 2022a. Aligning generative language models with human values. In Findings of the Association for Computational Linguistics: NAACL 2022, pages 241-252.

Yixin Liu, Pengfei Liu, Dragomir Radev, and Graham Neubig. 2022b. BRIO: Bringing order to abstractive summarization. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2890-2903.

Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, Xu Jiang, Karl Cobbe, Tina Eloundou, Gretchen Krueger, Kevin Button, Matthew Knight, Benjamin Chess, and John Schulman. 2021. WebGPT: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332.

Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. 2022. Training language models to follow instructions with human feedback. arXiv preprint arXiv:2203.02155.

Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language understanding by generative pre-training. Technical report, OpenAI.

Hannah Rashkin, Eric Michael Smith, Margaret Li, and Y-Lan Boureau. 2019. Towards empathetic open-domain conversation models: A new benchmark and dataset. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5370-5381.

Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M Smith, Y-Lan Boureau, and Jason Weston. 2021. Recipes for building an open-domain chatbot. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics.

Kurt Shuster, Jack Urbanek, Emily Dinan, Arthur Szlam, and Jason Weston. 2020. Deploying lifelong open-domain dialogue learning. arXiv preprint arXiv:2008.08076.

Eric Smith, Orion Hsu, Rebecca Qian, Stephen Roller, Y-Lan Boureau, and Jason Weston. 2022. Human evaluation of conversations is an open problem: Comparing the sensitivity of various methods for evaluating dialogue agents. In Proceedings of the 4th Workshop on NLP for Conversational AI, pages 77-97.

Eric Michael Smith, Mary Williamson, Kurt Shuster, Jason Weston, and Y-Lan Boureau. 2020. Can you put it all together: Evaluating conversational agents' ability to blend skills. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2021-2030.

Romal Thoppilan, Daniel De Freitas, Jamie Hall, Noam Shazeer, Apoorv Kulshreshtha, Heng-Tze Cheng, Alicia Jin, Taylor Bos, Leslie Baker, Yu Du, YaGuang Li, Hongrae Lee, Huaixiu Steven Zheng, Amin Ghafouri, Marcelo Menegali, Yanping Huang, Maxim Krikun, Dmitry Lepikhin, James Qin, Dehao Chen, Yuanzhong Xu, Zhifeng Chen, Adam Roberts, Maarten Bosma, Vincent Zhao, Yanqi Zhou, Chung-Ching Chang, Igor Krivokon, Will Rusch, Marc Pickett, Pranesh Srinivasan, Laichee Man, Kathleen Meier-Hellstern, Meredith Ringel Morris, Tulsee Doshi, Renelito Delos Santos, Toju Duke, Johnny Soraker, Ben Zevenbergen, Vinodkumar Prabhakaran, Mark Diaz, Ben Hutchinson, Kristen Olson, Alejandra Molina, Erin Hoffman-John, Josh Lee, Lora Aroyo, Ravi Rajakumar, Alena Butryna, Matthew Lamm, Viktoriya Kuzmina, Joe Fenton, Aaron Cohen, Rachel Bernstein, Ray Kurzweil, Blaise Aguera-Arcas, Claire Cui, Marian Croak, Ed Chi, and Quoc Le. 2022. LaMDA: Language models for dialog applications. arXiv preprint arXiv:2201.08239.

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 5998-6008.

Yida Wang, Pei Ke, Yinhe Zheng, Kaili Huang, Yong Jiang, Xiaoyan Zhu, and Minlie Huang. 2020. A large-scale Chinese short-text conversation dataset. In CCF International Conference on Natural Language Processing and Chinese Computing, pages 91-103.

Jing Xu, Megan Ung, Mojtaba Komeili, Kushal Arora, Y-Lan Boureau, and Jason Weston. 2022. Learning new skills after deployment: Improving open-domain internet-driven dialogue with human feedback. arXiv preprint arXiv:2208.03270.

Sanghyun Yi, Rahul Goel, Chandra Khatri, Alessandra Cervone, Tagyoung Chung, Behnam Hedayatnia, Anu Venkatesh, Raefer Gabriel, and Dilek Hakkani-Tur. 2019. Towards coherent and engaging spoken dialog response generation using automatic conversation evaluators. In Proceedings of the 12th International Conference on Natural Language Generation, pages 65-75.

Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018. Personalizing dialogue agents: I have a dog, do you have pets too? In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, pages 2204-2213.

Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. BERTScore: Evaluating text generation with BERT. In International Conference on Learning Representations.

Han Zhou, Xinchao Xu, Wenquan Wu, Zhengyu Niu, Hua Wu, Siqi Bao, Fan Wang, and Haifeng Wang. 2022. Link the world: Improving open-domain conversation with dynamic spatiotemporal-aware knowledge. arXiv preprint arXiv:2206.14000.

Hao Zhou, Pei Ke, Zheng Zhang, Yuxian Gu, Yinhe Zheng, Chujie Zheng, Yida Wang, Chen Henry Wu, Hao Sun, Xiaocong Yang, Bosi Wen, Xiaoyan Zhu, Minlie Huang, and Jie Tang. 2021. EVA: An open-domain Chinese dialogue system with large-scale generative pre-training. arXiv preprint arXiv:2108.01547.

Li Zhou, Jianfeng Gao, Di Li, and Heung-Yeung Shum. 2020. The design and implementation of XiaoIce, an empathetic social chatbot. Computational Linguistics, 46(1):53-93.

# A Dataset Details

# A.1 Annotation Interface

The original annotation interface of Diamante is in Chinese, as shown in Figure 5. The annotator first crafts the dialogue opening and then selects or amends the model-generated candidate responses to continue the conversation. The left-hand area displays the dialogue context and the input box. The top right-hand part provides a brief task description and a link to the detailed guidelines. The bottom right-hand part lists some inspiring topics or model-generated candidate responses.

# A.2 Quality Control

To ensure the annotation quality of the Diamante dataset, we designed and followed a rigorous quality control process. We engaged a vendor company to recruit experienced annotators, instructed them with detailed guidelines, set up admission tests, answered questions in an online shared room, and conducted regular reviews during annotation. After annotation, we asked data experts to review all collected conversations and remove any conversation deemed ineligible by an expert.

# A.3 Topic Visualization

The topic visualization of the Diamante dataset is displayed in Figure 4. There are 26 categories in the topic tagger, and the Diamante dataset covers all of them. The top five topics are Society (23%), Entertainment (11%), People (10%), Education (8%), and Food & Drink (8%), which are in line with our daily life.


Figure 4: Topic visualization of the Diamante dataset.

# B Further Discussions

# B.1 More Exploration on Joint Training

As shown in Table 5, the Diamante dataset and joint training paradigm bring significant improvements. To further analyze the effects of joint training, we carry out a pairwise comparison between the models with and without joint training (both are PLATO-XL trained on the Diamante dataset). We ask crowd-sourcing workers to compare the self-chat conversations generated by these two models and select the preferred one. The comparison in Figure 6 (upper bar) exhibits that the joint training paradigm is crucial to boosting the open-domain chatbot.

In Diamante, the joint training leverages the implicit human preference revealed in the data collection, i.e., $r_{\mathcal{H}} > r_{\mathcal{M}}$. We also explore applying the joint training to other conventional dialogue datasets, taking DuSinc (Zhou et al., 2022) as an example. To formulate training samples for the preference ranking $r_{\mathcal{H}} > r_{\mathcal{M}} > r_{\mathcal{R}}$, PLATO-XL is employed to simulate the model-generated responses. Two models (PLATO-XL with and without joint training) are trained on the DuSinc dataset. We randomly select 100 samples from the test set for static evaluation and ask crowd-sourcing workers to compare the responses generated by these two models. The comparison in Figure 6 (bottom bar) verifies the effectiveness and generality of the joint training paradigm.

# B.2 Safety under Adversarial Attack

The main experiments reveal that Diamante achieves better safety on normal/insensitive topics. To further analyze the safety performance under adversarial attacks, we asked annotators to interact with PLATO-XL on sensitive topics and induce unsafe responses from the model. The annotators were then asked to amend these unsafe responses into safe ones. These sensitive topics are designed and selected according to Chinese cultural and social norms, including harmful speech (e.g., offensive content, self-harm suggestions, and personal attacks), group discrimination (e.g., region, gender, disability, and religion), misleading information (e.g., political controversies, ethnic division, and conspiracy theories), and so on.

In total, we collected 1,000 samples (each consisting of an adversarial dialogue context, the original unsafe response, and the amended safe response). We employ these samples to evaluate Diamante's safety under adversarial attacks. The automatic evaluation results in Figure 7 suggest that Diamante is adept at selecting safe responses. We also randomly selected 100 samples and employed crowd-sourcing workers to evaluate the generated responses. The results in Table 7 reveal that Diamante achieves a remarkable safety improvement, with 76% of responses identified as safe. Even though Diamante is only trained with insensitive conversations, it absorbs human preferences and maintains good safety performance under adversarial attacks.




Figure 5: Diamante's annotation interface. Upper: crafting the dialogue opening. Bottom: selecting or amending the model-generated candidate responses to continue the conversation.



Figure 6: Exploration of joint training on the Diamante or DuSinc dataset.

# B.3 Automatic Dialogue Evaluation

We also carry out automatic evaluation with rule-based and model-based metrics, including BLEU-2/4 (Chen and Cherry, 2014), Distinct-1/2 (Li et al., 2016), Unigram F1 (Dinan et al., 2019), and BERTScore (Zhang et al., 2019). The automatic evaluation results in Table 8 are inconsistent with the human evaluation results in Table 2; note that human evaluation remains the gold standard for open-domain chit-chat. The difference between Diamante and PLATO-XL is minor under automatic metrics, whereas Diamante significantly improves over PLATO-XL in human evaluation. (As an example of these metrics, Distinct-1/2 can be computed as sketched below.)

<table><tr><td></td><td>Safe</td><td>Unsafe</td></tr><tr><td>PLATO-XL</td><td>0%</td><td>100%</td></tr><tr><td>PLATO-XL (Diamante)</td><td>76%</td><td>24%</td></tr></table>

Table 7: Human evaluation of safe response generation under adversarial attack.

<table><tr><td></td><td>BLEU-2/4</td><td>Distinct-1/2</td><td>Unigram F1</td><td>BERTScore</td></tr><tr><td>PLATO-XL</td><td>5.87 / 2.12</td><td>32.78 / 79.21</td><td>15.78</td><td>60.41</td></tr><tr><td>Human Reference</td><td>-</td><td>33.35 / 82.25</td><td>-</td><td>-</td></tr><tr><td>PLATO-XL (Diamante)</td><td>6.31 / 2.21</td><td>28.47 / 77.61</td><td>16.25</td><td>60.60</td></tr></table>

Table 8: Static evaluation with automatic metrics.


Figure 7: Automatic evaluation of safe response selection under adversarial attack.
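
The sketch below computes Distinct-n (Li et al., 2016), i.e., the number of distinct n-grams divided by the total number of n-grams, over the tokenized responses of a whole evaluation set.

```python
from typing import List

def distinct_n(responses: List[List[str]], n: int) -> float:
    """Distinct-n over a corpus of tokenized responses."""
    ngrams = [
        tuple(tokens[i:i + n])
        for tokens in responses
        for i in range(len(tokens) - n + 1)
    ]
    return len(set(ngrams)) / max(len(ngrams), 1)   # guard against empty input
```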

# B.4 Case Analysis

We provide two cherry-picked examples in Figure 8 and Figure 9 for qualitative analysis. In the self-chat example, the dialogue opening is about favorite food, and the model plays the roles of both partners to continue the conversation. The two speakers have an in-depth discussion on hot pot, covering favorite dishes and dipping sauce recipes. In the human-bot chat example, the bot expresses its opinion on the ideal partner and maintains it well throughout the multi-turn conversation (i.e., that personality is more important). At the same time, the bot respects the other speaker's different opinion and exhibits good alignment with human values.

We provide two further examples, by PLATO-XL and XiaoIce, in Figure 10 and Figure 11. These two examples use the same starting utterances as the Diamante examples in Figure 8 and Figure 9.

# C Scoring Criteria in Human Evaluation

The criteria used in human evaluation are provided in Table 9.


Figure 8: Cherry-picked self-chat example.



Figure 9: Cherry-picked human-bot chat example.



Figure 10: Self-chat example by PLATO-XL.



Figure 11: Human-bot chat example by XiaoIce.

<table><tr><td>Score</td><td>Coherence</td></tr><tr><td rowspan="4">0</td><td>- The response is not related with the context.</td></tr><tr><td>- The response simply repeats the context.</td></tr><tr><td>- The response has obvious conflicts with the context.</td></tr><tr><td>- There are serious logic conflicts within the response.</td></tr><tr><td rowspan="2">1</td><td>- The response has minor conflicts with the context.</td></tr><tr><td>- There are some minor logic conflicts in the response.</td></tr><tr><td>2</td><td>- The response is coherent with the context.</td></tr><tr><td></td><td></td></tr><tr><td>Score</td><td>Informativeness</td></tr><tr><td rowspan="3">0</td><td>- The response doesn’t contain any information.</td></tr><tr><td>- This response just repeats the context and fails to bring any additional information.</td></tr><tr><td>- The information is invalid, as the coherence score is 0.</td></tr><tr><td rowspan="2">1</td><td>- The information has conflicts with common sense.</td></tr><tr><td>- There are factual errors in the response.</td></tr><tr><td>2</td><td>- The response has appropriate and correct information.</td></tr><tr><td></td><td></td></tr><tr><td>Score</td><td>Safety</td></tr><tr><td rowspan="2">0</td><td>- The response contains harmful, biased, or misleading content.</td></tr><tr><td>- The safety is invalid, as the coherence score is 0.</td></tr><tr><td>1</td><td>- There are risks that the response may make people feel offended or uncomfortable.</td></tr><tr><td>2</td><td>- The response is safe.</td></tr><tr><td></td><td></td></tr><tr><td>Score</td><td>Engagingness</td></tr><tr><td>0</td><td>- I don’t want to talk with this speaker.</td></tr><tr><td>1</td><td>- It is kind of boring, but it is still ok to talk with this speaker.</td></tr><tr><td>2</td><td>- I would like to talk with this speaker for a long conversation.</td></tr></table>
Table 9: Score details of metrics in human evaluation.

A For every submission:

A1. Did you describe the limitations of your work?
Section 9 (Limitations)

A2. Did you discuss any potential risks of your work?
Section 7

A3. Do the abstract and introduction summarize the paper's main claims?
Abstract and Section 1

A4. Have you used AI writing assistants when working on this paper?
Left blank.

B Did you use or create scientific artifacts?
Section 2

B1. Did you cite the creators of artifacts you used?
Section 2

B2. Did you discuss the license or terms for use and / or distribution of any artifacts?
Section 8

B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)?
Section 8

B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?
Appendix Section A

B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.?
Section 2 and Appendix Section A

B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.
Section 2

C Did you run computational experiments?
Section 4

C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used?
Section 4

C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values?
Section 4

C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?
Section 4

C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)?
Section 4 and Appendix Section B

D Did you use human annotators (e.g., crowdworkers) or research with human participants?
Section 2 and Section 4

D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.?
Section 2 and Appendix Section A

D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)?
Appendix Section A

D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used?
Appendix Section A

D4. Was the data collection protocol approved (or determined exempt) by an ethics review board?
Appendix Section A

D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data?
We engaged with a vendor company (whose core business is professional data annotation) to recruit experienced annotators. In total, 147 annotators participated in the dataset collection. These annotators have high school degrees or above. Due to privacy regulations, we could not get more detailed information on age, gender, or ethnicity from the vendor company.
2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24429ce6479e7f5c419a75fbb33a2a0771be7925b40580aa0da1b826bf5d98da
+size 980654
2023/Towards Boosting the Open-Domain Chatbot with Human Feedback/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/c15622fd-9732-4c6f-a455-96733517d658_content_list.json
ADDED
@@ -0,0 +1,1821 @@
[
  {"type": "text", "text": "Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language", "text_level": 1, "bbox": [115, 87, 884, 130], "page_idx": 0},
  {"type": "text", "text": "Shahla Farzana and Natalie Parde \nNatural Language Processing Laboratory \nDepartment of Computer Science \nUniversity of Illinois Chicago \n{sfarza3, parde}@uic.edu", "bbox": [332, 142, 668, 225], "page_idx": 0},
  {"type": "text", "text": "Abstract", "text_level": 1, "bbox": [260, 252, 342, 268], "page_idx": 0},
  {"type": "text", "text": "Health-related speech datasets are often small and varied in focus. This makes it difficult to leverage them to effectively support healthcare goals. Robust transfer of linguistic features across different datasets orbiting the same goal carries potential to address this concern. To test this hypothesis, we experiment with domain adaptation (DA) techniques on heterogeneous spoken language data to evaluate generalizability across diverse datasets for a common task: dementia detection. We find that adapted models exhibit better performance across conversational and task-oriented datasets. The feature-augmented DA method achieves a $22\\%$ increase in accuracy adapting from a conversational to task-specific dataset compared to a jointly trained baseline. This suggests promising capacity of these techniques to allow for productive use of disparate data for a complex spoken language healthcare task.", "bbox": [141, 282, 460, 569], "page_idx": 0},
  {"type": "text", "text": "1 Introduction", "text_level": 1, "bbox": [114, 583, 260, 600], "page_idx": 0},
  {"type": "text", "text": "Data-driven models of diverse conditions affecting spoken language abilities offer promising real-world benefits (Amini et al., 2022; Girard et al., 2022). However, the datasets available for developing these models are often small and disparate, spanning varied diagnostic and non-diagnostic tasks mapped to different taxonomies at conflicting granularities (Graham et al., 2020). This has largely constrained progress to models excelling in specialized settings (e.g., individuals with homogeneous language background describing a standardized image (Luz et al., 2020)). At the same time, it has created challenges in building more generalizable knowledge about language patterns associated with the condition of interest (Guo et al., 2021).", "bbox": [112, 612, 489, 852], "page_idx": 0},
  {"type": "text", "text": "Outside healthcare applications, domain adaptation (DA) has long been applied to increase the capacity of NLP systems to leverage meaningful information from diverse data (Kouw and Loog,", "bbox": [112, 854, 489, 919], "page_idx": 0},
  {"type": "text", "text": "2018). These techniques generally seek to harness data from one domain (the source) to improve performance in another (the target). Usually the target domain has little or no labeled data, while the source has a relatively large amount of labeled data. Despite the advantages offered by DA for many NLP problems, it has remained under-studied for healthcare tasks due to numerous complexities of healthcare data (Laparra et al., 2020). Nonetheless, most healthcare problems offer the ideal learning settings in which DA is designed to thrive.", "bbox": [507, 252, 884, 429], "page_idx": 0},
  {"type": "text", "text": "We present a systematic analysis of the use of DA for a low-resource healthcare problem that has recently been popular in the NLP community: dementia. We adopt a wide definition of dementia in our work, encompassing datasets pertaining to Alzheimer's disease or related dementia (ADRD) and age-related mild cognitive impairment (MCI), in line with current NLP community norms (Amini et al., 2022). Our research questions include:", "bbox": [507, 430, 884, 576], "page_idx": 0},
  {"type": "list", "sub_type": "text", "list_items": ["Q1. Can DA be used to exploit spoken language data pertaining to dementia from one domain, to improve its detection in other domains?", "Q2. If yes, does this offer performance improvements over simpler joint training?", "Q3. How do different linguistic features and class biases contribute to this performance?"], "bbox": [509, 588, 885, 728], "page_idx": 0},
  {"type": "text", "text": "We define domain in this study as a distinct dataset with supervised labels describing dementia status in some capacity. Data collection protocol and precise labeling taxonomy may vary across domains, making our task slightly more complex than related work that focused solely on differences in source language (Balagopalan et al., 2020b) or labeling taxonomy (Masrani et al., 2017). We find that DA can indeed support improved dementia detection across domains compared to joint training, and we identify key pivot features and factors", "bbox": [507, 741, 884, 919], "page_idx": 0},
  {"type": "page_number", "text": "11965", "bbox": [475, 927, 524, 940], "page_idx": 0},
  {"type": "footer", "text": "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics", "bbox": [226, 945, 769, 957], "page_idx": 0},
  {"type": "footer", "text": "Volume 1: Long Papers, pages 11965-11978", "bbox": [361, 958, 636, 971], "page_idx": 0},
  {"type": "footer", "text": "July 9-14, 2023 ©2023 Association for Computational Linguistics", "bbox": [295, 972, 700, 985], "page_idx": 0},
  {"type": "text", "text": "contributing to this success. It is our hope that continued study of DA in healthcare applications can further extend the boundaries of our understanding and promote impactful follow-up work.", "bbox": [112, 84, 489, 149], "page_idx": 1},
  {"type": "text", "text": "2 Related Work", "text_level": 1, "bbox": [114, 164, 268, 180], "page_idx": 1},
  {"type": "text", "text": "Most prior work on developing spoken language models of dementia has followed a common pattern, focusing on designing and evaluating dataset-specific approaches. This has included (most popularly) a picture description task (Balagopalan et al., 2020a; Yuan et al., 2020; Di Palo and Parde, 2019), as well as other datasets with more open-ended conversational speech (Li et al., 2022; Nasreen et al., 2021b; Luz et al., 2018). These models singularly focus on the source domain, with no expectation of deployment beyond that, opening questions about their ability to generalize beyond small, publicly available reference sets.", "bbox": [112, 192, 490, 400], "page_idx": 1},
  {"type": "text", "text": "The extent to which DA has been explored in this context is limited. Li et al. (2022) leveraged transfer learning, one form of DA that involves fine-tuning a model pretrained on a much larger dataset using the smaller target domain dataset, to study the perplexity ratio of normal and artificially degraded Transformer-based language models for dementia detection. Likewise, Balagopalan et al. (2020b) achieved performance boosts in detecting early signs of aphasia in cross-language settings compared to the unilingual baseline using optimal transport domain adaptation. A problem with transfer learning in many healthcare contexts is that target datasets are much smaller than for other NLP tasks for which the technique has demonstrated success. The benefits of transfer learning do not necessarily transfer (no pun intended) to ultra low-resource settings, where resulting models may be much less stable (Dodge et al., 2020).", "bbox": [115, 401, 489, 708], "page_idx": 1},
  {"type": "text", "text": "Other forms of DA that may be more suited to dementia detection and other very low-resource healthcare problems are feature-based and instance-based DA. Both were originally leveraged for smaller datasets closer in scale to (although still larger than) those available for dementia detection (Daumé III, 2007; Sun et al., 2016), making it a promising and perhaps under-appreciated alternative to transfer learning. Feature-based DA focuses on modifying the feature space of the source and target datasets in some way that promotes the classifier's ability to generalize across them. Masrani et al. (2017) experimented with two feature-based", "bbox": [112, 709, 489, 917], "page_idx": 1},
  {"type": "text", "text": "DA techniques to adapt separate domain subsets split from the same source dataset, DementiaBank (Becker et al., 1994). Instance-based DA focuses on reweighting instances based on their importance to the target domain task (Jiang and Zhai, 2007; Xia et al., 2014). It has not yet been studied for dementia detection. We build upon Masrani et al. (2017)'s promising findings by studying the effects of numerous feature-based and instance-based DA techniques across different dementia datasets with conversational and task-related speech samples.", "bbox": [507, 84, 885, 261], "page_idx": 1},
  {"type": "text", "text": "3 Methodology", "text_level": 1, "bbox": [507, 273, 658, 288], "page_idx": 1},
  {"type": "text", "text": "3.1 Task Definition", "text_level": 1, "bbox": [507, 298, 675, 313], "page_idx": 1},
  {"type": "text", "text": "For the scope of the work presented here we abstract dementia detection to the following scenario. Given a dataset with instances $X$ and labels $Y$ from some domain $D$, our label space $\\mathbf{y} = \\{d, c\\} \\in Y$ is drawn from the binary distribution of classes (e.g., {probable Alzheimer's, control} or {with dementia, without dementia}) present in $D$. We assign the class with an association most proximal to a dementia diagnosis (e.g., possible Alzheimer's or with dementia) to the dementia $(d)$ label, and the other class to the control $(c)$ label. Our goal is to predict $y_i \\in Y$ for an unseen instance $x_i$ with feature representation $\\mathbf{x}_i$, which may be modified from the original representation according to the applied DA approach.", "bbox": [507, 319, 885, 561], "page_idx": 1},
  {"type": "text", "text": "3.2 Data", "text_level": 1, "bbox": [507, 571, 596, 585], "page_idx": 1},
  {"type": "text", "text": "We use three publicly available datasets and one privately-held dataset, representing separate domains, to study DA in this context. The publicly available datasets, DementiaBank, ADReSS, and the Carolinas Conversation Collection, are the most widely used datasets for dementia detection research in the NLP community. They are also the only datasets for which public access is available. Characteristics of these datasets are provided in Table 1. In Figure 1, we provide samples from two of these datasets, quoted directly from Chinaei et al. (2017) and Davis et al. (2017), to illustrate language differences between task-oriented and conversational domains. Our privately-held dataset is used only for conditions requiring multiple source domains, explained in detail in §3.3.", "bbox": [507, 590, 884, 848], "page_idx": 1},
  {"type": "page_footnote", "text": "Researchers are still required to obtain permission from the dataset creators prior to using each of these datasets, via established processes that range from email request (Becker et al., 1994) to full review and approval by local and external Institutional Review Boards (Pope and Davis, 2011).", "bbox": [507, 857, 885, 917], "page_idx": 1},
  {"type": "page_number", "text": "11966", "bbox": [477, 927, 524, 940], "page_idx": 1},
  {"type": "table", "img_path": "images/c092754cc095802c56b4cd5b1c31f6a14c065c18e87eaab311a2ce477bd3db71.jpg", "table_caption": [], "table_footnote": ["Table 1: Descriptive dataset characteristics. The subscripts $d$ and $c$ refer to dementia and control, respectively. Length (L) is provided as average number of words per transcript. DB and CCC have differing # Participants (P) and # Transcripts (T) because some participants in those datasets had multiple recorded interviews. ADReSS is subdivided into standardized (tr)ain and (te)st partitions established by the dataset's creators."], "table_body": "<table><tr><td>Dataset</td><td></td><td>#P</td><td>#T</td><td>L</td><td>SD</td></tr><tr><td rowspan=\"2\">ADReSSd</td><td>tr</td><td>54</td><td>54</td><td>125.5</td><td>81.8</td></tr><tr><td>te</td><td>24</td><td>24</td><td>95.0</td><td>47.0</td></tr><tr><td rowspan=\"2\">ADReSSc</td><td>tr</td><td>54</td><td>54</td><td>134.7</td><td>59.4</td></tr><tr><td>te</td><td>24</td><td>24</td><td>120.0</td><td>72.0</td></tr><tr><td>DBd</td><td></td><td>162</td><td>243</td><td>124.8</td><td>67.9</td></tr><tr><td>DBc</td><td></td><td>99</td><td>303</td><td>133.9</td><td>67.4</td></tr><tr><td>CCCd</td><td></td><td>46</td><td>97</td><td>1320.7</td><td>1059.1</td></tr><tr><td>CCCc</td><td></td><td>36</td><td>192</td><td>776.9</td><td>469.7</td></tr><tr><td>ADRCd</td><td></td><td>3</td><td>3</td><td>444.7</td><td>132.6</td></tr><tr><td>ADRCc</td><td></td><td>82</td><td>82</td><td>786.4</td><td>338.3</td></tr></table>", "bbox": [115, 80, 487, 296], "page_idx": 2},
  {"type": "text", "text": "DementiaBank (DB). DB (Becker et al., 1994) is a publicly available compendium of audiorecordings of neuropsychological tests administered to healthy participants and patients with diagnosed dementia. It is the most widely used dementia detection dataset in the NLP community, and each audiorecording is paired with a manual transcription formatted using the CHAT transcription protocol (Macwhinney, 2009). We refer readers to Becker et al. (1994) for a detailed description of the dataset collection procedures and its overall composition.", "bbox": [112, 450, 487, 626], "page_idx": 2},
  {"type": "text", "text": "The neuropsychological tests include a picture description task from the Boston Diagnostic Aphasia Examination (Goodglass and Kaplan, 1972), often referred to as the \"Cookie Theft Picture Description Task.\" Participants are presented with a picture stimulus which depicts numerous events, central to which is a boy stealing a cookie from a jar. They are asked to describe everything they see occurring in the picture. The bulk of the dementia detection work conducted using DementiaBank has focused on the English-language interactions from this task. DB contains 169 subjects with probable Alzheimer's disease and 99 control subjects.", "bbox": [112, 629, 489, 838], "page_idx": 2},
  {"type": "text", "text": "Alzheimer's Dementia Recognition through Spontaneous Speech (ADReSS). ADReSS (Luz et al., 2021) is a subset of DB created for a series of shared tasks on dementia detection. Control and", "bbox": [112, 854, 487, 917], "page_idx": 2},
  {"type": "table", "img_path": "images/8886bfcb3f81398975fc6c0f58bc9d6201e95b2270cdf0717ab4f6b973ec1968.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td colspan=\"2\">DementiaBank</td></tr><tr><td>INV:</td><td>just tell me what happening in the picture ..</td></tr><tr><td>PAR:</td><td>the pearl [ :poor] [ *p:w] &mo moms gettin(g) her wet [/ ] feet wet (be)cause she thinking of days gone by and then the water run . [ + gram ] .</td></tr><tr><td>PAR:</td><td>( ) and &uh that boy whether he knows or not hes gonna [ : going to] crack his head on the back of that counter trying to get too many cookies out . .</td></tr></table>", "bbox": [547, 80, 835, 197], "page_idx": 2},
  {"type": "table", "img_path": "images/eba166538c3060833c5b9febe7c354511b024791ddbd5d43ad0eff48a7dc6f3f.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td colspan=\"2\">Carolinas Conversation Collection</td></tr><tr><td>INV:</td><td>was it just (overlap)</td></tr><tr><td>PAR:</td><td>um, my doctor was telling me all kind of little things, been so long, I forgot now. But, um, my nerves was bad.</td></tr><tr><td>INV:</td><td>Your nerves?</td></tr><tr><td>PAR:</td><td>Um hmm. And, um, I had a little heart failure. Um hmm. And, um, --- (long pause) that all, what else he tell me that was wrong? He say, \"You got to stop,\" he just didn't tell me then, (overlap)</td></tr></table>", "bbox": [547, 206, 835, 322], "page_idx": 2},
  {"type": "text", "text": "Figure 1: Characteristic language samples from DB (Chinaei et al., 2017) and CCC (Davis et al., 2017).", "bbox": [507, 332, 880, 361], "page_idx": 2},
  {"type": "text", "text": "dementia subjects are matched in terms of age and gender, resulting in a balanced set of 156 samples (78 with dementia and 78 controls) split into training and test. The goal in developing ADReSS was to eliminate possible biases that may arise due to label and demographic imbalance in the original DB, at the expense of resulting in an ultimately smaller dataset. Its existence presents an interesting opportunity for comparison of balanced and unbalanced versions of the same source data. Since these datasets are drawn from the same source, we do not adapt DB to ADReSS or vice versa.", "bbox": [507, 388, 884, 583], "page_idx": 2},
  {"type": "text", "text": "Carolinas Conversation Collection (CCC). CCC (Pope and Davis, 2011) is not derived from a neuropsychological task; instead, it focuses on English conversational speech. The dataset, collected by researchers studying language and healthcare across numerous institutions, contains 646 recorded interviews of 48 elderly cognitively normal individuals with non-dementia related conditions, and 284 individuals with dementia. Interview topics vary considerably. Members of the cohort without dementia have one interview with a young clinical professional and one with a demographically similar community peer, whereas members of the cohort with dementia have anywhere from 1-10 interviews with researchers and student visitors. The target focus of the conversational interviews is on eliciting autobiographical narrative pertaining to health and wellness. Although much less commonly used in the NLP community, it has recently been included in studies that focus on the intersection", "bbox": [507, 596, 884, 917], "page_idx": 2},
  {"type": "page_number", "text": "11967", "bbox": [477, 927, 524, 940], "page_idx": 2},
  {"type": "text", "text": "between interaction patterns and dementia status (Nasreen et al., 2021a), dementia-related linguistic anomalies in human language (Li et al., 2022), and so on. We used a transcribed subset of this corpus.", "bbox": [112, 84, 489, 165], "page_idx": 3},
  {"type": "text", "text": "Alzheimer's Disease Research Center (ADRC). ADRC is a new, privately held dataset containing audiorecordings and matched transcriptions for a population of 85 elderly participants. Audiorecordings were collected during a structured narrative storytelling task, in which participants were asked to describe a memorable event from their young adulthood. Diagnoses were provided by trained psychiatrists. Audiorecordings were transcribed in a semi-automated manner, with an initial pass completed using the Vosk² speech recognition toolkit and a follow-up pass during which trained undergraduates manually corrected errors in the transcripts. Although not yet publicly available, plans are in place to release this dataset following guidelines created in concert with our psychiatric collaborators in an approved protocol from the Institutional Review Board at the University of California San Diego. We encourage interested parties to contact us for additional details.", "bbox": [112, 178, 489, 500], "page_idx": 3},
  {"type": "text", "text": "3.3 Domain Adaptation", "text_level": 1, "bbox": [112, 514, 315, 531], "page_idx": 3},
  {"type": "text", "text": "To answer our research questions defined in §1, we experimented with feature-based and instance-based DA algorithms. We focused on these techniques for two reasons. First, most dementia detection models to date are feature-based, owing in part to clinical interest in the characteristic language use by people with dementia. Second, the size of available dementia detection datasets (see Table 1) precludes the use of the same types of deep learning models that are common in many other NLP tasks. The prevalence of smaller scale, feature-based models suggests that these DA techniques hold greater immediate task relevancy.", "bbox": [112, 539, 489, 749], "page_idx": 3},
  {"type": "text", "text": "AUGMENT. AUGMENT is a straightforward feature-based DA algorithm that has been shown to be effective on a wide range of datasets and tasks (Daumé III, 2007). It augments the feature space by making \"source-only,\" \"target-only,\" and \"shared\" copies of each feature, effectively tripling the feature set using the following formulation where $\\phi^{\\mathrm{s}}, \\phi^{\\mathrm{t}}: X \\to \\check{X}$ represent mappings for", "bbox": [112, 762, 489, 891], "page_idx": 3},
  {"type": "text", "text": "the source and target data, respectively:", "bbox": [507, 84, 803, 99], "page_idx": 3},
  {"type": "equation", "text": "\n$$\n\\phi^{\\mathbf{s}}(\\mathbf{x}_{\\mathbf{i}}) = \\left\\langle \\mathbf{x}_{\\mathbf{i}}, \\mathbf{0}, \\mathbf{x}_{\\mathbf{i}} \\right\\rangle, \\quad \\phi^{\\mathbf{t}}(\\mathbf{x}_{\\mathbf{i}}) = \\left\\langle \\mathbf{0}, \\mathbf{x}_{\\mathbf{i}}, \\mathbf{x}_{\\mathbf{i}} \\right\\rangle \\tag{1}\n$$\n", "text_format": "latex", "bbox": [521, 114, 882, 131], "page_idx": 3},
  {"type": "text", "text": "In the formulation above, $\\check{X} = \\mathbb{R}^{3F}$ is then the augmented version of the feature space $X = \\mathbb{R}^F$. Empty vectors are filled with $\\mathbf{0} = \\langle 0,0,\\dots,0\\rangle \\in \\mathbb{R}^F$. The motivation behind AUGMENT is intuitive. If a column contains a feature that correlates with the class label in both the target and source data, the learning algorithm will weight the shared column more heavily and reduce the weight on the target-only and source-only feature copies, reducing their importance to the model. However, if a feature correlates with the class label only with target (or source) data, the learning algorithm will increase the weight of the target-only (or source-only) column and reduce the weight of the others. The onus is thus left to the model to learn feature importance with respect to the domains.", "bbox": [507, 145, 884, 404], "page_idx": 3},
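The AUGMENT mapping in Eq. (1) is mechanical enough to pin down in a few lines of NumPy. The sketch below is illustrative rather than the authors' code; the array names and the `augment` helper are hypothetical.

```python
import numpy as np

def augment(X, domain):
    """Daumé III (2007) feature augmentation for one source and one target domain.

    Maps each row of X (n x F) into R^{3F}: <x, 0, x> for source rows and
    <0, x, x> for target rows, per Eq. (1).
    """
    n, F = X.shape
    zeros = np.zeros((n, F))
    if domain == "source":
        return np.hstack([X, zeros, X])   # source-only, target-only, shared
    if domain == "target":
        return np.hstack([zeros, X, X])
    raise ValueError("domain must be 'source' or 'target'")

# Usage: stack the augmented source and target sets, then train any linear model.
Xs, Xt = np.random.rand(6, 4), np.random.rand(3, 4)   # hypothetical feature matrices
X_train = np.vstack([augment(Xs, "source"), augment(Xt, "target")])
```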
  {"type": "text", "text": "MULTIAUGMENT. We extend AUGMENT to accommodate multiple source domains following guidelines sketched out by Daumé III (2007), and refer to the technique as MULTIAUGMENT. As in the two-domain case, we expand the feature space, but this time to $\\mathbb{R}^{(K + 1)F}$ where $K$ is the total number of domains. The cardinality $(K + 1)F$ represents a distinct feature set $F$ for each domain $k_{i} \\in K$, plus the same shared feature space introduced previously. For our specific case we test this method with two source domains, creating the following mappings to transform from $\\mathbb{R}^F$ to $\\mathbb{R}^{4F}$:", "bbox": [507, 414, 882, 608], "page_idx": 3},
  {"type": "equation", "text": "\n$$\n\\begin{array}{l} \\phi^{\\mathbf{s}_1}(\\mathbf{x}_{\\mathbf{i}}) = \\langle \\mathbf{x}_{\\mathbf{i}}, \\mathbf{0}, \\mathbf{0}, \\mathbf{x}_{\\mathbf{i}} \\rangle \\\\ \\phi^{\\mathbf{s}_2}(\\mathbf{x}_{\\mathbf{i}}) = \\langle \\mathbf{0}, \\mathbf{x}_{\\mathbf{i}}, \\mathbf{0}, \\mathbf{x}_{\\mathbf{i}} \\rangle \\\\ \\phi^{\\mathbf{t}}(\\mathbf{x}_{\\mathbf{i}}) = \\langle \\mathbf{0}, \\mathbf{0}, \\mathbf{x}_{\\mathbf{i}}, \\mathbf{x}_{\\mathbf{i}} \\rangle \\end{array} \\tag{2}\n$$\n", "text_format": "latex", "bbox": [606, 623, 880, 678], "page_idx": 3},
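The multi-source generalization follows the same block pattern. A minimal sketch under the assumption of 0-based domain indices with the target as the last domain:

```python
import numpy as np

def multi_augment(X, k, K):
    """MULTIAUGMENT sketch: map rows of X (n x F) into R^{(K+1)F}.

    Block k holds the domain-specific copy of x; the final block is shared
    across all K domains (the target can simply be the last domain index).
    """
    n, F = X.shape
    out = np.zeros((n, (K + 1) * F))
    out[:, k * F:(k + 1) * F] = X   # domain-specific block
    out[:, K * F:] = X              # shared block
    return out

# Two source domains plus a target (K = 3), reproducing Eq. (2):
Xs1, Xs2, Xt = (np.random.rand(4, 5) for _ in range(3))
X_train = np.vstack([multi_augment(Xs1, 0, 3),
                     multi_augment(Xs2, 1, 3),
                     multi_augment(Xt, 2, 3)])
```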
  {"type": "text", "text": "TRADABOOST. TRADABOOST is a supervised instance-based DA method (Dai et al., 2007) that extends the AdaBoost classification algorithm (Freund and Schapire, 1997) for transfer learning. The method is based on a \"reverse boosting\" principle, where the weights of poorly predictive source instances are decreased at each boosting iteration and the weights of target instances are simultaneously increased. The guiding intuition is that instances with large weights (including source instances that are more distributionally similar to the target domain instances) can then play a greater role in training the learning algorithm. We used the TRADABOOST implementation in Python's adapt", "bbox": [507, 694, 882, 917], "page_idx": 3},
  {"type": "page_footnote", "text": "2https://alphacephei.com/vosk/", "bbox": [134, 903, 364, 917], "page_idx": 3},
  {"type": "page_number", "text": "11968", "bbox": [477, 927, 524, 940], "page_idx": 3},
  {"type": "table", "img_path": "images/f3e1cd0eb68bd17dc7a14709df27252c8da7c42da5efbe9e1bdb6ee062bfac71.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td>Group</td><td># Features</td><td>Category</td></tr><tr><td>POS</td><td>12</td><td>l</td></tr><tr><td>CFG</td><td>12</td><td>l</td></tr><tr><td>Syntac. Complexity</td><td>16</td><td>l</td></tr><tr><td>NER</td><td>10</td><td>l</td></tr><tr><td>Vocab. Richness</td><td>6</td><td>l</td></tr><tr><td>SUBTL</td><td>1</td><td>l</td></tr><tr><td>Semantic</td><td>5</td><td>s</td></tr><tr><td>Acoustic</td><td>25</td><td>a</td></tr></table>", "bbox": [115, 80, 485, 243], "page_idx": 4},
  {"type": "text", "text": "Table 2: Descriptive feature statistics. Category refers to the high-level categorization applied to features when performing experiments: $l$, $s$, and $a$ are lexicosyntactic, semantic, and acoustic features, respectively.", "bbox": [112, 253, 489, 311], "page_idx": 4},
  {"type": "text", "text": "package<sup>3</sup> to implement this technique.", "bbox": [112, 336, 400, 353], "page_idx": 4},
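A minimal sketch of how the adapt package's `TrAdaBoost` class (footnote 3) is typically wired up. The constructor arguments shown follow adapt's documented interface, but treat the exact signature as an assumption and check the linked docs.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from adapt.instance_based import TrAdaBoost

# Hypothetical source (Xs, ys) and small labeled target (Xt, yt) sets.
Xs, ys = np.random.rand(100, 20), np.random.randint(0, 2, 100)
Xt, yt = np.random.rand(20, 20), np.random.randint(0, 2, 20)

model = TrAdaBoost(estimator=LogisticRegression(max_iter=1000),
                   n_estimators=10, Xt=Xt, yt=yt)
model.fit(Xs, ys)        # reverse boosting: down-weight unhelpful source rows
y_pred = model.predict(Xt)
```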
  {"type": "text", "text": "3.4 Features", "text_level": 1, "bbox": [112, 365, 230, 380], "page_idx": 4},
  {"type": "text", "text": "We experimented with lexicosyntactic, semantic, and acoustic features, summarized below. All features are calculated using the participant's utterances or speech segments. Descriptive statistics indicating the number of features belonging to each group, as well as the group's high-level categorization (used when labeling experimental conditions), are presented in Table 2.", "bbox": [112, 386, 489, 514], "page_idx": 4},
  {"type": "text", "text": "Part-Of-Speech (POS) Tags. POS tags have proven useful for detecting dementia (Masrani, 2018), as well as primary progressive aphasia and two of its subtypes (Balagopalan et al., 2020b). We use the $\\mathsf{spaCy}^4$ core English POS tagger to capture the frequency of coarse-grained POS labels in a transcript using the Universal Dependencies tagset (Petrov et al., 2012). Frequency counts are normalized by the number of words in the transcript.", "bbox": [112, 525, 489, 671], "page_idx": 4},
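A minimal sketch of these normalized POS-frequency features with spaCy; the feature naming is illustrative.

```python
import spacy
from collections import Counter

nlp = spacy.load("en_core_web_sm")

def pos_features(transcript: str) -> dict:
    """Frequency of coarse-grained Universal POS tags, normalized by word count."""
    doc = nlp(transcript)
    counts = Counter(tok.pos_ for tok in doc)
    n_words = max(sum(counts.values()), 1)
    return {f"pos_{tag}": c / n_words for tag, c in counts.items()}

print(pos_features("the boy is taking a cookie from the jar"))
```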
  {"type": "text", "text": "CFG Features. Context-Free Grammar (CFG) features count how often a phrase structure rule (e.g., $NP \\rightarrow VP\\ PP$ or $NP \\rightarrow DT\\ NP$) occurs in an utterance parse tree. These feature counts are then normalised by the total number of nodes in the parse tree. CFG features have previously demonstrated success for dementia detection (Masrani, 2018; Masrani et al., 2017). We extract parse trees using the Stanford parser (Qi et al., 2018), representing constituents using Penn Treebank constituent tags (Marcus et al., 1993).", "bbox": [112, 680, 489, 858], "page_idx": 4},
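For illustration, the same production counts can be computed from any constituency parse. The sketch below uses NLTK's `Tree` API on a hand-written parse rather than the Stanford parser, and normalizes by the subtree count as a stand-in for the node count.

```python
from collections import Counter
from nltk import Tree

# Illustrative parse string with Penn Treebank constituent tags.
parse = Tree.fromstring("(S (NP (DT the) (NN boy)) (VP (VBZ falls)))")

# Keep only phrase-structure rules (drop lexical productions like NN -> 'boy').
rules = Counter(str(p) for p in parse.productions() if p.is_nonlexical())
n_nodes = len(list(parse.subtrees()))   # normalizing constant
cfg_features = {rule: c / n_nodes for rule, c in rules.items()}
print(cfg_features)   # e.g. {'S -> NP VP': ..., 'NP -> DT NN': ...}
```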
  {"type": "text", "text": "Syntactic Complexity. Measures of syntactic complexity have proven effective for predicting dementia from speech (Masrani, 2018). We represent utterance complexity through a suite of features including parse tree depth, mean word length, mean sentence length, mean clause (noun or verb phrase) length, and number of clauses per sentence.", "bbox": [507, 84, 884, 197], "page_idx": 4},
  {"type": "text", "text": "Named Entity Recognition (NER) Tags. Although NER features have not been studied in prior work, we suspected that they may be a useful and relatively domain-agnostic way to encode broad structural patterns, following the previous success of other more general intent-based features (Farzana and Parde, 2022). We extracted named entity labels using a spaCy<sup>5</sup> model trained on the OntoNotes 5 corpus. This model produces the fine-grained named entity types present in the OntoNotes tagset (Pradhan et al., 2007). We included a frequency feature for each NER type. NER frequency counts were normalized by the total number of entities mentioned in the transcript.", "bbox": [507, 205, 884, 431], "page_idx": 4},
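A minimal sketch of the NER-frequency features, assuming a spaCy pipeline trained on OntoNotes (e.g., `en_core_web_sm`).

```python
import spacy
from collections import Counter

nlp = spacy.load("en_core_web_sm")   # trained on OntoNotes 5

def ner_features(transcript: str) -> dict:
    """Frequency per OntoNotes entity type, normalized by total entity mentions."""
    doc = nlp(transcript)
    counts = Counter(ent.label_ for ent in doc.ents)
    total = max(sum(counts.values()), 1)
    return {f"ner_{label}": c / total for label, c in counts.items()}

print(ner_features("I moved to Chicago with my sister Mary in 1963."))
```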
  {"type": "text", "text": "Vocabulary Richness Features. Existing research has shown that measures of vocabulary richness can be successfully leveraged to diagnose dementia (Masrani et al., 2017; Balagopalan et al., 2020a). We include a set of well-known lexical richness measures including type-token ratio (TTR), moving-average TTR (MATTR), mean segmental TTR (MSTTR), Maas index (Maas, 1972), the measure of textual lexical diversity (McCarthy, 2005, MTLD), and the hypergeometric distribution index (McCarthy and Jarvis, 2007, HD-D). We calculated each measure over the entire transcript using Python's lexicalrichness package.$^{6}$", "bbox": [507, 439, 884, 649], "page_idx": 4},
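A sketch using the lexicalrichness package (footnote 6); the method names and parameters below come from the package's documentation at the time of writing and should be treated as assumptions.

```python
from lexicalrichness import LexicalRichness

lex = LexicalRichness("the boy is taking a cookie the boy falls")
features = {
    "ttr": lex.ttr,                      # type-token ratio
    "mattr": lex.mattr(window_size=5),   # moving-average TTR
    "mtld": lex.mtld(threshold=0.72),    # measure of textual lexical diversity
    "hdd": lex.hdd(draws=5),             # hypergeometric distribution index (HD-D)
}
print(features)
```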
  {"type": "text", "text": "SUBTL Scores. SUBTL scores represent the frequency with which words are used in daily life (Brysbaert and New, 2009). They are derived from large corpora<sup>7</sup> of television and film subtitles spanning 50 million words. We treated tokens with the Penn Treebank POS tags PRP, PRP$, WP, and EX as stopwords and computed transcript-level SUBTL scores by averaging across all available word-level scores for the participant's speech.", "bbox": [507, 657, 882, 802], "page_idx": 4},
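A minimal sketch of the transcript-level SUBTL score; the `subtl` lookup table is a hypothetical stand-in for the SUBTLEX word frequencies, and the POS filter mirrors the stopword list above.

```python
STOP_POS = {"PRP", "PRP$", "WP", "EX"}
subtl = {"cookie": 12.4, "boy": 55.1, "jar": 8.3}   # hypothetical word-level scores

def subtl_score(tagged_tokens):
    """tagged_tokens: list of (word, Penn Treebank POS) pairs for the participant."""
    scores = [subtl[w.lower()] for w, pos in tagged_tokens
              if pos not in STOP_POS and w.lower() in subtl]
    return sum(scores) / len(scores) if scores else 0.0

print(subtl_score([("The", "DT"), ("boy", "NN"), ("took", "VBD"),
                   ("a", "DT"), ("cookie", "NN")]))
```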
  {"type": "text", "text": "Semantic Features. We measure semantic similarity between consecutive utterances by calculating the cosine similarity between the utterance", "bbox": [507, 810, 882, 859], "page_idx": 4},
  {"type": "page_footnote", "text": "$^{3}$ https://adapt-python.github.io/adapt/generated/adapt.instance_based.TrAdaBoost.html \n$^{4}$ https://spacy.io/usage/linguistic-features#pos-tagging", "bbox": [112, 866, 468, 917], "page_idx": 4},
  {"type": "page_footnote", "text": "$^{5}$ https://spacy.io/api/annotation#named-entities \n$^{6}$ https://pypi.org/project/lexicalrichness/ \n$^{7}$ http://www.lexique.org/", "bbox": [507, 866, 850, 917], "page_idx": 4},
  {"type": "page_number", "text": "11969", "bbox": [477, 927, 524, 940], "page_idx": 4},
  {"type": "text", "text": "vectors and then recording the proportion of distances below three thresholds (0, 0.3, 0.5). We used averaged TF-IDF vectors to represent each utterance. We also recorded the minimum and average cosine distance between utterances.", "bbox": [112, 84, 489, 164], "page_idx": 5},
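A minimal sketch of these semantic features with scikit-learn. Note it uses plain TF-IDF utterance vectors as a simplification of the averaged TF-IDF representation described above; names are illustrative.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_distances

def semantic_features(utterances):
    """Cosine distances between consecutive utterance vectors, summarized as
    threshold proportions plus the minimum and mean distance."""
    tfidf = TfidfVectorizer().fit_transform(utterances)
    dists = [cosine_distances(tfidf[i], tfidf[i + 1])[0, 0]
             for i in range(tfidf.shape[0] - 1)]
    feats = {f"prop_below_{t}": sum(d <= t for d in dists) / len(dists)
             for t in (0.0, 0.3, 0.5)}
    feats["min_dist"] = min(dists)
    feats["mean_dist"] = sum(dists) / len(dists)
    return feats

print(semantic_features(["the boy took a cookie",
                         "he took a cookie",
                         "the sink overflows"]))
```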
{
"type": "text",
"text": "Acoustic Features. Finally, prior work has found acoustic distinctions between subjects with and without dementia (Mastrani et al., 2017). We chunked the participant's speech segments from each audiorecording using Pydub prior to extracting acoustic features. We include prosody features (Dehak et al., 2007; Vásquez-Correa et al., 2018) from continuous speech based on duration (i.e., number of voiced segments per second and standard deviation of duration of unvoiced segments), extracted using the DiSVoice tool.",
"bbox": [112, 175, 489, 351],
"page_idx": 5
},
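A sketch of the Pydub chunking step (footnote 8); the timestamp format is an assumption, since each corpus encodes speaker segments differently:

```python
from pydub import AudioSegment

def participant_audio(recording_path, segments):
    """segments: assumed list of (start_sec, end_sec) timestamps for the
    participant's speech; returns their concatenated speech audio."""
    audio = AudioSegment.from_file(recording_path)
    speech = AudioSegment.empty()
    for start, end in segments:
        speech += audio[int(start * 1000):int(end * 1000)]  # pydub slices in ms
    return speech
```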
{
"type": "text",
"text": "4 Evaluation",
"text_level": 1,
"bbox": [112, 366, 243, 381],
"page_idx": 5
},
{
"type": "text",
"text": "4.1 Classification Settings",
"text_level": 1,
"bbox": [112, 393, 332, 409],
"page_idx": 5
},
{
"type": "text",
"text": "For our backbone classifier, we experimented $^{10}$ with support vector machine (SVM) and logistic regression (LR), implemented using sklearn. $^{11}$ For SVM, we used a polynomial kernel and held all other hyperparameters at their default settings except for the trade-off parameter $C$ . For LR, we also held all hyperparameters at their default settings. We selected LR and SVM due to their documented success at dementia detection using one or more of our datasets (Farzana and Parde, 2020; Masrani et al., 2017). We tuned our models using $K$ -fold stratified cross-validation on the training set, using the following values for the trade-off parameter $C$ : $\\{0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.5, 1\\}$ .",
"bbox": [112, 414, 489, 640],
"page_idx": 5
},
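In sklearn terms (footnote 11), the setup reduces to roughly the following sketch. Whether $C$ was searched for LR as well as SVM is our reading of the text, and the scoring metric and raised `max_iter` are practical assumptions:

```python
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

C_GRID = {"C": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.5, 1]}

# Polynomial-kernel SVM, all other hyperparameters at their defaults.
svm = GridSearchCV(SVC(kernel="poly"), C_GRID, scoring="f1", cv=5)
# Logistic regression; max_iter raised only to guarantee convergence.
lr = GridSearchCV(LogisticRegression(max_iter=1000), C_GRID, scoring="f1", cv=5)
```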
{
"type": "text",
"text": "We report the result for the parameter achieving the best performance, averaged across all five folds. We used stratified cross-validation to produce the results reported in all results tables. We maintained the same ratio between the target classes in all folds and in the full dataset, and shuffled samples for cross-validation such that all samples from the same participant remained in the same fold. This was done to prevent overfitting due to data leakage stemming from the same participant being present in multiple folds.",
"bbox": [112, 642, 489, 818],
"page_idx": 5
},
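Both constraints described above (stratified class ratios, and no participant split across folds) are captured by sklearn's StratifiedGroupKFold; a minimal sketch:

```python
from sklearn.model_selection import StratifiedGroupKFold

def participant_folds(X, y, participant_ids, n_splits=5, seed=0):
    """Yield (train_idx, test_idx) pairs with class-stratified folds in which
    all samples from one participant stay together (groups = participant IDs)."""
    cv = StratifiedGroupKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    yield from cv.split(X, y, groups=participant_ids)
```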
{
"type": "text",
"text": "4.2 Experimental Conditions",
"text_level": 1,
"bbox": [507, 84, 754, 99],
"page_idx": 5
},
{
"type": "text",
"text": "We compared each DA technique against three baseline models: a model jointly trained using samples from both the source and target data without applying any DA algorithms (JOINT), a model trained only on the target data (TARGET), and a model trained only on the source data (SOURCE). The training dataset(s) for our DA conditions varied depending on the technique being tested. AUGMENT and TRADABOOST were trained on data from a single source domain and the target domain, whereas MULTIAUGMENT was trained on data from two source domains and the target domain. All models, including the DA algorithms tested and our baseline models, were evaluated using the target domain test set.",
"bbox": [505, 107, 884, 348],
"page_idx": 5
},
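AUGMENT here is Daumé III's (2007) feature augmentation: each feature vector is tripled into shared, source-only, and target-only blocks, and the model is trained on the stacked augmented matrices. A minimal sketch:

```python
import numpy as np

def augment(X, domain):
    """Map X to <shared, source-only, target-only> feature blocks (Daumé III, 2007).
    domain is "source" or "target"."""
    zeros = np.zeros_like(X)
    if domain == "source":
        return np.hstack([X, X, zeros])
    return np.hstack([X, zeros, X])

# Training matrix: augmented source rows stacked on augmented target rows, e.g.
# X_train = np.vstack([augment(X_src, "source"), augment(X_tgt, "target")])
```

MULTIAUGMENT generalizes this scheme by giving each source domain its own block alongside the shared and target blocks.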
{
"type": "text",
"text": "We considered the following source $\\rightarrow$ target adaptations: CCC $\\rightarrow$ DB, DB $\\rightarrow$ CCC, CCC $\\rightarrow$ ADReSS, {ADRC, CCC} $\\rightarrow$ DB, {ADRC, DB} $\\rightarrow$ CCC, and {ADRC, CCC} $\\rightarrow$ ADReSS. For each DA technique, we also considered several combinations of feature subsets (refer to Table 2 for categorizations): $l$ , $l + s$ , and $l + s + a$ . MULTIAUGMENT only used $l$ and $l + s$ since ADRC does not provide speaker segmentation timestamps; thus, speech could not be extracted in the same way as other datasets, preventing use of acoustic features.",
"bbox": [507, 350, 884, 526],
"page_idx": 5
},
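For the instance-based condition, footnote 3 links the adapt implementation of TrAdaBoost (Dai et al., 2007). A hedged usage sketch — constructor argument names have varied across adapt versions, so verify against the linked documentation:

```python
from adapt.instance_based import TrAdaBoost
from sklearn.linear_model import LogisticRegression

def fit_tradaboost(X_src, y_src, X_tgt, y_tgt):
    # Source instances are iteratively down-weighted when they hurt target error.
    model = TrAdaBoost(LogisticRegression(max_iter=1000), n_estimators=10,
                       Xt=X_tgt, yt=y_tgt, random_state=0)
    return model.fit(X_src, y_src)
```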
{
"type": "text",
"text": "4.3 Results",
"text_level": 1,
"bbox": [507, 541, 613, 555],
"page_idx": 5
},
{
"type": "text",
"text": "We compared the conditions specified in $\\S 4.2$ using accuracy and $\\mathrm{F_1}$ , and report our experimental results in Tables 3, 4, and 5. Results are subdivided according to target domain, presenting results from conditions using DB, CCC, and ADRSS as the target domains, respectively.",
"bbox": [505, 564, 882, 659],
"page_idx": 5
},
{
"type": "text",
"text": "We find that MULTIAUGMENT clearly outperforms the baseline techniques in most cases and usually outperforms the single-source DA algorithms when DB and ADReSS are the target domains, although the best-performing feature subsets vary. This trend is less clear when CCC is the target domain, with AUGMENT approaching or exceeding the performance of MULTIAUGMENT. When task-oriented data (DB or ADReSS) was used as the target, we observed that the percentage of source data in the training set was lower than that in the target data. As a result, we suspect that adding more conversational data (such as that found in ADRC) to the source (CCC) may promote improved performance when adapting to task-oriented target domains.",
"bbox": [505, 661, 884, 917],
"page_idx": 5
},
{
"type": "page_footnote",
"text": "<sup>8</sup>https://pypi.org/project/pydub/",
"bbox": [136, 828, 381, 841],
"page_idx": 5
},
{
"type": "page_footnote",
"text": "<sup>9</sup>https://github.com/jcvasquezc/DisVoice",
"bbox": [136, 841, 431, 854],
"page_idx": 5
},
{
"type": "page_footnote",
"text": "<sup>10</sup>Code for our experiments: https://github.com/treena908/Domain_Adaptive_Dementia_Detection",
"bbox": [117, 854, 487, 879],
"page_idx": 5
},
{
"type": "page_footnote",
"text": "<sup>11</sup>https://scikit-learn.org/stable/",
"bbox": [134, 879, 388, 892],
"page_idx": 5
},
{
"type": "page_footnote",
"text": "<sup>12</sup>We found that $C = 1$ resulted in the best performance across all folds.",
"bbox": [115, 892, 485, 917],
"page_idx": 5
},
{
"type": "page_number",
"text": "11970",
"bbox": [477, 927, 524, 940],
"page_idx": 5
},
{
"type": "table",
"img_path": "images/63797b9391c70362a4ca456cd1ac696358b6f399120fbd23329b99fb61c4440b.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Model</td><td colspan=\"2\">LR</td><td colspan=\"2\">SVM</td></tr><tr><td>Acc.</td><td>F1</td><td>Acc.</td><td>F1</td></tr><tr><td>\\(SOURCE_{L+S}\\)</td><td>0.45(0.01)</td><td>0.02(0.04)</td><td>0.45(0.01)</td><td>0.01(0.01)</td></tr><tr><td>\\(TARGET_{L+S}\\)</td><td>0.74(0.05)</td><td>0.77(0.05)</td><td>0.67(0.06)</td><td>0.61(0.11)</td></tr><tr><td>\\(JOINT_{L+S}\\)</td><td>0.72(0.06)</td><td>0.74(0.06)</td><td>0.66(0.04)</td><td>0.64(0.06)</td></tr><tr><td>\\(AUGMENT_L\\)</td><td>0.72(0.04)</td><td>0.75▼(0.04)</td><td>0.70(0.04)</td><td>0.76▲(0.04)</td></tr><tr><td>\\(AUGMENT_{L+S}\\)</td><td>0.73(0.06)</td><td>0.75(0.05)</td><td>0.70(0.05)</td><td>0.76▲(0.04)</td></tr><tr><td>\\(AUGMENT_{L+S+A}\\)</td><td>0.72(0.05)</td><td>0.74(0.05)</td><td>0.69(0.05)</td><td>0.74▲(0.05)</td></tr><tr><td>\\(TRADABOOST_L\\)</td><td>0.66(0.05)</td><td>0.68(0.05)</td><td>0.66(0.06)</td><td>0.68(0.08)</td></tr><tr><td>\\(TRADABOOST_{L+S}\\)</td><td>0.60▼(0.05)</td><td>0.64▼(0.04)</td><td>0.63(0.06)</td><td>0.64(0.08)</td></tr><tr><td>\\(TRADABOOST_{L+S+A}\\)</td><td>0.55▼(0.05)</td><td>0.55▼(0.07)</td><td>0.65(0.05)</td><td>0.68(0.06)</td></tr><tr><td>\\(MULTIAUGMENT_L\\)</td><td>0.72▼(0.06)</td><td>0.75▼(0.06)</td><td>0.70(0.05)</td><td>0.77▲(0.05)</td></tr><tr><td>\\(MULTIAUGMENT_{L+S}\\)</td><td>0.75(0.05)</td><td>0.76▼(0.05)</td><td>0.70(0.05)</td><td>0.77▲(0.05)</td></tr></table>",
"bbox": [115, 82, 485, 431],
"page_idx": 6
},
{
"type": "text",
"text": "Both AUGMENT and MULTIAUGMENT outperform TRADABOOST, regardless of feature combination, across the board. We achieve maximum performance of $\\mathrm{F_1 = 0.77}$ on DB (using MULTIAUGMENTL+S with SVM), $\\mathrm{F_1 = 0.75}$ on CCC (using MULTIAUGMENTL with SVM), and $\\mathrm{F_1 = 0.77}$ on ADRSS (using AUGMENTL+S, MULTIAUGMENTL, and MULTIAUGMENTL+S with SVM). In Table 6, we report additional results from our highest-performing versions of each DA technique on the ADRSS test set (Luz et al., 2020). This facilitates straightforward comparison with external models by others who use this standardized test set. We find that $\\mathrm{AUGMENT}_{\\mathrm{L + S}}$ achieves similar results to those in Table 5.",
"bbox": [112, 678, 489, 917],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/5b3e61266ffd9d93d13628d37ea1cf3889433720f21c3d53c737106f4e282d3c.jpg",
"table_caption": [
"Table 3: Comparison of DA conditions when CCC is the source dataset and DB is the target dataset (standard deviation is reported inside parentheses). For MULTIAUGMENT conditions, ADRC and CCC are jointly used as the source dataset. Five-fold cross-validation is used in all cases with each fold having $40\\%$ source data $(46.3\\% \\text{class} d)$ and $60\\%$ target data $(55.5\\% \\text{class} d)$ . MULTIAUGMENT has $46.3\\%$ source data $(26.70\\% \\text{class} d)$ and $(53.70\\%)$ target data $(55.2\\% \\text{class} d)$ . ▲: Significantly better than the corresponding LR or SVM TARGET baseline, with $p < 0.05$ , using a paired $t$ -test. ▼: Significantly worse than the corresponding TARGET baseline, using the same parameters."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Model</td><td colspan=\"2\">LR</td><td colspan=\"2\">SVM</td></tr><tr><td>Acc.</td><td>F1</td><td>Acc.</td><td>F1</td></tr><tr><td>\\(SOURCE_{L+S}\\)</td><td>0.39(0.07)</td><td>0.51(0.05)</td><td>0.36(0.02)</td><td>0.51(0.02)</td></tr><tr><td>\\(TARGET_{L+S}\\)</td><td>0.83(0.07)</td><td>0.72(0.15)</td><td>0.80(0.05)</td><td>0.63(0.12)</td></tr><tr><td>\\(JOINT_{L+S}\\)</td><td>0.80(0.10)</td><td>0.66(0.18)</td><td>0.84(0.08)</td><td>0.73(0.16)</td></tr><tr><td>\\(AUGMENT_{L}\\)</td><td>0.85(0.07)</td><td>0.75(0.15)</td><td>0.85(0.06)</td><td>0.74(0.15)</td></tr><tr><td>\\(AUGMENT_{L+S}\\)</td><td>0.84(0.07)</td><td>0.73(0.15)</td><td>0.84(0.07)</td><td>0.73(0.15)</td></tr><tr><td>\\(AUGMENT_{L+S+A}\\)</td><td>0.80(0.06)</td><td>0.68(0.15)</td><td>0.80(0.05)</td><td>0.68(0.11)</td></tr><tr><td>\\(TRADABOOST_{L}\\)</td><td>0.80(0.05)</td><td>0.69(0.13)</td><td>0.84(0.07)</td><td>0.74(0.17)</td></tr><tr><td>\\(TRADABOOST_{L+S}\\)</td><td>0.79(0.05)</td><td>0.66(0.14)</td><td>0.84(0.06)</td><td>0.73(0.15)</td></tr><tr><td>\\(TRADABOOST_{L+S+A}\\)</td><td>0.78(0.06)</td><td>0.65(0.14)</td><td>0.83(0.08)</td><td>0.71(0.17)</td></tr><tr><td>\\(MULTIAUGMENT_{L}\\)</td><td>0.85(0.07)</td><td>0.74(0.15)</td><td>0.86▲(0.07)</td><td>0.75▲(0.16)</td></tr><tr><td>\\(MULTIAUGMENT_{L+S}\\)</td><td>0.84(0.07)</td><td>0.73(0.15)</td><td>0.85▲(0.07)</td><td>0.74▲(0.15)</td></tr></table>",
"bbox": [510, 82, 880, 441],
"page_idx": 6
},
{
"type": "text",
"text": "Table 4: Comparison of DA conditions when DB is the source dataset and CCC is the target dataset (standard deviation is reported inside parentheses). For MULTI-AUGMENT conditions, ADRC and DB are jointly used as the source dataset. Five-fold cross-validation is used in all cases with each fold having $70.3\\%$ source data $(55.5\\% \\text{class} d)$ and $29.7\\%$ target data $(33.6\\% \\text{class} d)$ . MULTI-AUGMENT has $73.1\\%$ source data $(48.2\\% \\text{class} d)$ and $56.9\\%$ target data $(33.6\\% \\text{class} d)$ . ▲: Significantly better than the corresponding LR or SVM TARGET baseline, with $p < 0.05$ , using a paired $t$ -test. ▼: Significantly worse than the corresponding TARGET baseline, using the same parameters.",
"bbox": [507, 451, 884, 636],
"page_idx": 6
},
{
"type": "text",
"text": "5 Analysis",
"text_level": 1,
"bbox": [507, 663, 616, 678],
"page_idx": 6
},
{
"type": "text",
"type": "text",
"bbox": [505, 690, 882, 850],
"page_idx": 6
},
{
"type": "text",
"text": "5.1 Feature Analysis",
"text_level": 1,
"bbox": [507, 865, 687, 879],
"page_idx": 6
},
{
"type": "text",
"text": "To find correspondences between source and target domain features, we analyzed the features in the",
"bbox": [507, 887, 880, 917],
"page_idx": 6
},
{
"type": "page_number",
"text": "11971",
"bbox": [477, 927, 522, 940],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/48d69127d5bf2c450c23e884f164aefb44310e7af4d83243f71f556b8de4ea9e.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Model</td><td colspan=\"2\">LR</td><td colspan=\"2\">SVM</td></tr><tr><td>Acc.</td><td>F1</td><td>Acc.</td><td>F1</td></tr><tr><td>\\(SOURCE_{L+S}\\)</td><td>0.52(0.04)</td><td>0.06(0.11)</td><td>0.510.04</td><td>0.03(0.09)</td></tr><tr><td>\\(TARGET_{L+S}\\)</td><td>0.80(0.13)</td><td>0.75(0.22)</td><td>0.68(0.12)</td><td>0.54(0.25)</td></tr><tr><td>\\(JOINT_{L+S}\\)</td><td>0.69(0.16)</td><td>0.64(0.24)</td><td>0.59(0.14)</td><td>0.47(0.25)</td></tr><tr><td>\\(AUGMENT_L\\)</td><td>0.77(0.12)</td><td>0.72(0.20)</td><td>0.78▲(0.10)</td><td>0.74▲(0.17)</td></tr><tr><td>\\(AUGMENT_{L+S}\\)</td><td>0.74▼(0.10)</td><td>0.68▼(0.21)</td><td>0.81▲(0.07)</td><td>0.77▲(0.15)</td></tr><tr><td>\\(AUGMENT_{L+S+A}\\)</td><td>0.75(0.06)</td><td>0.69(0.14)</td><td>0.80▲(0.15)</td><td>0.76▲(0.22)</td></tr><tr><td>\\(TRADABOOST_L\\)</td><td>0.72▼(0.14)</td><td>0.67▼(0.21)</td><td>0.77(0.13)</td><td>0.71▲(0.21)</td></tr><tr><td>\\(TRADABOOST_{L+S}\\)</td><td>0.76(0.13)</td><td>0.70(0.21)</td><td>0.76(0.13)</td><td>0.70▲(0.21)</td></tr><tr><td>\\(TRADABOOST_{L+S+A}\\)</td><td>0.76(0.12)</td><td>0.73(0.17)</td><td>0.70(0.10)</td><td>0.62(0.19)</td></tr><tr><td>\\(MULTIAUGMENT_L\\)</td><td>0.80(0.13)</td><td>0.75(0.22)</td><td>0.80▲(0.13)</td><td>0.77▲(0.20)</td></tr><tr><td>\\(MULTIAUGMENT_{L+S}\\)</td><td>0.75(0.14)</td><td>0.67(0.29)</td><td>0.81▲(0.14)</td><td>0.77▲(0.21)</td></tr></table>",
"bbox": [115, 80, 485, 426],
"page_idx": 7
},
{
"type": "text",
"text": "shared column from AUGMENT $_{\\mathrm{L} + \\mathrm{S} + \\mathrm{A}}$ using LR and a DB $\\rightarrow$ CCC domain adaptation mapping. We referred to these as pivot features. We computed the most important pivot features across source and target domain using l1-penalty with logistic regression.",
"bbox": [112, 657, 487, 753],
"page_idx": 7
},
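A sketch of the pivot-feature ranking: fit an L1-penalized LR on the shared (pivot) columns and read the signed coefficients as the direction of association with the class label (the regularization strength is an illustrative choice):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

def rank_pivot_features(X_shared, y, feature_names, k=10):
    lr = LogisticRegression(penalty="l1", solver="liblinear", C=0.1)
    lr.fit(X_shared, y)
    coefs = lr.coef_.ravel()
    top = np.argsort(-np.abs(coefs))[:k]  # largest |weight| first
    return [(feature_names[i], float(coefs[i])) for i in top]
```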
{
"type": "text",
"text": "We find that a subset of specific lexicosyntactic and acoustic pivot features, including the number of tokens, average phrase length, and standard deviation of the duration of unvoiced segments are highly positively correlated with the class labels in both the source and target domains. In contrast, the number of unique named entities, certain vocabulary richness and lexical frequency measures (MATTR and SUBTL score), and the number of voiced segments per second are highly negatively",
"bbox": [112, 758, 489, 919],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/141357120a61add1b4f638155f70167c50c49af2c6e4b24dd47b157217547c32.jpg",
"table_caption": [
"Table 5: Comparison of DA conditions when CCC is the source dataset and ADReSS train is the target dataset (standard deviation is reported inside parentheses). For MULTIAUGMENT conditions, ADRC and CCC are jointly used as the source dataset. Ten-fold cross-validation is used in all cases with each fold having $74.8\\%$ source data $(33.6\\%$ class $d)$ and $25.2\\%$ target data $(50\\%$ class $d)$ . MULTIAUGMENT has $79.4\\%$ source data $(26.70\\%$ class $d)$ and $20.6\\%$ target data $(50\\%$ class $d)$ . $\\triangle$ : Significantly better than the corresponding LR or SVM TARGET baseline, with $p < 0.05$ , using a paired $t$ -test. $\\nabla$ : Significantly worse than corresponding TARGET baseline, using the same parameters."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Model</td><td rowspan=\"2\">C</td><td colspan=\"2\">LR</td><td colspan=\"2\">SVM</td></tr><tr><td>Acc.</td><td>F1</td><td>Acc.</td><td>F1</td></tr><tr><td rowspan=\"2\">SOURCEL+S</td><td>d</td><td rowspan=\"2\">0.51</td><td>0.00</td><td rowspan=\"2\">0.51</td><td>0.00</td></tr><tr><td>c</td><td>0.68</td><td>0.68</td></tr><tr><td rowspan=\"2\">TARGETL+S</td><td>d</td><td rowspan=\"2\">0.72</td><td>0.65</td><td rowspan=\"2\">0.70</td><td>0.53</td></tr><tr><td>c</td><td>0.76</td><td>0.77</td></tr><tr><td rowspan=\"2\">JOINTL+S</td><td>d</td><td rowspan=\"2\">0.68</td><td>0.62</td><td rowspan=\"2\">0.72</td><td>0.70</td></tr><tr><td>c</td><td>0.73</td><td>0.75</td></tr><tr><td rowspan=\"2\">AUGMENTL+S</td><td>d</td><td rowspan=\"2\">0.77</td><td>0.70</td><td rowspan=\"2\">0.77</td><td>0.72</td></tr><tr><td>c</td><td>0.81</td><td>0.80</td></tr><tr><td rowspan=\"2\">MULTIAUGMENTL+S</td><td>d</td><td rowspan=\"2\">0.74</td><td>0.68</td><td rowspan=\"2\">0.74</td><td>0.68</td></tr><tr><td>c</td><td>0.79</td><td>0.79</td></tr><tr><td rowspan=\"2\">TRADABOOSTL+S</td><td>d</td><td rowspan=\"2\">0.74</td><td>0.70</td><td rowspan=\"2\">0.72</td><td>0.67</td></tr><tr><td>c</td><td>0.78</td><td>0.76</td></tr></table>",
"bbox": [510, 80, 880, 319],
"page_idx": 7
},
{
"type": "text",
"text": "Table 6: Evaluation on the standardized ADReSS test set with per-class (C) $\\mathrm{F}_{1}$ . CCC and ADReSS (train) are used as source and target data, respectively, when training with $46.1\\%$ source data (no class $d$ ) and $54.9\\%$ target data (55.5% class $d$ ). For MULTIAUGMENT, both CCC and ADRC are used as source (77.6% training data with $26.7\\%$ class $d$ ), with $32.4\\%$ target data (50% class $d$ ). We assessed statistical significance using McNemar's test, and found that no improvements were significantly different from the TARGET baseline.",
"bbox": [507, 328, 884, 470],
"page_idx": 7
},
{
"type": "text",
"text": "correlated with the class labels of both the source and target domains. Thus, these features offer particularly strong contributions to model performance across multiple domains.",
"bbox": [507, 498, 882, 562],
"page_idx": 7
},
{
"type": "text",
"text": "5.2 Domain-Specific Class Bias",
"text_level": 1,
"bbox": [507, 574, 769, 590],
"page_idx": 7
},
{
"type": "text",
"text": "As shown in Table 1, our domains vary in their class balance. Class imbalances are especially common in low-resource healthcare tasks since it is often challenging to recruit subjects with the target condition. When the source and target domains have varying class distribution, they are biased towards different class labels. This can create conditions such that the learning algorithm is able to capitalize upon class bias rather than real properties of the data to increase perceived performance. For instance, when adapting from $\\mathrm{CCC} \\rightarrow \\mathrm{DB}$ with the source dataset (CCC) having $33.6\\%$ instances belonging to class $d$ and the target dataset (DB) having $55.5\\%$ instances belonging to class $d$ , it is possible that the model trivially learns to predict class $d$ with greater frequency, without learning real feature distinctions between the classes.",
"bbox": [505, 596, 882, 868],
"page_idx": 7
},
{
"type": "text",
"text": "To investigate whether the improvements observed from DA in our case may simply be the product of domain-specific class biases, we con",
"bbox": [507, 871, 884, 919],
"page_idx": 7
},
{
"type": "page_number",
"text": "11972",
"bbox": [477, 927, 524, 940],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/3b9eae4c8da029dccaf86bc143a6ec621e4799a1cec72e311246af8063cb169f.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>Domain</td><td>Class</td><td>cb1</td><td>cb2</td><td>cb3</td><td>cb4</td></tr><tr><td rowspan=\"2\">CCC</td><td>d</td><td>72</td><td>57</td><td>42</td><td>28</td></tr><tr><td>c</td><td>28</td><td>43</td><td>58</td><td>72</td></tr><tr><td rowspan=\"2\">DB</td><td>d</td><td>72</td><td>57</td><td>42</td><td>28</td></tr><tr><td>c</td><td>28</td><td>43</td><td>58</td><td>72</td></tr></table>",
"bbox": [149, 80, 453, 165],
"page_idx": 8
},
{
"type": "table",
"img_path": "images/160c267ec29347ef94037fc18dadade8f9e944a3587c010602fdc39a930e2e35.jpg",
"table_caption": [
"Table 7: Distribution of instances across domains (CCC=source; DB=target) and classes within training folds for four different class biases."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Condition</td><td rowspan=\"2\">Model</td><td colspan=\"2\">LR</td><td colspan=\"2\">SVM</td></tr><tr><td>Acc.</td><td>F1</td><td>F1</td><td>Acc.</td></tr><tr><td rowspan=\"4\">Equal</td><td rowspan=\"2\">JOINTL+S</td><td>0.62</td><td>0.59</td><td>0.58</td><td>0.37</td></tr><tr><td>(0.09)</td><td>(0.14)</td><td>(0.08)</td><td>(0.20)</td></tr><tr><td rowspan=\"2\">AUGMENTL+S</td><td>0.64</td><td>0.63</td><td>0.65</td><td>0.64▲</td></tr><tr><td>(0.08)</td><td>(0.09)</td><td>(0.09)</td><td>(0.09)</td></tr><tr><td rowspan=\"4\">Consistent</td><td rowspan=\"2\">JOINTL+S</td><td>0.65</td><td>0.59</td><td>0.60</td><td>0.41</td></tr><tr><td>(0.08)</td><td>(0.18)</td><td>(0.08)</td><td>(0.20)</td></tr><tr><td rowspan=\"2\">AUGMENTL+S</td><td>0.67</td><td>0.64</td><td>0.65</td><td>0.63</td></tr><tr><td>(0.05)</td><td>(0.13)</td><td>(0.08)</td><td>(0.03)</td></tr></table>",
"bbox": [115, 231, 485, 381],
"page_idx": 8
},
{
"type": "text",
"text": "Table 8: Domain-specific class bias results (standard deviation is reported inside parentheses). $\\triangle$ : Significantly better than the corresponding LR or SVM JOINT baseline, with $p < 0.05$ , using a paired $t$ -test.",
"bbox": [112, 390, 489, 447],
"page_idx": 8
},
{
"type": "text",
"text": "duced an experiment analyzing performance of AUGMENT $_{\\mathrm{L + S}}$ (our best-performing model in terms of accuracy for the CC $\\rightarrow$ DB mapping, shown in Table 3) and JOINT $_{\\mathrm{L + S}}$ across class-biased and unbiased subsets of the original dataset. In our equal condition, both domains had perfectly class-balanced data in each training fold. In our consistent class bias condition, training folds had the varying class biases shown in Table 7. Each class bias setting was evaluated using five-fold cross-validation, and then those results were averaged. We report the results from this experiment in Table 8. We find that AUGMENT still outperforms JOINT in both conditions, answering the second part of Q3 by empirically demonstrating that class bias does not account for the performance improvements resulting from domain adaptation.",
"bbox": [112, 474, 489, 747],
"page_idx": 8
},
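Mechanically, the biased conditions amount to resampling each domain's training fold to a fixed class-$d$ proportion (Table 7); a sketch assuming binary labels with $1 = d$ and enough instances per class:

```python
import numpy as np

def rebias_fold(X, y, d_fraction, n, seed=0):
    """Subsample n training instances so that a d_fraction share carry class d."""
    rng = np.random.default_rng(seed)
    n_d = int(round(n * d_fraction))
    d_idx = rng.choice(np.flatnonzero(y == 1), size=n_d, replace=False)
    c_idx = rng.choice(np.flatnonzero(y == 0), size=n - n_d, replace=False)
    idx = np.concatenate([d_idx, c_idx])
    return X[idx], y[idx]
```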
{
"type": "text",
"text": "6 Discussion and Conclusions",
"text_level": 1,
"bbox": [112, 762, 386, 778],
"page_idx": 8
},
{
"type": "text",
"text": "Our work reveals intriguing findings on the use of DA for dementia detection. First, we find that DA can be successfully leveraged to improve feature-based dementia detection performance. This is the most comprehensive study of feature-based DA for this task, and the first to consider instance-based DA. We find that feature-based DA outperforms instance-based DA, and that an approach allowing",
"bbox": [112, 790, 489, 919],
"page_idx": 8
},
{
"type": "text",
"text": "for multiple source domains (MULTIAUGMENT) holds promise in many cases. In general, $\\mathrm{F}_1$ score is similar across target datasets, ranging from 0.76 (CCC) to 0.77 (DB and ADReSS).",
"bbox": [507, 84, 884, 148],
"page_idx": 8
},
{
"type": "text",
"text": "Our DA conditions also exhibit clear performance improvements over jointly training on the same data, offering further evidence to support the use of DA for this task. Finally, in follow-up studies on the importance of individual features and class biases in this setting, we find that pivot features pertaining to number of tokens, average phrase length, acoustic qualities, named entities, and measures of vocabulary richness and lexical frequency are particularly critical to strong performance. This suggests that these features may be particularly robust across domains. We also demonstrate that the performance of DA conditions relative to joint training is not due to domain-specific class bias, further strengthening our conclusions. In the future, we hope to conduct follow-up studies to further probe the limits and nuances of DA applied to this and other low-resource healthcare tasks.",
"bbox": [507, 149, 885, 439],
"page_idx": 8
},
{
"type": "text",
"text": "7 Limitations",
"text_level": 1,
"bbox": [509, 451, 645, 467],
"page_idx": 8
},
{
"type": "text",
"text": "Our work is limited by several factors. First, we conduct our work primarily using popular, publicly available dementia detection datasets, all of which are in English. Thus, it is unclear whether our findings generalize to other languages, especially with richer morphology where different predictive patterns may emerge. Second, due to the emphasis on feature-based models in most dementia detection work, we study only feature-based and instance-based DA approaches. Neural DA approaches may yield different findings, although they are less relevant for many current dementia detection approaches. Finally, we only study two backbone classification algorithms in our experiments. These classifiers are among the most common in prior work with our selected datasets; however, it may be the case that with a wider scope, other classification algorithms may yield different results. Collectively, these limitations present intriguing avenues for follow-up work.",
"bbox": [507, 476, 885, 800],
"page_idx": 8
},
{
"type": "text",
"text": "8 Ethical Considerations",
"text_level": 1,
"bbox": [507, 812, 741, 828],
"page_idx": 8
},
{
"type": "text",
"text": "This research was guided by a broad range of ethical considerations, taking into account factors associated with fairness, privacy, and intended use. Although many of these are described throughout the paper, we summarize those that we consider",
"bbox": [507, 839, 884, 917],
"page_idx": 8
},
{
"type": "page_number",
"text": "11973",
"bbox": [477, 927, 524, 940],
"page_idx": 8
},
{
"type": "text",
"text": "most critical in this section. It is our hope that by building a holistic understanding of these factors, we develop improved perspective of the challenges associated with the study of low-resource healthcare problems and the positive broader impacts that they may create.",
"bbox": [112, 84, 489, 181],
"page_idx": 9
},
{
"type": "text",
"text": "Data Privacy and Fairness. This research was approved by the Institutional Review Board at the University of Illinois Chicago. Access was granted for all datasets used in this research, and our use is governed by approved protocols unique to each dataset. DementiaBank, ADReSS, and the Carolina Conversations Collection are all publicly available following access request protocols specified by their governing organizations. We refer readers to the citations throughout this work if they are interested in obtaining access to this data. We are unable to share it directly, although we can share our processing scripts and other code to facilitate reproducibility of our work by others.",
"bbox": [115, 191, 487, 416],
"page_idx": 9
},
{
"type": "text",
"text": "ADRC is a privately-held dataset collected in collaboration with clinical partners under a rigorous set of guidelines governed by a separate, approved Institutional Review Board protocol at the University of California San Diego. This dataset will eventually be released, following further manual review to ensure full de-identification, but it cannot yet be released at this time. The data is currently stored on a password-protected server under VPN protection. To maximize reproducibility of our work by others unable to immediately gain access to this dataset, we limit the use of this dataset to a small set of experimental conditions (specifically, those using MULTIAUGMENT).",
"bbox": [115, 417, 489, 642],
"page_idx": 9
},
{
"type": "text",
"text": "Intended Use. Automated models for dementia detection from spoken language present potential benefits in real-world scenarios: they offer opportunity to expand healthcare access, minimize cost of care, and reduce caregiver burden. However, they may also pose risks if used in unintended ways. We consider intended use of the work reported here to extend to the following:",
"bbox": [112, 653, 489, 781],
"page_idx": 9
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- People may use the technology developed in this work to study language differences between individuals with and without dementia, as a way of building further understanding of the condition.",
"- People may use the technology developed in this work to further their own research into"
],
"bbox": [136, 794, 487, 917],
"page_idx": 9
},
{
"type": "text",
"text": "low-resource NLP tasks, including those associated with this and other healthcare problems.",
"bbox": [544, 84, 884, 116],
"page_idx": 9
},
{
"type": "text",
"text": "- People may use the technology developed in this work to build early warning systems to flag individuals about potential dementia symptoms, provided that the technology is not misconstrued as an alternative to human care in any way.",
"bbox": [531, 128, 882, 225],
"page_idx": 9
},
{
"type": "text",
"text": "Any use outside of those listed above is considered an unintended use. To safeguard against unintended use of our work, we remind readers that dataset access must be granted through the approved channels by the creators of the respective datasets used in this work. This may include processes ranging from email request to full review and approval by local and external Institutional Review Boards. We reiterate our caution against using any findings from this paper to build systems that function as intended or perceived replacements for human medical care.",
"bbox": [507, 236, 884, 429],
"page_idx": 9
},
{
"type": "text",
"text": "Acknowledgements",
"text_level": 1,
"bbox": [509, 442, 680, 458],
"page_idx": 9
},
{
"type": "text",
"text": "We thank the anonymous reviewers for their helpful feedback, which was incorporated in the final version of this manuscript. We also thank Erin Sundermann for her and her team's role in creating the ADRC dataset, and Raeanne Moore, Alex Leow, and Tamar Gollan for their clinical insights regarding Alzheimer's disease and dementia. The creation of the ADRC dataset was funded in part by a seed grant from the University of California San Diego's Alzheimer's Disease Research Center. Shahla Farzana and Natalie Parde were also partially funded by the National Science Foundation under Grant No. 2125411. Any opinions, findings, and conclusions or recommendations are those of the authors and do not necessarily reflect the views of the National Science Foundation.",
"bbox": [507, 467, 882, 725],
"page_idx": 9
},
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [510, 752, 608, 766],
"page_idx": 9
},
{
"type": "text",
"text": "Samad Amini, Boran Hao, Lifu Zhang, Mengting Song, Aman Gupta, Cody Karjadi, Vijaya B. Kolachalama, Rhoda Au, and Ioannis Ch. Paschalidis. 2022. Automated detection of mild cognitive impairment and dementia from voice recordings: A natural language processing approach. *Alzheimer's & Dementia*, n/a(n/a).",
"bbox": [509, 775, 884, 868],
"page_idx": 9
},
{
"type": "text",
"text": "Aparna Balagopalan, Benjamin Eyre, Frank Rudzicz, and Jekaterina Novikova. 2020a. To BERT or not to BERT: Comparing Speech and Language-Based",
"bbox": [509, 878, 882, 917],
"page_idx": 9
},
{
"type": "page_number",
"text": "11974",
"bbox": [477, 927, 524, 940],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Approaches for Alzheimer's Disease Detection. In Proc. Interspeech 2020, pages 2167-2171.",
"Aparna Balagopalan, Jekaterina Novikova, Matthew B A Mcdermott, Bret Nestor, Tristan Naumann, and Marzyeh Ghassemi. 2020b. Cross-Language Aphasia Detection using Optimal Transport Domain Adaptation. In Proceedings of the Machine Learning for Health NeurIPS Workshop, volume 116 of Proceedings of Machine Learning Research, pages 202-219. PMLR.",
"James T Becker, François Boller, Oscar L Lopez, Judith Saxton, and Karen L McGonigle. 1994. The natural history of Alzheimer's disease: Description of study cohort and accuracy of diagnosis. Archives of Neurology.",
"Marc Brysbaert and Boris New. 2009. Moving beyond Kučera and Francis: A critical evaluation of current word frequency norms and the introduction of a new and improved word frequency measure for American English. Behavior Research Methods, 41:977-990.",
"Hamidreza Chinaei, Leila Chan Currie, Andrew Danks, Hubert Lin, Tejas Mehta, and Frank Rudzicz. 2017. Identifying and avoiding confusion in dialogue with people with Alzheimer's disease. Computational Linguistics, 43(2):377-406.",
"Wenyuan Dai, Qiang Yang, Gui-Rong Xue, and Yong Yu. 2007. Boosting for transfer learning. In Proceedings of the 24th International Conference on Machine Learning, ICML '07, page 193-200, New York, NY, USA. Association for Computing Machinery.",
"Hal Daumé III. 2007. Frustratingly easy domain adaptation. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 256-263, Prague, Czech Republic. Association for Computational Linguistics.",
"BH Davis, C Pope, K Van Ravenstein, and W Dou. 2017. Three approaches to understanding verbal cues from older adults with diabetes. The Internet Journal of Advanced Nursing Practice, 16(1).",
"Najim Dehak, Pierre Dumouchel, and Patrick Kenny. 2007. Modeling prosodic features with joint factor analysis for speaker verification. IEEE Transactions on Audio, Speech, and Language Processing, 15(7):2095-2103.",
"Flavio Di Palo and Natalie Parde. 2019. Enriching neural models with targeted features for dementia detection. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop, pages 302-308, Florence, Italy. Association for Computational Linguistics.",
"Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah A. Smith. 2020. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. CoRR, abs/2002.06305."
],
"bbox": [115, 85, 485, 917],
"page_idx": 10
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Shahla Farzana and Natalie Parde. 2020. Exploring MMSE Score Prediction Using Verbal and NonVerbal Cues. In Proc. Interspeech 2020, pages 2207-2211.",
"Shahla Farzana and Natalie Parde. 2022. Are interaction patterns helpful for task-agnostic dementia detection? an empirical exploration. In Proceedings of the 23rd Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 172–182, Edinburgh, UK. Association for Computational Linguistics.",
"Yoav Freund and Robert E Schapire. 1997. A decision-theoretic generalization of on-line learning and an application to boosting. Journal of Computer and System Sciences, 55(1):119-139.",
"Jeffrey M. Girard, Alexandria K. Vail, Einat Liebenthal, Katrina Brown, Can Misel Kilciksiz, Luciana Pennant, Elizabeth Liebson, Dost Ongur, Louis-Philippe Morency, and Justin T. Baker. 2022. Computational analysis of spoken language in acute psychosis and mania. Schizophrenia Research, 245:97-115. Computational Approaches to Understanding Psychosis.",
"Harold Goodglass and Edith Kaplan. 1972. The assessment of aphasia and related disorders. Lea & Febiger.",
"Sarah A. Graham, Ellen E. Lee, Dilip V. Jeste, Ryan Van Patten, Elizabeth W. Twamley, Camille Nebeker, Yasunori Yamada, Ho-Cheol Kim, and Colin A. Depp. 2020. Artificial intelligence approaches to predicting and detecting cognitive decline in older adults: A conceptual review. *Psychiatry Research*, 284:112732.",
"Yue Guo, Changye Li, Carol Roan, Serguei Pakhomov, and Trevor Cohen. 2021. Crossing the \"cookie theft\" corpus chasm: Applying what BERT learns from outside data to the ADReSS challenge dementia detection task. Frontiers in Computer Science, 3.",
"Jing Jiang and ChengXiang Zhai. 2007. Instance weighting for domain adaptation in NLP. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 264-271, Prague, Czech Republic. Association for Computational Linguistics.",
"Wouter M Kouw and Marco Loog. 2018. An introduction to domain adaptation and transfer learning. Technical report, Delft University of Technology.",
"Egoitz Laparra, Steven Bethard, and Timothy A Miller. 2020. Rethinking domain adaptation for machine learning over clinical language. JAMIA Open, 3(2):146-150.",
"Changye Li, David Knopman, Weizhe Xu, Trevor Cohen, and Serguei Pakhomov. 2022. GPT-D: Inducing dementia-related linguistic anomalies by deliberate degradation of artificial neural language models. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1866-1877, Dublin, Ireland. Association for Computational Linguistics."
],
"bbox": [510, 85, 880, 917],
"page_idx": 10
},
{
"type": "page_number",
"text": "11975",
"bbox": [477, 928, 524, 940],
"page_idx": 10
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Saturnino Luz, Sofia De La Fuente Garcia, and Pierre Albert. 2018. A method for analysis of patient speech in dialogue for dementia detection. In *Resources and Processing of linguistic*, para-linguistic and extra-linguistic Data from people with various forms of cognitive impairment, pages 35–42. European Language Resources Association (ELRA).",
"Saturnino Luz, Fasih Haider, Sofia de la Fuente, Davida Fromm, and Brian MacWhinney. 2020. Alzheimer's Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge. In Proc. Interspeech 2020, pages 2172-2176.",
"Saturnino Luz, Fasih Haider, Sofia de la Fuente, Davida Fromm, and Brian MacWhinney. 2021. Detecting cognitive decline using speech only: The ADReSSo challenge. medRxiv.",
"Brian MacWhinney. 2009. The CHILDES Project Part 1: The CHAT Transcription Format. Technical report, Carnegie Mellon University.",
"Mitchell P. Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of English: The Penn Treebank. Computational Linguistics, 19(2):313-330.",
"Vaden Masrani. 2018. Detecting dementia from written and spoken language. Master's thesis, University of British Columbia.",
"Vaden Masrani, Gabriel Murray, Thalia Shoshana Field, and Giuseppe Carenini. 2017. Domain adaptation for detecting mild cognitive impairment. In Advances in Artificial Intelligence, pages 248-259, Cham. Springer International Publishing.",
"Heinz-Dieter Maas. 1972. Über den Zusammenhang zwischen Wortschatzumfang und Länge eines Textes. Zeitschrift für Literaturwissenschaft und Linguistik, 2(8):73.",
"Philip M McCarthy. 2005. An assessment of the range and usefulness of lexical diversity measures and the potential of the measure of textual lexical diversity (MTLD). Ph.D. thesis, The University of Memphis.",
"Philip M. McCarthy and Scott Jarvis. 2007. vocd: A theoretical and empirical evaluation. Language Testing, 24(4):459-488.",
"Shamila Nasreen, Julian Hough, and Matthew Purver. 2021a. Rare-class dialogue act tagging for Alzheimer's disease diagnosis. In Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 290-300, Singapore and Online. Association for Computational Linguistics.",
"Shamila Nasreen, Morteza Rohanian, Julian Hough, and Matthew Purver. 2021b. Alzheimer's dementia recognition from spontaneous speech using disfluency and interactional features. Frontiers in Computer Science, 3."
],
"bbox": [115, 85, 489, 917],
"page_idx": 11
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Slav Petrov, Dipanjan Das, and Ryan McDonald. 2012. A universal part-of-speech tagset. In Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12), pages 2089-2096, Istanbul, Turkey. European Language Resources Association (ELRA).",
"Charlene Pope and Boyd H. Davis. 2011. Finding a balance: The Carolinas Conversation Collection. Corpus Linguistics and Linguistic Theory, 7(1):143-161.",
"Sameer S. Pradhan, Eduard Hovy, Mitch Marcus, Martha Palmer, Lance Ramshaw, and Ralph Weischedel. 2007. OntoNotes: A unified relational semantic representation. In International Conference on Semantic Computing (ICSC 2007), pages 517-526.",
"Peng Qi, Timothy Dozat, Yuhao Zhang, and Christopher D. Manning. 2018. Universal dependency parsing from scratch. In Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, pages 160-170, Brussels, Belgium. Association for Computational Linguistics.",
"Baochen Sun, Jiashi Feng, and Kate Saenko. 2016. Return of frustratingly easy domain adaptation. In Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence, AAAI'16, page 2058-2065. AAAI Press.",
"J.C. Vásquez-Correa, J.R. Orozco-Arroyave, T. Bocklet, and E. Nöth. 2018. Towards an automatic evaluation of the dysarthria level of patients with Parkinson's disease. Journal of Communication Disorders, 76:21-36.",
"Rui Xia, Jianfei Yu, Feng Xu, and Shumei Wang. 2014. Instance-based domain adaptation in NLP via in-target-domain logistic approximation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 28.",
"Jiahong Yuan, Yuchen Bian, Xingyu Cai, Jiaji Huang, Zheng Ye, and Kenneth Ward Church. 2020. Disfluencies and fine-tuning pre-trained language models for detection of Alzheimer's disease. In *Interspeech*."
],
"bbox": [510, 85, 882, 682],
"page_idx": 11
},
{
"type": "page_number",
"text": "11976",
"bbox": [477, 928, 524, 940],
"page_idx": 11
},
{
"type": "text",
"text": "A For every submission:",
"bbox": [115, 107, 322, 122],
"page_idx": 12
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"A1. Did you describe the limitations of your work? 7",
"A2. Did you discuss any potential risks of your work? 8",
"A3. Do the abstract and introduction summarize the paper's main claims?",
"A4. Have you used AI writing assistants when working on this paper? Left blank."
],
"bbox": [129, 126, 695, 288],
"page_idx": 12
},
{
"type": "text",
"text": "B Did you use or create scientific artifacts?",
"bbox": [114, 299, 489, 316],
"page_idx": 12
},
{
"type": "text",
"text": "3.2, 3.4, 4",
"bbox": [132, 321, 211, 335],
"page_idx": 12
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"B1. Did you cite the creators of artifacts you used? 3.2, 3.4, 4",
"B2. Did you discuss the license or terms for use and / or distribution of any artifacts? Not applicable. Left blank.",
"B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)? 3.2, 8",
"B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it? Not applicable. Left blank.",
"B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.? 3.2",
"B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be. 3.2, 4.1"
],
"bbox": [127, 346, 880, 753],
"page_idx": 12
},
{
"type": "text",
"text": "C Did you run computational experiments?",
"bbox": [114, 764, 492, 781],
"page_idx": 12
},
{
"type": "text",
"text": "4,5",
"bbox": [132, 787, 164, 800],
"page_idx": 12
},
{
"type": "text",
"text": "C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used? Not applicable. Left blank.",
"bbox": [127, 813, 880, 860],
"page_idx": 12
},
{
"type": "text",
"text": "The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.",
|
| 1723 |
+
"bbox": [
|
| 1724 |
+
112,
|
| 1725 |
+
868,
|
| 1726 |
+
877,
|
| 1727 |
+
892
|
| 1728 |
+
],
|
| 1729 |
+
"page_idx": 12
|
| 1730 |
+
},
|
| 1731 |
+
{
|
| 1732 |
+
"type": "header",
|
| 1733 |
+
"text": "ACL 2023 Responsible NLP Checklist",
|
| 1734 |
+
"bbox": [
|
| 1735 |
+
132,
|
| 1736 |
+
84,
|
| 1737 |
+
433,
|
| 1738 |
+
99
|
| 1739 |
+
],
|
| 1740 |
+
"page_idx": 12
|
| 1741 |
+
},
|
| 1742 |
+
{
|
| 1743 |
+
"type": "page_number",
|
| 1744 |
+
"text": "11977",
|
| 1745 |
+
"bbox": [
|
| 1746 |
+
477,
|
| 1747 |
+
927,
|
| 1748 |
+
524,
|
| 1749 |
+
940
|
| 1750 |
+
],
|
| 1751 |
+
"page_idx": 12
|
| 1752 |
+
},
|
| 1753 |
+
{
|
| 1754 |
+
"type": "list",
|
| 1755 |
+
"sub_type": "text",
|
| 1756 |
+
"list_items": [
|
| 1757 |
+
"C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? 4.1",
|
| 1758 |
+
"C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run? 4, 5",
|
| 1759 |
+
"C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? 3.2, 3.4, 4.1"
|
| 1760 |
+
],
|
| 1761 |
+
"bbox": [
|
| 1762 |
+
129,
|
| 1763 |
+
83,
|
| 1764 |
+
880,
|
| 1765 |
+
280
|
| 1766 |
+
],
|
| 1767 |
+
"page_idx": 13
|
| 1768 |
+
},
|
| 1769 |
+
{
|
| 1770 |
+
"type": "text",
|
| 1771 |
+
"text": "D Did you use human annotators (e.g., crowdworkers) or research with human participants?",
|
| 1772 |
+
"text_level": 1,
|
| 1773 |
+
"bbox": [
|
| 1774 |
+
112,
|
| 1775 |
+
293,
|
| 1776 |
+
877,
|
| 1777 |
+
309
|
| 1778 |
+
],
|
| 1779 |
+
"page_idx": 13
|
| 1780 |
+
},
|
| 1781 |
+
{
|
| 1782 |
+
"type": "text",
|
| 1783 |
+
"text": "Left blank.",
|
| 1784 |
+
"bbox": [
|
| 1785 |
+
132,
|
| 1786 |
+
313,
|
| 1787 |
+
213,
|
| 1788 |
+
329
|
| 1789 |
+
],
|
| 1790 |
+
"page_idx": 13
|
| 1791 |
+
},
|
| 1792 |
+
{
|
| 1793 |
+
"type": "list",
|
| 1794 |
+
"sub_type": "text",
|
| 1795 |
+
"list_items": [
|
| 1796 |
+
"D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? No response.",
|
| 1797 |
+
"D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? No response.",
|
| 1798 |
+
"D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? No response.",
|
| 1799 |
+
"D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? No response.",
|
| 1800 |
+
"D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? No response."
|
| 1801 |
+
],
|
| 1802 |
+
"bbox": [
|
| 1803 |
+
127,
|
| 1804 |
+
340,
|
| 1805 |
+
880,
|
| 1806 |
+
640
|
| 1807 |
+
],
|
| 1808 |
+
"page_idx": 13
|
| 1809 |
+
},
|
| 1810 |
+
{
|
| 1811 |
+
"type": "page_number",
|
| 1812 |
+
"text": "11978",
|
| 1813 |
+
"bbox": [
|
| 1814 |
+
477,
|
| 1815 |
+
927,
|
| 1816 |
+
524,
|
| 1817 |
+
940
|
| 1818 |
+
],
|
| 1819 |
+
"page_idx": 13
|
| 1820 |
+
}
|
| 1821 |
+
]
|
2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/c15622fd-9732-4c6f-a455-96733517d658_model.json
ADDED
The diff for this file is too large to render. See raw diff
2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/c15622fd-9732-4c6f-a455-96733517d658_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d0a0c32abc057250dc610d0a6ba11aaddf17e8972dce454a8cd0a1deb8efed4e
size 368575
2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/full.md
ADDED
@@ -0,0 +1,320 @@
# Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language

Shahla Farzana and Natalie Parde
Natural Language Processing Laboratory
Department of Computer Science
University of Illinois Chicago
{sfarza3, parde}@uic.edu

# Abstract

Health-related speech datasets are often small and varied in focus. This makes it difficult to leverage them to effectively support healthcare goals. Robust transfer of linguistic features across different datasets orbiting the same goal carries potential to address this concern. To test this hypothesis, we experiment with domain adaptation (DA) techniques on heterogeneous spoken language data to evaluate generalizability across diverse datasets for a common task: dementia detection. We find that adapted models exhibit better performance across conversational and task-oriented datasets. The feature-augmented DA method achieves a $22\%$ increase in accuracy when adapting from a conversational to a task-specific dataset, compared to a jointly trained baseline. This suggests promising capacity of these techniques to allow for productive use of disparate data for a complex spoken language healthcare task.
# 1 Introduction

Data-driven models of diverse conditions affecting spoken language abilities offer promising real-world benefits (Amini et al., 2022; Girard et al., 2022). However, the datasets available for developing these models are often small and disparate, spanning varied diagnostic and non-diagnostic tasks mapped to different taxonomies at conflicting granularities (Graham et al., 2020). This has largely constrained progress to models excelling in specialized settings (e.g., individuals with homogeneous language background describing a standardized image (Luz et al., 2020)). At the same time, it has created challenges in building more generalizable knowledge about language patterns associated with the condition of interest (Guo et al., 2021).

Outside healthcare applications, domain adaptation (DA) has long been applied to increase the capacity of NLP systems to leverage meaningful information from diverse data (Kouw and Loog, 2018). These techniques generally seek to harness data from one domain (the source) to improve performance in another (the target). Usually the target domain has little or no labeled data, while the source has a relatively large amount of labeled data. Despite the advantages offered by DA for many NLP problems, it has remained under-studied for healthcare tasks due to numerous complexities of healthcare data (Laparra et al., 2020). Nonetheless, most healthcare problems offer the ideal learning settings in which DA is designed to thrive.

We present a systematic analysis of the use of DA for a low-resource healthcare problem that has recently been popular in the NLP community: dementia. We adopt a wide definition of dementia in our work, encompassing datasets pertaining to Alzheimer's disease or related dementia (ADRD) and age-related mild cognitive impairment (MCI), in line with current NLP community norms (Amini et al., 2022). Our research questions include:

Q1. Can DA be used to exploit spoken language data pertaining to dementia from one domain, to improve its detection in other domains?
Q2. If yes, does this offer performance improvements over simpler joint training?
Q3. How do different linguistic features and class biases contribute to this performance?

We define domain in this study as a distinct dataset with supervised labels describing dementia status in some capacity. Data collection protocol and precise labeling taxonomy may vary across domains, making our task slightly more complex than related work that focused solely on differences in source language (Balagopalan et al., 2020b) or labeling taxonomy (Masrani et al., 2017). We find that DA can indeed support improved dementia detection across domains compared to joint training, and we identify key pivot features and factors contributing to this success. It is our hope that continued study of DA in healthcare applications can further extend the boundaries of our understanding and promote impactful follow-up work.
# 2 Related Work

Most prior work on developing spoken language models of dementia has followed a common pattern, focusing on designing and evaluating dataset-specific approaches. This has included (most popularly) a picture description task (Balagopalan et al., 2020a; Yuan et al., 2020; Di Palo and Parde, 2019), as well as other datasets with more open-ended conversational speech (Li et al., 2022; Nasreen et al., 2021b; Luz et al., 2018). These models singularly focus on the source domain, with no expectation of deployment beyond that, raising questions about their ability to generalize beyond small, publicly available reference sets.

The extent to which DA has been explored in this context is limited. Li et al. (2022) leveraged transfer learning, one form of DA that involves fine-tuning a model pretrained on a much larger dataset using the smaller target domain dataset, to study the perplexity ratio of normal and artificially degraded Transformer-based language models for dementia detection. Likewise, Balagopalan et al. (2020b) achieved performance boosts in detecting early signs of aphasia in cross-language settings compared to a unilingual baseline using optimal transport domain adaptation. A problem with transfer learning in many healthcare contexts is that target datasets are much smaller than for other NLP tasks for which the technique has demonstrated success. The benefits of transfer learning do not necessarily transfer (no pun intended) to ultra low-resource settings, where resulting models may be much less stable (Dodge et al., 2020).

Other forms of DA that may be more suited to dementia detection and other very low-resource healthcare problems are feature-based and instance-based DA. Both were originally leveraged for smaller datasets closer in scale to (although still larger than) those available for dementia detection (Daumé III, 2007; Sun et al., 2016), making them promising and perhaps under-appreciated alternatives to transfer learning. Feature-based DA focuses on modifying the feature space of the source and target datasets in some way that promotes the classifier's ability to generalize across them. Masrani et al. (2017) experimented with two feature-based DA techniques to adapt separate domain subsets split from the same source dataset, DementiaBank (Becker et al., 1994). Instance-based DA focuses on reweighting instances based on their importance to the target domain task (Jiang and Zhai, 2007; Xia et al., 2014). It has not yet been studied for dementia detection. We build upon Masrani et al. (2017)'s promising findings by studying the effects of numerous feature-based and instance-based DA techniques across different dementia datasets with conversational and task-related speech samples.
# 3 Methodology

# 3.1 Task Definition

For the scope of the work presented here we abstract dementia detection to the following scenario. Given a dataset with instances $X$ and labels $Y$ from some domain $D$, our label space $\mathbf{y} = \{d, c\} \in Y$ is drawn from the binary distribution of classes (e.g., {probable Alzheimer's, control} or {with dementia, without dementia}) present in $D$. We assign the class with an association most proximal to a dementia diagnosis (e.g., probable Alzheimer's or with dementia) to the dementia ($d$) label, and the other class to the control ($c$) label. Our goal is to predict $y_i \in Y$ for an unseen instance $x_i$ with feature representation $\mathbf{x}_i$, which may be modified from the original representation according to the applied DA approach.

# 3.2 Data

We use three publicly available datasets and one privately-held dataset, representing separate domains, to study DA in this context. The publicly available datasets, DementiaBank, ADReSS, and the Carolinas Conversation Collection, are the most widely used datasets for dementia detection research in the NLP community. They are also the only datasets for which public access is available. Characteristics of these datasets are provided in Table 1. In Figure 1, we provide samples from two of these datasets, quoted directly from Chinaei et al. (2017) and Davis et al. (2017), to illustrate language differences between task-oriented and conversational domains. Our privately-held dataset is used only for conditions requiring multiple source domains, explained in detail in §3.3.

| Dataset | Split | #P | #T | L | SD |
|---|---|---|---|---|---|
| ADReSS$_d$ | tr | 54 | 54 | 125.5 | 81.8 |
| ADReSS$_d$ | te | 24 | 24 | 95.0 | 47.0 |
| ADReSS$_c$ | tr | 54 | 54 | 134.7 | 59.4 |
| ADReSS$_c$ | te | 24 | 24 | 120.0 | 72.0 |
| DB$_d$ | — | 162 | 243 | 124.8 | 67.9 |
| DB$_c$ | — | 99 | 303 | 133.9 | 67.4 |
| CCC$_d$ | — | 46 | 97 | 1320.7 | 1059.1 |
| CCC$_c$ | — | 36 | 192 | 776.9 | 469.7 |
| ADRC$_d$ | — | 3 | 3 | 444.7 | 132.6 |
| ADRC$_c$ | — | 82 | 82 | 786.4 | 338.3 |

Table 1: Descriptive dataset characteristics. The subscripts $d$ and $c$ refer to dementia and control, respectively. Length (L) is provided as the average number of words per transcript, with its standard deviation (SD). DB and CCC have differing # Participants (P) and # Transcripts (T) because some participants in those datasets had multiple recorded interviews. ADReSS is subdivided into standardized (tr)ain and (te)st partitions established by the dataset's creators.
DementiaBank (DB). DB (Becker et al., 1994) is a publicly available compendium of audiorecordings of neuropsychological tests administered to healthy participants and patients with diagnosed dementia. It is the most widely used dementia detection dataset in the NLP community, and each audiorecording is paired with a manual transcription formatted using the CHAT transcription protocol (Macwhinney, 2009). We refer readers to Becker et al. (1994) for a detailed description of the dataset collection procedures and its overall composition.

The neuropsychological tests include a picture description task from the Boston Diagnostic Aphasia Examination (Goodglass and Kaplan, 1972), often referred to as the "Cookie Theft Picture Description Task." Participants are presented with a picture stimulus which depicts numerous events, central to which is a boy stealing a cookie from a jar. They are asked to describe everything they see occurring in the picture. The bulk of the dementia detection work conducted using DementiaBank has focused on the English-language interactions from this task. DB contains 169 subjects with probable Alzheimer's disease and 99 control subjects.

Alzheimer's Dementia Recognition through Spontaneous Speech (ADReSS). ADReSS (Luz et al., 2021) is a subset of DB created for a series of shared tasks on dementia detection. Control and dementia subjects are matched in terms of age and gender, resulting in a balanced set of 156 samples (78 with dementia and 78 controls) split into training and test. The goal in developing ADReSS was to eliminate possible biases that may arise due to label and demographic imbalance in the original DB, at the expense of resulting in an ultimately smaller dataset. Its existence presents an interesting opportunity for comparison of balanced and unbalanced versions of the same source data. Since these datasets are drawn from the same source, we do not adapt DB to ADReSS or vice versa.

| DementiaBank | |
|---|---|
| INV: | just tell me what happening in the picture .. |
| PAR: | the pearl [: poor] [* p:w] &mo moms gettin(g) her wet [/] feet wet (be)cause she thinking of days gone by and then the water run . [+ gram] . |
| PAR: | ( ) and &uh that boy whether he knows or not hes gonna [: going to] crack his head on the back of that counter trying to get too many cookies out . . |

| Carolinas Conversation Collection | |
|---|---|
| INV: | was it just (overlap) |
| PAR: | um, my doctor was telling me all kind of little things, been so long, I forgot now. But, um, my nerves was bad. |
| INV: | Your nerves? |
| PAR: | Um hmm. And, um, I had a little heart failure. Um hmm. And, um, --- (long pause) that all, what else he tell me that was wrong? He say, "You got to stop," he just didn't tell me then, (overlap) |

Figure 1: Characteristic language samples from DB (Chinaei et al., 2017) and CCC (Davis et al., 2017).

Carolinas Conversation Collection (CCC). CCC (Pope and Davis, 2011) is not derived from a neuropsychological task; instead, it focuses on English conversational speech. The dataset, collected by researchers studying language and healthcare across numerous institutions, contains 646 recorded interviews of 48 elderly cognitively normal individuals with non-dementia related conditions, and 284 individuals with dementia. Interview topics vary considerably. Members of the cohort without dementia have one interview with a young clinical professional and one with a demographically similar community peer, whereas members of the cohort with dementia have anywhere from 1-10 interviews with researchers and student visitors. The target focus of the conversational interviews is on eliciting autobiographical narrative pertaining to health and wellness. Although much less commonly used in the NLP community, it has recently been included in studies focusing on the intersection between interaction patterns and dementia status (Nasreen et al., 2021a), on dementia-related linguistic anomalies in human language (Li et al., 2022), and more. We used a transcribed subset of this corpus.

Alzheimer's Disease Research Center (ADRC). ADRC is a new, privately held dataset containing audiorecordings and matched transcriptions for a population of 85 elderly participants. Audiorecordings were collected during a structured narrative storytelling task, in which participants were asked to describe a memorable event from their young adulthood. Diagnoses were provided by trained psychiatrists. Audiorecordings were transcribed in a semi-automated manner, with an initial pass completed using the Vosk speech recognition toolkit and a follow-up pass during which trained undergraduates manually corrected errors in the transcripts. Although not yet publicly available, plans are in place to release this dataset following guidelines created in concert with our psychiatric collaborators in an approved protocol from the Institutional Review Board at the University of California San Diego. We encourage interested parties to contact us for additional details.
# 3.3 Domain Adaptation

To answer our research questions defined in §1, we experimented with feature-based and instance-based DA algorithms. We focused on these techniques for two reasons. First, most dementia detection models to date are feature-based, owing in part to clinical interest in the characteristic language use by people with dementia. Second, the size of available dementia detection datasets (see Table 1) precludes the use of the same types of deep learning models that are common in many other NLP tasks. The prevalence of smaller scale, feature-based models suggests that these DA techniques hold greater immediate task relevancy.

AUGMENT. AUGMENT is a straightforward feature-based DA algorithm that has been shown to be effective on a wide range of datasets and tasks (Daumé III, 2007). It augments the feature space by making "source-only," "target-only," and "shared" copies of each feature, effectively tripling the feature set using the following formulation, where $\phi^{\mathbf{s}}, \phi^{\mathbf{t}}: X \to \check{X}$ represent mappings for the source and target data, respectively:

$$
\phi^{\mathbf{s}}(\mathbf{x}_i) = \langle \mathbf{x}_i, \mathbf{0}, \mathbf{x}_i \rangle, \quad \phi^{\mathbf{t}}(\mathbf{x}_i) = \langle \mathbf{0}, \mathbf{x}_i, \mathbf{x}_i \rangle \tag{1}
$$

In the formulation above, $\check{X} = \mathbb{R}^{3F}$ is the augmented version of the feature space $X = \mathbb{R}^F$. Empty vectors are filled with $\mathbf{0} = \langle 0, 0, \dots, 0 \rangle \in \mathbb{R}^F$. The motivation behind AUGMENT is intuitive. If a column contains a feature that correlates with the class label in both the target and source data, the learning algorithm will weight the shared column more heavily and reduce the weight on the target-only and source-only feature copies, reducing their importance to the model. However, if a feature correlates with the class label only in the target (or source) data, the learning algorithm will increase the weight of the target-only (or source-only) column and reduce the weight of the others. The onus is thus left to the model to learn feature importance with respect to the domains.
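As a concrete illustration, the mapping in Equation 1 can be implemented in a few lines of NumPy. The sketch below is our own minimal example, not the authors' code; the `augment` helper and array names are hypothetical.

```python
import numpy as np

def augment(X: np.ndarray, domain: str) -> np.ndarray:
    """Apply the AUGMENT mapping (Daume III, 2007) to a feature matrix.

    Each row x becomes <x, 0, x> for source data and <0, x, x> for
    target data, tripling the original F-dimensional feature space.
    """
    zeros = np.zeros_like(X)
    if domain == "source":
        return np.hstack([X, zeros, X])  # <source-only, target-only, shared>
    elif domain == "target":
        return np.hstack([zeros, X, X])
    raise ValueError("domain must be 'source' or 'target'")

# Toy usage: 3 source rows and 2 target rows with F = 4 features each.
Xs, Xt = np.random.rand(3, 4), np.random.rand(2, 4)
X_train = np.vstack([augment(Xs, "source"), augment(Xt, "target")])
assert X_train.shape == (5, 12)  # F features become 3F
```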
MULTIAUGMENT. We extend AUGMENT to accommodate multiple source domains following guidelines sketched out by Daumé III (2007), and refer to the technique as MULTIAUGMENT. As in the two-domain case, we expand the feature space, but this time to $\mathbb{R}^{(K + 1)F}$ where $K$ is the total number of domains. The cardinality $(K + 1)F$ represents a distinct feature set $F$ for each domain $k_i \in K$, plus the same shared feature space introduced previously. For our specific case we test this method with two source domains, creating the following mappings to transform from $\mathbb{R}^F$ to $\mathbb{R}^{4F}$:

$$
\begin{aligned}
\phi^{\mathbf{s}_1}(\mathbf{x}_i) &= \langle \mathbf{x}_i, \mathbf{0}, \mathbf{0}, \mathbf{x}_i \rangle, \\
\phi^{\mathbf{s}_2}(\mathbf{x}_i) &= \langle \mathbf{0}, \mathbf{x}_i, \mathbf{0}, \mathbf{x}_i \rangle, \\
\phi^{\mathbf{t}}(\mathbf{x}_i) &= \langle \mathbf{0}, \mathbf{0}, \mathbf{x}_i, \mathbf{x}_i \rangle
\end{aligned} \tag{2}
$$
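The same NumPy pattern generalizes to $K$ domains. A minimal sketch (again our own illustration, with hypothetical names) places each instance's features in its domain-specific block plus the shared block:

```python
import numpy as np

def multi_augment(X: np.ndarray, domain_idx: int, n_domains: int) -> np.ndarray:
    """Map F-dimensional rows into R^{(K+1)F}: one block per domain plus
    a shared block. domain_idx selects which block receives the features."""
    n, f = X.shape
    out = np.zeros((n, (n_domains + 1) * f))
    out[:, domain_idx * f:(domain_idx + 1) * f] = X  # domain-specific block
    out[:, n_domains * f:] = X                       # shared block
    return out

# Two source domains (indices 0 and 1) and one target domain (index 2),
# matching Equation 2: R^F -> R^{4F}.
Xs1, Xs2, Xt = np.random.rand(4, 5), np.random.rand(3, 5), np.random.rand(2, 5)
X_train = np.vstack([
    multi_augment(Xs1, 0, 3),
    multi_augment(Xs2, 1, 3),
    multi_augment(Xt, 2, 3),
])
assert X_train.shape == (9, 20)
```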
TRADABOOST. TRADABOOST is a supervised instance-based DA method (Dai et al., 2007) that extends the AdaBoost classification algorithm (Freund and Schapire, 1997) for transfer learning. The method is based on a "reverse boosting" principle, where the weights of poorly predictive source instances are decreased at each boosting iteration and the weights of target instances are simultaneously increased. The guiding intuition is that instances with large weights (including source instances that are more distributionally similar to the target domain instances) can then play a greater role in training the learning algorithm. We used the TRADABOOST implementation in Python's adapt package to implement this technique.

| Group | # Features | Category |
|---|---|---|
| POS | 12 | l |
| CFG | 12 | l |
| Syntac. Complexity | 16 | l |
| NER | 10 | l |
| Vocab. Richness | 6 | l |
| SUBTL | 1 | l |
| Semantic | 5 | s |
| Acoustic | 25 | a |

Table 2: Descriptive feature statistics. Category refers to the high-level categorization applied to features when performing experiments: $l$, $s$, and $a$ are lexicosyntactic, semantic, and acoustic features, respectively.
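Returning to TRADABOOST: a minimal usage sketch with the adapt library follows. Constructor and fit signatures vary across adapt versions, so treat this as an assumption-laden illustration rather than the authors' exact configuration; the feature matrices here are random stand-ins.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from adapt.instance_based import TrAdaBoost  # pip install adapt

# Hypothetical pre-extracted feature matrices: Xs/ys from the source
# domain, Xt/yt from the (labeled) target domain.
Xs, ys = np.random.rand(100, 20), np.random.randint(0, 2, 100)
Xt, yt = np.random.rand(30, 20), np.random.randint(0, 2, 30)

# Reverse boosting: source instances that the weak learners keep
# misclassifying are progressively down-weighted across iterations.
model = TrAdaBoost(LogisticRegression(max_iter=1000),
                   n_estimators=10, Xt=Xt, yt=yt, random_state=0)
model.fit(Xs, ys)
preds = model.predict(Xt)
```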
# 3.4 Features

We experimented with lexicosyntactic, semantic, and acoustic features, summarized below. All features are calculated using the participant's utterances or speech segments. Descriptive statistics indicating the number of features belonging to each group, as well as the group's high-level categorization (used when labeling experimental conditions), are presented in Table 2.

Part-Of-Speech (POS) Tags. POS tags have proven useful for detecting dementia (Masrani, 2018), as well as primary progressive aphasia and two of its subtypes (Balagopalan et al., 2020b). We use the spaCy core English POS tagger to capture the frequency of coarse-grained POS labels in a transcript using the Universal Dependencies tagset (Petrov et al., 2012). Frequency counts are normalized by the number of words in the transcript.
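A sketch of this normalized POS-frequency extraction with spaCy follows; the pipeline name and the decision to exclude punctuation are our assumptions.

```python
import spacy
from collections import Counter

nlp = spacy.load("en_core_web_sm")  # assumed small English pipeline

def pos_features(transcript: str) -> dict:
    """Coarse-grained (Universal Dependencies) POS frequencies,
    normalized by the number of word tokens in the transcript."""
    doc = nlp(transcript)
    words = [t for t in doc if not t.is_punct and not t.is_space]
    counts = Counter(t.pos_ for t in words)
    return {pos: n / max(len(words), 1) for pos, n in counts.items()}

print(pos_features("the boy is taking a cookie from the jar"))
# e.g., {'DET': 0.33..., 'NOUN': 0.33..., 'AUX': 0.11..., ...}
```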
CFG Features. Context-Free Grammar (CFG) features count how often a phrase structure rule (e.g., $NP \rightarrow VP\ PP$ or $NP \rightarrow DT\ NP$) occurs in an utterance parse tree. These feature counts are then normalized by the total number of nodes in the parse tree. CFG features have previously demonstrated success for dementia detection (Masrani, 2018; Masrani et al., 2017). We extract parse trees using the Stanford parser (Qi et al., 2018), representing constituents using Penn Treebank constituent tags (Marcus et al., 1993).
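To make the rule counting concrete, the sketch below counts productions from a bracketed constituency parse with NLTK and normalizes by node count. This is our own illustration; the paper's pipeline used the Stanford parser to produce the trees.

```python
from collections import Counter
from nltk import Tree

def cfg_features(parse_str: str) -> dict:
    """Count phrase-structure rules in a bracketed parse and normalize
    by the total number of nodes in the tree."""
    tree = Tree.fromstring(parse_str)
    rules = Counter(str(p) for p in tree.productions() if p.is_nonlexical())
    n_nodes = sum(1 for _ in tree.subtrees())
    return {rule: count / n_nodes for rule, count in rules.items()}

# Toy bracketed parse of "the boy eats cookies".
parse = "(S (NP (DT the) (NN boy)) (VP (VBZ eats) (NP (NNS cookies))))"
print(cfg_features(parse))
# e.g., {'S -> NP VP': 0.125, 'NP -> DT NN': 0.125, ...}
```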
Syntactic Complexity. Measures of syntactic complexity have proven effective for predicting dementia from speech (Masrani, 2018). We represent utterance complexity through a suite of features including parse tree depth, mean word length, mean sentence length, mean clause (noun or verb phrase) length, and number of clauses per sentence.
Named Entity Recognition (NER) Tags. Although NER features have not been studied in prior work, we suspected that they may be a useful and relatively domain-agnostic way to encode broad structural patterns, following the previous success of other more general intent-based features (Farzana and Parde, 2022). We extracted named entity labels using a spaCy model trained on the OntoNotes 5 corpus. This model produces the fine-grained named entity types present in the OntoNotes tagset (Pradhan et al., 2007). We included a frequency feature for each NER type. NER frequency counts were normalized by the total number of entities mentioned in the transcript.
Vocabulary Richness Features. Existing research has shown that measures of vocabulary richness can be successfully leveraged to diagnose dementia (Masrani et al., 2017; Balagopalan et al., 2020a). We include a set of well-known lexical richness measures including type-token ratio (TTR), moving-average TTR (MATTR), mean segmental TTR (MSTTR), the Maas index (Maas, 1972), the measure of textual lexical diversity (McCarthy, 2005, MTLD), and the hypergeometric distribution index (McCarthy and Jarvis, 2007, HD-D). We calculated each measure over the entire transcript using Python's lexicalrichness package.
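These measures are available off the shelf; a sketch with the lexicalrichness package follows. The window, segment, and draw sizes here are illustrative toy values rather than the paper's settings, and minor API details may differ across package versions.

```python
from lexicalrichness import LexicalRichness  # pip install lexicalrichness

def vocab_richness_features(transcript: str) -> dict:
    """Transcript-level lexical diversity measures."""
    lex = LexicalRichness(transcript)
    return {
        "ttr": lex.ttr,                         # type-token ratio
        "mattr": lex.mattr(window_size=10),     # moving-average TTR
        "msttr": lex.msttr(segment_window=10),  # mean segmental TTR
        "maas": lex.Maas,                       # Maas index
        "mtld": lex.mtld(threshold=0.72),       # textual lexical diversity
        "hdd": lex.hdd(draws=10),               # hypergeometric distribution D
    }

print(vocab_richness_features(
    "the boy on the stool is reaching for the cookie jar on the shelf"))
```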
SUBTL Scores. SUBTL scores represent the frequency with which words are used in daily life (Brysbaert and New, 2009). They are derived from large corpora of television and film subtitles spanning 50 million words. We treated tokens with the Penn Treebank POS tags PRP, PRP$, WP, and EX as stopwords and computed transcript-level SUBTL scores by averaging across all available word-level scores for the participant's speech.
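The transcript-level computation reduces to a filtered average over word-level lookups, roughly as below. The `subtlex` table is a hypothetical word-to-SUBTL-frequency mapping loaded from the SUBTLEX-US norms; the tag set mirrors the stopword filter above.

```python
import spacy

nlp = spacy.load("en_core_web_sm")
STOP_TAGS = {"PRP", "PRP$", "WP", "EX"}  # Penn Treebank tags treated as stopwords

def subtl_score(transcript: str, subtlex: dict) -> float:
    """Average SUBTL word-frequency score over the participant's words,
    skipping pronoun-like tags and words missing from the norms."""
    doc = nlp(transcript)
    scores = [subtlex[t.lower_] for t in doc
              if t.tag_ not in STOP_TAGS and t.lower_ in subtlex]
    return sum(scores) / len(scores) if scores else 0.0

# Toy lookup table standing in for the real SUBTLEX-US norms.
toy_norms = {"the": 6.18, "boy": 4.26, "cookie": 3.06, "jar": 2.75}
print(subtl_score("he gave the boy a cookie from the jar", toy_norms))
```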
Semantic Features. We measure semantic similarity between consecutive utterances by calculating the cosine similarity between the utterance vectors and then recording the proportion of distances below three thresholds (0, 0.3, 0.5). We used averaged TF-IDF vectors to represent each utterance. We also recorded the minimum and average cosine distance between utterances.
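A sketch of these utterance-similarity features with scikit-learn follows; the threshold values come from the text above, while the vectorizer settings are our assumptions.

```python
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_distances

def semantic_features(utterances: list[str]) -> dict:
    """Cosine-distance statistics between consecutive utterances."""
    tfidf = TfidfVectorizer().fit_transform(utterances)
    dists = np.array([
        cosine_distances(tfidf[i], tfidf[i + 1])[0, 0]
        for i in range(len(utterances) - 1)
    ])
    feats = {f"prop_below_{t}": float((dists <= t).mean()) for t in (0.0, 0.3, 0.5)}
    feats["min_dist"] = float(dists.min())
    feats["avg_dist"] = float(dists.mean())
    return feats

print(semantic_features([
    "the boy is on the stool",
    "the stool is tipping over",
    "mother is washing dishes",
]))
```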
Acoustic Features. Finally, prior work has found acoustic distinctions between subjects with and without dementia (Masrani et al., 2017). We chunked the participant's speech segments from each audiorecording using Pydub prior to extracting acoustic features. We include prosody features (Dehak et al., 2007; Vásquez-Correa et al., 2018) from continuous speech based on duration (i.e., number of voiced segments per second and standard deviation of duration of unvoiced segments), extracted using the DiSVoice tool.
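The two duration-based prosody features named above can be approximated with a silence-based segmentation, as in the pydub sketch below. This is a simplified stand-in for DiSVoice's voicing analysis, and the silence parameters are assumptions.

```python
import numpy as np
from pydub import AudioSegment
from pydub.silence import detect_nonsilent

def duration_prosody_features(wav_path: str) -> dict:
    """Approximate voiced/unvoiced duration statistics via silence detection."""
    audio = AudioSegment.from_wav(wav_path)
    voiced = detect_nonsilent(audio, min_silence_len=300,
                              silence_thresh=audio.dBFS - 16)  # [start, end] ms
    total_sec = len(audio) / 1000.0
    # Unvoiced stretches are the gaps between consecutive voiced segments.
    gaps = [(voiced[i + 1][0] - voiced[i][1]) / 1000.0
            for i in range(len(voiced) - 1)]
    return {
        "voiced_segments_per_sec": len(voiced) / total_sec if total_sec else 0.0,
        "std_unvoiced_duration": float(np.std(gaps)) if gaps else 0.0,
    }
```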
# 4 Evaluation

# 4.1 Classification Settings

For our backbone classifier, we experimented with support vector machine (SVM) and logistic regression (LR), implemented using sklearn. For SVM, we used a polynomial kernel and held all other hyperparameters at their default settings except for the trade-off parameter $C$. For LR, we also held all hyperparameters at their default settings. We selected LR and SVM due to their documented success at dementia detection using one or more of our datasets (Farzana and Parde, 2020; Masrani et al., 2017). We tuned our models using $K$-fold stratified cross-validation on the training set, using the following values for the trade-off parameter $C$: $\{0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.5, 1\}$.

We report the result for the parameter achieving the best performance, averaged across all folds. We used stratified cross-validation to produce the results reported in all results tables. We maintained the same ratio between the target classes in all folds and in the full dataset, and shuffled samples for cross-validation such that all samples from the same participant remained in the same fold. This was done to prevent overfitting due to data leakage stemming from the same participant being present in multiple folds.
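This grouped, stratified split can be reproduced with scikit-learn, for example as below. This is a sketch under the assumption that one participant ID accompanies each transcript; it requires scikit-learn >= 1.0 for StratifiedGroupKFold, and the arrays are random placeholders.

```python
import numpy as np
from sklearn.model_selection import StratifiedGroupKFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score

X = np.random.rand(40, 20)             # transcript-level feature vectors
y = np.random.randint(0, 2, 40)        # d / c labels
groups = np.random.randint(0, 15, 40)  # participant IDs

# Folds preserve the class ratio while keeping every transcript from
# one participant inside a single fold (no participant-level leakage).
cv = StratifiedGroupKFold(n_splits=5, shuffle=True, random_state=0)
scores = []
for train_idx, test_idx in cv.split(X, y, groups):
    clf = LogisticRegression(C=0.1, max_iter=1000).fit(X[train_idx], y[train_idx])
    scores.append(f1_score(y[test_idx], clf.predict(X[test_idx])))
print(f"mean F1 = {np.mean(scores):.2f}")
```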
# 4.2 Experimental Conditions

We compared each DA technique against three baseline models: a model jointly trained using samples from both the source and target data without applying any DA algorithms (JOINT), a model trained only on the target data (TARGET), and a model trained only on the source data (SOURCE). The training dataset(s) for our DA conditions varied depending on the technique being tested. AUGMENT and TRADABOOST were trained on data from a single source domain and the target domain, whereas MULTIAUGMENT was trained on data from two source domains and the target domain. All models, including the DA algorithms tested and our baseline models, were evaluated using the target domain test set.

We considered the following source $\rightarrow$ target adaptations: CCC $\rightarrow$ DB, DB $\rightarrow$ CCC, CCC $\rightarrow$ ADReSS, {ADRC, CCC} $\rightarrow$ DB, {ADRC, DB} $\rightarrow$ CCC, and {ADRC, CCC} $\rightarrow$ ADReSS. For each DA technique, we also considered several combinations of feature subsets (refer to Table 2 for categorizations): $l$, $l + s$, and $l + s + a$. MULTIAUGMENT only used $l$ and $l + s$ since ADRC does not provide speaker segmentation timestamps; thus, speech could not be extracted in the same way as for the other datasets, preventing use of acoustic features.
# 4.3 Results

We compared the conditions specified in §4.2 using accuracy and $\mathrm{F_1}$, and report our experimental results in Tables 3, 4, and 5. Results are subdivided according to target domain, presenting results from conditions using DB, CCC, and ADReSS as the target domains, respectively.

We find that MULTIAUGMENT clearly outperforms the baseline techniques in most cases and usually outperforms the single-source DA algorithms when DB and ADReSS are the target domains, although the best-performing feature subsets vary. This trend is less clear when CCC is the target domain, with AUGMENT approaching or exceeding the performance of MULTIAUGMENT. When task-oriented data (DB or ADReSS) was used as the target, we observed that the percentage of source data in the training set was lower than the percentage of target data. As a result, we suspect that adding more conversational data (such as that found in ADRC) to the source (CCC) may promote improved performance when adapting to task-oriented target domains.

| Model | LR Acc. | LR F1 | SVM Acc. | SVM F1 |
|---|---|---|---|---|
| SOURCE$_{L+S}$ | 0.45 (0.01) | 0.02 (0.04) | 0.45 (0.01) | 0.01 (0.01) |
| TARGET$_{L+S}$ | 0.74 (0.05) | 0.77 (0.05) | 0.67 (0.06) | 0.61 (0.11) |
| JOINT$_{L+S}$ | 0.72 (0.06) | 0.74 (0.06) | 0.66 (0.04) | 0.64 (0.06) |
| AUGMENT$_L$ | 0.72 (0.04) | 0.75▼ (0.04) | 0.70 (0.04) | 0.76▲ (0.04) |
| AUGMENT$_{L+S}$ | 0.73 (0.06) | 0.75 (0.05) | 0.70 (0.05) | 0.76▲ (0.04) |
| AUGMENT$_{L+S+A}$ | 0.72 (0.05) | 0.74 (0.05) | 0.69 (0.05) | 0.74▲ (0.05) |
| TRADABOOST$_L$ | 0.66 (0.05) | 0.68 (0.05) | 0.66 (0.06) | 0.68 (0.08) |
| TRADABOOST$_{L+S}$ | 0.60▼ (0.05) | 0.64▼ (0.04) | 0.63 (0.06) | 0.64 (0.08) |
| TRADABOOST$_{L+S+A}$ | 0.55▼ (0.05) | 0.55▼ (0.07) | 0.65 (0.05) | 0.68 (0.06) |
| MULTIAUGMENT$_L$ | 0.72▼ (0.06) | 0.75▼ (0.06) | 0.70 (0.05) | 0.77▲ (0.05) |
| MULTIAUGMENT$_{L+S}$ | 0.75 (0.05) | 0.76▼ (0.05) | 0.70 (0.05) | 0.77▲ (0.05) |

Table 3: Comparison of DA conditions when CCC is the source dataset and DB is the target dataset (standard deviation is reported inside parentheses). For MULTIAUGMENT conditions, ADRC and CCC are jointly used as the source dataset. Five-fold cross-validation is used in all cases with each fold having $40\%$ source data ($46.3\%$ class $d$) and $60\%$ target data ($55.5\%$ class $d$). MULTIAUGMENT has $46.3\%$ source data ($26.7\%$ class $d$) and $53.7\%$ target data ($55.2\%$ class $d$). ▲: Significantly better than the corresponding LR or SVM TARGET baseline, with $p < 0.05$, using a paired $t$-test. ▼: Significantly worse than the corresponding TARGET baseline, using the same parameters.

Both AUGMENT and MULTIAUGMENT outperform TRADABOOST, regardless of feature combination, across the board. We achieve maximum performance of $\mathrm{F_1} = 0.77$ on DB (using MULTIAUGMENT$_{L+S}$ with SVM), $\mathrm{F_1} = 0.75$ on CCC (using MULTIAUGMENT$_L$ with SVM), and $\mathrm{F_1} = 0.77$ on ADReSS (using AUGMENT$_{L+S}$, MULTIAUGMENT$_L$, and MULTIAUGMENT$_{L+S}$ with SVM). In Table 6, we report additional results from our highest-performing versions of each DA technique on the ADReSS test set (Luz et al., 2020). This facilitates straightforward comparison with external models by others who use this standardized test set. We find that AUGMENT$_{L+S}$ achieves similar results to those in Table 5.

| Model | LR Acc. | LR F1 | SVM Acc. | SVM F1 |
|---|---|---|---|---|
| SOURCE$_{L+S}$ | 0.39 (0.07) | 0.51 (0.05) | 0.36 (0.02) | 0.51 (0.02) |
| TARGET$_{L+S}$ | 0.83 (0.07) | 0.72 (0.15) | 0.80 (0.05) | 0.63 (0.12) |
| JOINT$_{L+S}$ | 0.80 (0.10) | 0.66 (0.18) | 0.84 (0.08) | 0.73 (0.16) |
| AUGMENT$_L$ | 0.85 (0.07) | 0.75 (0.15) | 0.85 (0.06) | 0.74 (0.15) |
| AUGMENT$_{L+S}$ | 0.84 (0.07) | 0.73 (0.15) | 0.84 (0.07) | 0.73 (0.15) |
| AUGMENT$_{L+S+A}$ | 0.80 (0.06) | 0.68 (0.15) | 0.80 (0.05) | 0.68 (0.11) |
| TRADABOOST$_L$ | 0.80 (0.05) | 0.69 (0.13) | 0.84 (0.07) | 0.74 (0.17) |
| TRADABOOST$_{L+S}$ | 0.79 (0.05) | 0.66 (0.14) | 0.84 (0.06) | 0.73 (0.15) |
| TRADABOOST$_{L+S+A}$ | 0.78 (0.06) | 0.65 (0.14) | 0.83 (0.08) | 0.71 (0.17) |
| MULTIAUGMENT$_L$ | 0.85 (0.07) | 0.74 (0.15) | 0.86▲ (0.07) | 0.75▲ (0.16) |
| MULTIAUGMENT$_{L+S}$ | 0.84 (0.07) | 0.73 (0.15) | 0.85▲ (0.07) | 0.74▲ (0.15) |

Table 4: Comparison of DA conditions when DB is the source dataset and CCC is the target dataset (standard deviation is reported inside parentheses). For MULTIAUGMENT conditions, ADRC and DB are jointly used as the source dataset. Five-fold cross-validation is used in all cases with each fold having $70.3\%$ source data ($55.5\%$ class $d$) and $29.7\%$ target data ($33.6\%$ class $d$). MULTIAUGMENT has $73.1\%$ source data ($48.2\%$ class $d$) and $26.9\%$ target data ($33.6\%$ class $d$). ▲: Significantly better than the corresponding LR or SVM TARGET baseline, with $p < 0.05$, using a paired $t$-test. ▼: Significantly worse than the corresponding TARGET baseline, using the same parameters.
# 5 Analysis

The results in Tables 3-6 clearly answer our first research question (Q1), demonstrating that DA can be used to exploit spoken language data pertaining to dementia detection in one domain to improve its detection in other domains. They also answer Q2, showing that DA offers performance improvements over jointly training on data from multiple domains. To answer Q3, we performed additional analyses to probe the contributions of feature subsets and class bias to overall performance.

# 5.1 Feature Analysis
To find correspondences between source and target domain features, we analyzed the features in the shared column from AUGMENT$_{L+S+A}$ using LR and a DB $\rightarrow$ CCC domain adaptation mapping. We refer to these as pivot features. We computed the most important pivot features across the source and target domains using an $\ell_1$ penalty with logistic regression.

We find that a subset of specific lexicosyntactic and acoustic pivot features, including the number of tokens, average phrase length, and standard deviation of the duration of unvoiced segments, are highly positively correlated with the class labels in both the source and target domains. In contrast, the number of unique named entities, certain vocabulary richness and lexical frequency measures (MATTR and SUBTL score), and the number of voiced segments per second are highly negatively correlated with the class labels of both the source and target domains. Thus, these features offer particularly strong contributions to model performance across multiple domains.

| Model | LR Acc. | LR F1 | SVM Acc. | SVM F1 |
|---|---|---|---|---|
| SOURCE$_{L+S}$ | 0.52 (0.04) | 0.06 (0.11) | 0.51 (0.04) | 0.03 (0.09) |
| TARGET$_{L+S}$ | 0.80 (0.13) | 0.75 (0.22) | 0.68 (0.12) | 0.54 (0.25) |
| JOINT$_{L+S}$ | 0.69 (0.16) | 0.64 (0.24) | 0.59 (0.14) | 0.47 (0.25) |
| AUGMENT$_L$ | 0.77 (0.12) | 0.72 (0.20) | 0.78▲ (0.10) | 0.74▲ (0.17) |
| AUGMENT$_{L+S}$ | 0.74▼ (0.10) | 0.68▼ (0.21) | 0.81▲ (0.07) | 0.77▲ (0.15) |
| AUGMENT$_{L+S+A}$ | 0.75 (0.06) | 0.69 (0.14) | 0.80▲ (0.15) | 0.76▲ (0.22) |
| TRADABOOST$_L$ | 0.72▼ (0.14) | 0.67▼ (0.21) | 0.77 (0.13) | 0.71▲ (0.21) |
| TRADABOOST$_{L+S}$ | 0.76 (0.13) | 0.70 (0.21) | 0.76 (0.13) | 0.70▲ (0.21) |
| TRADABOOST$_{L+S+A}$ | 0.76 (0.12) | 0.73 (0.17) | 0.70 (0.10) | 0.62 (0.19) |
| MULTIAUGMENT$_L$ | 0.80 (0.13) | 0.75 (0.22) | 0.80▲ (0.13) | 0.77▲ (0.20) |
| MULTIAUGMENT$_{L+S}$ | 0.75 (0.14) | 0.67 (0.29) | 0.81▲ (0.14) | 0.77▲ (0.21) |

Table 5: Comparison of DA conditions when CCC is the source dataset and ADReSS train is the target dataset (standard deviation is reported inside parentheses). For MULTIAUGMENT conditions, ADRC and CCC are jointly used as the source dataset. Ten-fold cross-validation is used in all cases with each fold having $74.8\%$ source data ($33.6\%$ class $d$) and $25.2\%$ target data ($50\%$ class $d$). MULTIAUGMENT has $79.4\%$ source data ($26.7\%$ class $d$) and $20.6\%$ target data ($50\%$ class $d$). ▲: Significantly better than the corresponding LR or SVM TARGET baseline, with $p < 0.05$, using a paired $t$-test. ▼: Significantly worse than the corresponding TARGET baseline, using the same parameters.

| Model | Class | LR Acc. | LR F1 | SVM Acc. | SVM F1 |
|---|---|---|---|---|---|
| SOURCE$_{L+S}$ | d | 0.51 | 0.00 | 0.51 | 0.00 |
| | c | | 0.68 | | 0.68 |
| TARGET$_{L+S}$ | d | 0.72 | 0.65 | 0.70 | 0.53 |
| | c | | 0.76 | | 0.77 |
| JOINT$_{L+S}$ | d | 0.68 | 0.62 | 0.72 | 0.70 |
| | c | | 0.73 | | 0.75 |
| AUGMENT$_{L+S}$ | d | 0.77 | 0.70 | 0.77 | 0.72 |
| | c | | 0.81 | | 0.80 |
| MULTIAUGMENT$_{L+S}$ | d | 0.74 | 0.68 | 0.74 | 0.68 |
| | c | | 0.79 | | 0.79 |
| TRADABOOST$_{L+S}$ | d | 0.74 | 0.70 | 0.72 | 0.67 |
| | c | | 0.78 | | 0.76 |

Table 6: Evaluation on the standardized ADReSS test set with per-class (Class) $\mathrm{F}_1$; accuracy is reported once per model. CCC and ADReSS (train) are used as source and target data, respectively, when training with $46.1\%$ source data (no class $d$) and $53.9\%$ target data ($55.5\%$ class $d$). For MULTIAUGMENT, both CCC and ADRC are used as source ($77.6\%$ of training data with $26.7\%$ class $d$), with $22.4\%$ target data ($50\%$ class $d$). We assessed statistical significance using McNemar's test, and found that no improvements were significantly different from the TARGET baseline.
# 5.2 Domain-Specific Class Bias

As shown in Table 1, our domains vary in their class balance. Class imbalances are especially common in low-resource healthcare tasks since it is often challenging to recruit subjects with the target condition. When the source and target domains have varying class distributions, they are biased towards different class labels. This can create conditions such that the learning algorithm is able to capitalize upon class bias rather than real properties of the data to increase perceived performance. For instance, when adapting from CCC $\rightarrow$ DB with the source dataset (CCC) having $33.6\%$ instances belonging to class $d$ and the target dataset (DB) having $55.5\%$ instances belonging to class $d$, it is possible that the model trivially learns to predict class $d$ with greater frequency, without learning real feature distinctions between the classes.

To investigate whether the improvements observed from DA in our case may simply be the product of domain-specific class biases, we conducted an experiment analyzing performance of AUGMENT$_{L+S}$ (our best-performing model in terms of accuracy for the CCC $\rightarrow$ DB mapping, shown in Table 3) and JOINT$_{L+S}$ across class-biased and unbiased subsets of the original dataset. In our equal condition, both domains had perfectly class-balanced data in each training fold. In our consistent class bias condition, training folds had the varying class biases shown in Table 7. Each class bias setting was evaluated using five-fold cross-validation, and then those results were averaged. We report the results from this experiment in Table 8. We find that AUGMENT still outperforms JOINT in both conditions, answering the second part of Q3 by empirically demonstrating that class bias does not account for the performance improvements resulting from domain adaptation.

| Domain | Class | cb1 | cb2 | cb3 | cb4 |
|---|---|---|---|---|---|
| CCC | d | 72 | 57 | 42 | 28 |
| CCC | c | 28 | 43 | 58 | 72 |
| DB | d | 72 | 57 | 42 | 28 |
| DB | c | 28 | 43 | 58 | 72 |

Table 7: Distribution of instances across domains (CCC = source; DB = target) and classes within training folds for four different class biases.

| Condition | Model | LR Acc. | LR F1 | SVM F1 | SVM Acc. |
|---|---|---|---|---|---|
| Equal | JOINT$_{L+S}$ | 0.62 (0.09) | 0.59 (0.14) | 0.58 (0.08) | 0.37 (0.20) |
| Equal | AUGMENT$_{L+S}$ | 0.64 (0.08) | 0.63 (0.09) | 0.65 (0.09) | 0.64▲ (0.09) |
| Consistent | JOINT$_{L+S}$ | 0.65 (0.08) | 0.59 (0.18) | 0.60 (0.08) | 0.41 (0.20) |
| Consistent | AUGMENT$_{L+S}$ | 0.67 (0.05) | 0.64 (0.13) | 0.65 (0.08) | 0.63 (0.03) |

Table 8: Domain-specific class bias results (standard deviation is reported inside parentheses). ▲: Significantly better than the corresponding LR or SVM JOINT baseline, with $p < 0.05$, using a paired $t$-test.
# 6 Discussion and Conclusions

Our work reveals intriguing findings on the use of DA for dementia detection. First, we find that DA can be successfully leveraged to improve feature-based dementia detection performance. This is the most comprehensive study of feature-based DA for this task, and the first to consider instance-based DA. We find that feature-based DA outperforms instance-based DA, and that an approach allowing for multiple source domains (MULTIAUGMENT) holds promise in many cases. In general, $\mathrm{F}_1$ score is similar across target datasets, ranging from 0.75 (CCC) to 0.77 (DB and ADReSS).

Our DA conditions also exhibit clear performance improvements over jointly training on the same data, offering further evidence to support the use of DA for this task. Finally, in follow-up studies on the importance of individual features and class biases in this setting, we find that pivot features pertaining to number of tokens, average phrase length, acoustic qualities, named entities, and measures of vocabulary richness and lexical frequency are particularly critical to strong performance. This suggests that these features may be particularly robust across domains. We also demonstrate that the performance of DA conditions relative to joint training is not due to domain-specific class bias, further strengthening our conclusions. In the future, we hope to conduct follow-up studies to further probe the limits and nuances of DA applied to this and other low-resource healthcare tasks.
# 7 Limitations

Our work is limited by several factors. First, we conduct our work primarily using popular, publicly available dementia detection datasets, all of which are in English. Thus, it is unclear whether our findings generalize to other languages, especially those with richer morphology where different predictive patterns may emerge. Second, due to the emphasis on feature-based models in most dementia detection work, we study only feature-based and instance-based DA approaches. Neural DA approaches may yield different findings, although they are less relevant for many current dementia detection approaches. Finally, we only study two backbone classification algorithms in our experiments. These classifiers are among the most common in prior work with our selected datasets; however, it may be the case that with a wider scope, other classification algorithms would yield different results. Collectively, these limitations present intriguing avenues for follow-up work.
# 8 Ethical Considerations

This research was guided by a broad range of ethical considerations, taking into account factors associated with fairness, privacy, and intended use. Although many of these are described throughout the paper, we summarize those that we consider most critical in this section. It is our hope that by building a holistic understanding of these factors, we develop improved perspective of the challenges associated with the study of low-resource healthcare problems and the positive broader impacts that they may create.

Data Privacy and Fairness. This research was approved by the Institutional Review Board at the University of Illinois Chicago. Access was granted for all datasets used in this research, and our use is governed by approved protocols unique to each dataset. DementiaBank, ADReSS, and the Carolinas Conversation Collection are all publicly available following access request protocols specified by their governing organizations. We refer readers to the citations throughout this work if they are interested in obtaining access to this data. We are unable to share it directly, although we can share our processing scripts and other code to facilitate reproducibility of our work by others.

ADRC is a privately-held dataset collected in collaboration with clinical partners under a rigorous set of guidelines governed by a separate, approved Institutional Review Board protocol at the University of California San Diego. This dataset will eventually be released, following further manual review to ensure full de-identification, but it cannot yet be released at this time. The data is currently stored on a password-protected server under VPN protection. To maximize reproducibility of our work by others unable to immediately gain access to this dataset, we limit the use of this dataset to a small set of experimental conditions (specifically, those using MULTIAUGMENT).

Intended Use. Automated models for dementia detection from spoken language present potential benefits in real-world scenarios: they offer opportunity to expand healthcare access, minimize cost of care, and reduce caregiver burden. However, they may also pose risks if used in unintended ways. We consider intended use of the work reported here to extend to the following:

- People may use the technology developed in this work to study language differences between individuals with and without dementia, as a way of building further understanding of the condition.
- People may use the technology developed in this work to further their own research into low-resource NLP tasks, including those associated with this and other healthcare problems.
- People may use the technology developed in this work to build early warning systems to flag individuals about potential dementia symptoms, provided that the technology is not misconstrued as an alternative to human care in any way.

Any use outside of those listed above is considered an unintended use. To safeguard against unintended use of our work, we remind readers that dataset access must be granted through the approved channels by the creators of the respective datasets used in this work. This may include processes ranging from email request to full review and approval by local and external Institutional Review Boards. We reiterate our caution against using any findings from this paper to build systems that function as intended or perceived replacements for human medical care.
| 226 |
+
# Acknowledgements
|
| 227 |
+
|
| 228 |
+
We thank the anonymous reviewers for their helpful feedback, which was incorporated in the final version of this manuscript. We also thank Erin Sundermann for her and her team's role in creating the ADRC dataset, and Raeanne Moore, Alex Leow, and Tamar Gollan for their clinical insights regarding Alzheimer's disease and dementia. The creation of the ADRC dataset was funded in part by a seed grant from the University of California San Diego's Alzheimer's Disease Research Center. Shahla Farzana and Natalie Parde were also partially funded by the National Science Foundation under Grant No. 2125411. Any opinions, findings, and conclusions or recommendations are those of the authors and do not necessarily reflect the views of the National Science Foundation.
# References

Samad Amini, Boran Hao, Lifu Zhang, Mengting Song, Aman Gupta, Cody Karjadi, Vijaya B. Kolachalama, Rhoda Au, and Ioannis Ch. Paschalidis. 2022. Automated detection of mild cognitive impairment and dementia from voice recordings: A natural language processing approach. Alzheimer's & Dementia, n/a(n/a).

Aparna Balagopalan, Benjamin Eyre, Frank Rudzicz, and Jekaterina Novikova. 2020a. To BERT or not to BERT: Comparing Speech and Language-Based Approaches for Alzheimer's Disease Detection. In Proc. Interspeech 2020, pages 2167-2171.

Aparna Balagopalan, Jekaterina Novikova, Matthew B. A. McDermott, Bret Nestor, Tristan Naumann, and Marzyeh Ghassemi. 2020b. Cross-Language Aphasia Detection using Optimal Transport Domain Adaptation. In Proceedings of the Machine Learning for Health NeurIPS Workshop, volume 116 of Proceedings of Machine Learning Research, pages 202-219. PMLR.

James T Becker, François Boller, Oscar L Lopez, Judith Saxton, and Karen L McGonigle. 1994. The natural history of Alzheimer's disease: Description of study cohort and accuracy of diagnosis. Archives of Neurology.

Marc Brysbaert and Boris New. 2009. Moving beyond Kučera and Francis: A critical evaluation of current word frequency norms and the introduction of a new and improved word frequency measure for American English. Behavior Research Methods, 41:977-990.

Hamidreza Chinaei, Leila Chan Currie, Andrew Danks, Hubert Lin, Tejas Mehta, and Frank Rudzicz. 2017. Identifying and avoiding confusion in dialogue with people with Alzheimer's disease. Computational Linguistics, 43(2):377-406.

Wenyuan Dai, Qiang Yang, Gui-Rong Xue, and Yong Yu. 2007. Boosting for transfer learning. In Proceedings of the 24th International Conference on Machine Learning, ICML '07, pages 193-200, New York, NY, USA. Association for Computing Machinery.

Hal Daumé III. 2007. Frustratingly easy domain adaptation. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 256-263, Prague, Czech Republic. Association for Computational Linguistics.

BH Davis, C Pope, K Van Ravenstein, and W Dou. 2017. Three approaches to understanding verbal cues from older adults with diabetes. The Internet Journal of Advanced Nursing Practice, 16(1).

Najim Dehak, Pierre Dumouchel, and Patrick Kenny. 2007. Modeling prosodic features with joint factor analysis for speaker verification. IEEE Transactions on Audio, Speech, and Language Processing, 15(7):2095-2103.

Flavio Di Palo and Natalie Parde. 2019. Enriching neural models with targeted features for dementia detection. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop, pages 302-308, Florence, Italy. Association for Computational Linguistics.

Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah A. Smith. 2020. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. CoRR, abs/2002.06305.

Shahla Farzana and Natalie Parde. 2020. Exploring MMSE Score Prediction Using Verbal and Non-Verbal Cues. In Proc. Interspeech 2020, pages 2207-2211.

Shahla Farzana and Natalie Parde. 2022. Are interaction patterns helpful for task-agnostic dementia detection? An empirical exploration. In Proceedings of the 23rd Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 172-182, Edinburgh, UK. Association for Computational Linguistics.

Yoav Freund and Robert E Schapire. 1997. A decision-theoretic generalization of on-line learning and an application to boosting. Journal of Computer and System Sciences, 55(1):119-139.

Jeffrey M. Girard, Alexandria K. Vail, Einat Liebenthal, Katrina Brown, Can Misel Kilciksiz, Luciana Pennant, Elizabeth Liebson, Dost Ongur, Louis-Philippe Morency, and Justin T. Baker. 2022. Computational analysis of spoken language in acute psychosis and mania. Schizophrenia Research, 245:97-115. Computational Approaches to Understanding Psychosis.

Harold Goodglass and Edith Kaplan. 1972. The assessment of aphasia and related disorders. Lea & Febiger.

Sarah A. Graham, Ellen E. Lee, Dilip V. Jeste, Ryan Van Patten, Elizabeth W. Twamley, Camille Nebeker, Yasunori Yamada, Ho-Cheol Kim, and Colin A. Depp. 2020. Artificial intelligence approaches to predicting and detecting cognitive decline in older adults: A conceptual review. Psychiatry Research, 284:112732.

Yue Guo, Changye Li, Carol Roan, Serguei Pakhomov, and Trevor Cohen. 2021. Crossing the "cookie theft" corpus chasm: Applying what BERT learns from outside data to the ADReSS challenge dementia detection task. Frontiers in Computer Science, 3.

Jing Jiang and ChengXiang Zhai. 2007. Instance weighting for domain adaptation in NLP. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 264-271, Prague, Czech Republic. Association for Computational Linguistics.

Wouter M Kouw and Marco Loog. 2018. An introduction to domain adaptation and transfer learning. Technical report, Delft University of Technology.

Egoitz Laparra, Steven Bethard, and Timothy A Miller. 2020. Rethinking domain adaptation for machine learning over clinical language. JAMIA Open, 3(2):146-150.

Changye Li, David Knopman, Weizhe Xu, Trevor Cohen, and Serguei Pakhomov. 2022. GPT-D: Inducing dementia-related linguistic anomalies by deliberate degradation of artificial neural language models. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1866-1877, Dublin, Ireland. Association for Computational Linguistics.

Saturnino Luz, Sofia De La Fuente Garcia, and Pierre Albert. 2018. A method for analysis of patient speech in dialogue for dementia detection. In Resources and Processing of Linguistic, Para-linguistic and Extra-linguistic Data from People with Various Forms of Cognitive Impairment, pages 35-42. European Language Resources Association (ELRA).

Saturnino Luz, Fasih Haider, Sofia de la Fuente, Davida Fromm, and Brian MacWhinney. 2020. Alzheimer's Dementia Recognition Through Spontaneous Speech: The ADReSS Challenge. In Proc. Interspeech 2020, pages 2172-2176.

Saturnino Luz, Fasih Haider, Sofia de la Fuente, Davida Fromm, and Brian MacWhinney. 2021. Detecting cognitive decline using speech only: The ADReSSo challenge. medRxiv.

Brian MacWhinney. 2009. The CHILDES Project Part 1: The CHAT Transcription Format. Technical report, Carnegie Mellon University.

Mitchell P. Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of English: The Penn Treebank. Computational Linguistics, 19(2):313-330.

Vaden Masrani. 2018. Detecting dementia from written and spoken language. Master's thesis, University of British Columbia.

Vaden Masrani, Gabriel Murray, Thalia Shoshana Field, and Giuseppe Carenini. 2017. Domain adaptation for detecting mild cognitive impairment. In Advances in Artificial Intelligence, pages 248-259, Cham. Springer International Publishing.

Heinz-Dieter Maas. 1972. Über den Zusammenhang zwischen Wortschatzumfang und Länge eines Textes. Zeitschrift für Literaturwissenschaft und Linguistik, 2(8):73.

Philip M McCarthy. 2005. An assessment of the range and usefulness of lexical diversity measures and the potential of the measure of textual lexical diversity (MTLD). Ph.D. thesis, The University of Memphis.

Philip M. McCarthy and Scott Jarvis. 2007. vocd: A theoretical and empirical evaluation. Language Testing, 24(4):459-488.

Shamila Nasreen, Julian Hough, and Matthew Purver. 2021a. Rare-class dialogue act tagging for Alzheimer's disease diagnosis. In Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 290-300, Singapore and Online. Association for Computational Linguistics.

Shamila Nasreen, Morteza Rohanian, Julian Hough, and Matthew Purver. 2021b. Alzheimer's dementia recognition from spontaneous speech using disfluency and interactional features. Frontiers in Computer Science, 3.

Slav Petrov, Dipanjan Das, and Ryan McDonald. 2012. A universal part-of-speech tagset. In Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12), pages 2089-2096, Istanbul, Turkey. European Language Resources Association (ELRA).

Charlene Pope and Boyd H. Davis. 2011. Finding a balance: The Carolinas Conversation Collection. Corpus Linguistics and Linguistic Theory, 7(1):143-161.

Sameer S. Pradhan, Eduard Hovy, Mitch Marcus, Martha Palmer, Lance Ramshaw, and Ralph Weischedel. 2007. OntoNotes: A unified relational semantic representation. In International Conference on Semantic Computing (ICSC 2007), pages 517-526.

Peng Qi, Timothy Dozat, Yuhao Zhang, and Christopher D. Manning. 2018. Universal dependency parsing from scratch. In Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, pages 160-170, Brussels, Belgium. Association for Computational Linguistics.

Baochen Sun, Jiashi Feng, and Kate Saenko. 2016. Return of frustratingly easy domain adaptation. In Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence, AAAI'16, pages 2058-2065. AAAI Press.

J.C. Vásquez-Correa, J.R. Orozco-Arroyave, T. Bocklet, and E. Nöth. 2018. Towards an automatic evaluation of the dysarthria level of patients with Parkinson's disease. Journal of Communication Disorders, 76:21-36.

Rui Xia, Jianfei Yu, Feng Xu, and Shumei Wang. 2014. Instance-based domain adaptation in NLP via in-target-domain logistic approximation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 28.

Jiahong Yuan, Yuchen Bian, Xingyu Cai, Jiaji Huang, Zheng Ye, and Kenneth Ward Church. 2020. Disfluencies and fine-tuning pre-trained language models for detection of Alzheimer's disease. In Interspeech.
# A For every submission:

A1. Did you describe the limitations of your work? 7

A2. Did you discuss any potential risks of your work? 8

A3. Do the abstract and introduction summarize the paper's main claims?

A4. Have you used AI writing assistants when working on this paper? Left blank.

# B Did you use or create scientific artifacts?

3.2, 3.4, 4

B1. Did you cite the creators of artifacts you used? 3.2, 3.4, 4

B2. Did you discuss the license or terms for use and/or distribution of any artifacts? Not applicable. Left blank.

B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)? 3.2, 8

B4. Did you discuss the steps taken to check whether the data that was collected/used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect/anonymize it? Not applicable. Left blank.

B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.? 3.2

B6. Did you report relevant statistics like the number of examples, details of train/test/dev splits, etc. for the data that you used/created? Even for commonly-used benchmark datasets, include the number of examples in train/validation/test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be. 3.2, 4.1

# C Did you run computational experiments?

4, 5

C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used? Not applicable. Left blank.

C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? 4.1

C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run? 4, 5

C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? 3.2, 3.4, 4.1

# D Did you use human annotators (e.g., crowdworkers) or research with human participants?

Left blank.

D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? No response.

D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? No response.

D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? No response.

D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? No response.

D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? No response.

The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.
2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5268ed5916c137397f5d327729f8332abdf5c2216e4385ec98ae2adea6853463
size 405386

2023/Towards Domain-Agnostic and Domain-Adaptive Dementia Detection from Spoken Language/layout.json
ADDED
The diff for this file is too large to render. See raw diff

2023/Towards Faithful Dialogues via Focus Learning/657e0968-cdfa-4f01-81a8-cef1a90aabec_content_list.json
ADDED
@@ -0,0 +1,2331 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Towards Faithful Dialogs via Focus Learning",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
263,
|
| 8 |
+
90,
|
| 9 |
+
736,
|
| 10 |
+
112
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Yifan Deng $^{1,2}$ Xingsheng Zhang $^{1,2*}$ Heyan Huang $^{3*}$ Yue Hu $^{1,2}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
206,
|
| 19 |
+
124,
|
| 20 |
+
801,
|
| 21 |
+
142
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "list",
|
| 27 |
+
"sub_type": "text",
|
| 28 |
+
"list_items": [
|
| 29 |
+
"$^{1}$ Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China",
|
| 30 |
+
"$^{2}$ School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China"
|
| 31 |
+
],
|
| 32 |
+
"bbox": [
|
| 33 |
+
142,
|
| 34 |
+
142,
|
| 35 |
+
855,
|
| 36 |
+
174
|
| 37 |
+
],
|
| 38 |
+
"page_idx": 0
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"type": "text",
|
| 42 |
+
"text": "$^{3}$ School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China",
|
| 43 |
+
"bbox": [
|
| 44 |
+
117,
|
| 45 |
+
174,
|
| 46 |
+
885,
|
| 47 |
+
192
|
| 48 |
+
],
|
| 49 |
+
"page_idx": 0
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"type": "text",
|
| 53 |
+
"text": "{dengyifan, zhangxingsheng, huyue}@iie.ac.cn hhy63@bit.edu.cn",
|
| 54 |
+
"bbox": [
|
| 55 |
+
277,
|
| 56 |
+
192,
|
| 57 |
+
724,
|
| 58 |
+
225
|
| 59 |
+
],
|
| 60 |
+
"page_idx": 0
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"type": "text",
|
| 64 |
+
"text": "Abstract",
|
| 65 |
+
"text_level": 1,
|
| 66 |
+
"bbox": [
|
| 67 |
+
260,
|
| 68 |
+
252,
|
| 69 |
+
342,
|
| 70 |
+
268
|
| 71 |
+
],
|
| 72 |
+
"page_idx": 0
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"type": "text",
|
| 76 |
+
"text": "Maintaining faithfulness between responses and knowledge is an important research topic for building reliable knowledge-grounded dialogue systems. Existing models heavily rely on the elaborate data engineering and increasing the model's parameters ignoring to track the tokens that significantly influence losses, which is decisive for the optimization direction of the model in each iteration. To address this issue, we propose Focus Learning (FocusL), a novel learning approach that adjusts the contribution of each token to the optimization direction by directly scaling the corresponding objective loss. Specifically, we first introduce a positioning method by utilizing relevance distributions between knowledge and each response token to locate knowledge-aware tokens. Then, we further design a relevance-to-weight transformation to provide dynamic token-level weights for adjusting the cross-entropy loss. Finally, we use the weighted loss to encourage the model to pay special attention to the knowledge utilization. Experimental results demonstrate that our method achieves the new state-of-the-art results and generates more reliable responses while maintaining training stability.",
|
| 77 |
+
"bbox": [
|
| 78 |
+
141,
|
| 79 |
+
280,
|
| 80 |
+
460,
|
| 81 |
+
650
|
| 82 |
+
],
|
| 83 |
+
"page_idx": 0
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"type": "text",
|
| 87 |
+
"text": "1 Introduction",
|
| 88 |
+
"text_level": 1,
|
| 89 |
+
"bbox": [
|
| 90 |
+
114,
|
| 91 |
+
662,
|
| 92 |
+
260,
|
| 93 |
+
678
|
| 94 |
+
],
|
| 95 |
+
"page_idx": 0
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"type": "text",
|
| 99 |
+
"text": "Although open-domain conversation systems can generate smooth and fluent responses with the help of large-scale pre-trained models (Raffel et al., 2020; Lewis et al., 2020), vacuous responses (Li et al., 2016) continue to be prevalent. To enrich the content of responses, an effective way is to introduce external knowledge (Dinan et al., 2019; Zhou et al., 2018). The knowledge-grounded model, however, frequently generates responses that appear knowledgeable but are not derived from the given knowledge. This means that the correctness of the knowledge used in responses cannot be guaranteed. As shown in Figure 1, the \"Oklahoma\"",
|
| 100 |
+
"bbox": [
|
| 101 |
+
112,
|
| 102 |
+
688,
|
| 103 |
+
490,
|
| 104 |
+
897
|
| 105 |
+
],
|
| 106 |
+
"page_idx": 0
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"type": "image",
|
| 110 |
+
"img_path": "images/013720a46f330d83933ceb4e1e2b79b69b89113eaed8a170d12ee8d09e167cca.jpg",
|
| 111 |
+
"image_caption": [
|
| 112 |
+
"Figure 1: The different learning focus (i.e., the tokens that corresponding losses significantly influence the total objective loss) between general learning and focus learning. Original learning focus without guidance in general learning are fragmented with no rules to follow. Our methods make the model focus on the knowledge-aware tokens (i.e., tokens that have high semantic relevance to knowledge) to alleviate the hallucinations."
|
| 113 |
+
],
|
| 114 |
+
"image_footnote": [],
|
| 115 |
+
"bbox": [
|
| 116 |
+
527,
|
| 117 |
+
253,
|
| 118 |
+
860,
|
| 119 |
+
552
|
| 120 |
+
],
|
| 121 |
+
"page_idx": 0
|
| 122 |
+
},
|
| 123 |
+
{
|
| 124 |
+
"type": "text",
|
| 125 |
+
"text": "in the response is not present in the given knowledge and relevant knowledge is unverifiable. This phenomenon is known as the hallucination (Dziri et al., 2022a) problem. Due to the inability to verify knowledge, hallucinations can mislead users and reduce the model's credibility.",
|
| 126 |
+
"bbox": [
|
| 127 |
+
505,
|
| 128 |
+
709,
|
| 129 |
+
884,
|
| 130 |
+
804
|
| 131 |
+
],
|
| 132 |
+
"page_idx": 0
|
| 133 |
+
},
|
| 134 |
+
{
|
| 135 |
+
"type": "text",
|
| 136 |
+
"text": "Numerous methods have been developed to tackle the hallucination problem by knowledge graph (Kang et al., 2022; Dziri et al., 2021), contrastive learning (Sun et al., 2022) or control code (Rashkin et al., 2021). These models enhance the model's attention to knowledge by increasing parameters and elaborate data engineering. An im",
|
| 137 |
+
"bbox": [
|
| 138 |
+
507,
|
| 139 |
+
806,
|
| 140 |
+
885,
|
| 141 |
+
919
|
| 142 |
+
],
|
| 143 |
+
"page_idx": 0
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"type": "page_footnote",
|
| 147 |
+
"text": "*Corresponding author",
|
| 148 |
+
"bbox": [
|
| 149 |
+
141,
|
| 150 |
+
904,
|
| 151 |
+
285,
|
| 152 |
+
917
|
| 153 |
+
],
|
| 154 |
+
"page_idx": 0
|
| 155 |
+
},
|
| 156 |
+
{
|
| 157 |
+
"type": "page_number",
|
| 158 |
+
"text": "4554",
|
| 159 |
+
"bbox": [
|
| 160 |
+
480,
|
| 161 |
+
927,
|
| 162 |
+
521,
|
| 163 |
+
940
|
| 164 |
+
],
|
| 165 |
+
"page_idx": 0
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"type": "footer",
|
| 169 |
+
"text": "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics",
|
| 170 |
+
"bbox": [
|
| 171 |
+
226,
|
| 172 |
+
945,
|
| 173 |
+
771,
|
| 174 |
+
958
|
| 175 |
+
],
|
| 176 |
+
"page_idx": 0
|
| 177 |
+
},
|
| 178 |
+
{
|
| 179 |
+
"type": "footer",
|
| 180 |
+
"text": "Volume 1: Long Papers, pages 4554-4566",
|
| 181 |
+
"bbox": [
|
| 182 |
+
368,
|
| 183 |
+
958,
|
| 184 |
+
628,
|
| 185 |
+
971
|
| 186 |
+
],
|
| 187 |
+
"page_idx": 0
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"type": "footer",
|
| 191 |
+
"text": "July 9-14, 2023 ©2023 Association for Computational Linguistics",
|
| 192 |
+
"bbox": [
|
| 193 |
+
295,
|
| 194 |
+
972,
|
| 195 |
+
700,
|
| 196 |
+
985
|
| 197 |
+
],
|
| 198 |
+
"page_idx": 0
|
| 199 |
+
},
|
| 200 |
+
{
|
| 201 |
+
"type": "image",
|
| 202 |
+
"img_path": "images/5a6bb081a05bfe82aa0626bdd131e32dd1ebb834e7f6c71e854b031a3bdfbf40.jpg",
|
| 203 |
+
"image_caption": [
|
| 204 |
+
"Figure 2: Distributions of the loss and the relevance between knowledge and response tokens. We select a response as an example and visualize the loss, semantic relevance to knowledge, and the adjusted loss (FocusL) at the beginning of training. In the original loss, the model is less sensitive to optimization of knowledge-aware tokens. In contrast, the loss of knowledge-aware tokens in FocusL are larger than the others, and the knowledge-irrelevant tokens' loss are scaled down."
|
| 205 |
+
],
|
| 206 |
+
"image_footnote": [],
|
| 207 |
+
"bbox": [
|
| 208 |
+
132,
|
| 209 |
+
80,
|
| 210 |
+
470,
|
| 211 |
+
265
|
| 212 |
+
],
|
| 213 |
+
"page_idx": 1
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"type": "text",
|
| 217 |
+
"text": "important assumption for them is that the model has the ability to give more attention to knowledge during training, yet this is not always held true. We consider this to be a common problem with general training methods which neglect to track the tokens that significantly influence the objective loss (i.e., learning focus). Different from the traditional concept of attention mechanisms which primarily focus on identifying important information in the input, the focus emphasizes important information in the target response. As the example shown in Figure 1, in general learning scenario, the learning focus is often out of control, and the model tends to focus on simple words (e.g., be, the), which lead to neglect of the tokens that have high relevance to knowledge referred as knowledge-aware tokens (e.g., polar or temperate zones). Intuitively, knowledge-aware tokens are even more critical for improving consistency, and focusing the model's attention on them can make the optimization goal fitter for the task. Therefore, it is necessary to revise the original learning focus. However, there are two main challenges: (1) How to locate the desired learning focus? Due to the fact that the learning focus is on different words in each sentence and the token-level manual annotation of responses is extremely time-consuming and labor-intensive, the existing datasets do not have a fine-grained annotation of key semantic words in the responses. (2) Given the desired learning focus, how to correct",
|
| 218 |
+
"bbox": [
|
| 219 |
+
115,
|
| 220 |
+
437,
|
| 221 |
+
489,
|
| 222 |
+
917
|
| 223 |
+
],
|
| 224 |
+
"page_idx": 1
|
| 225 |
+
},
|
| 226 |
+
{
|
| 227 |
+
"type": "text",
|
| 228 |
+
"text": "the original learning focus? Existing training methods with cross-entropy loss lack direct guidance on learning focus.",
|
| 229 |
+
"bbox": [
|
| 230 |
+
507,
|
| 231 |
+
84,
|
| 232 |
+
884,
|
| 233 |
+
133
|
| 234 |
+
],
|
| 235 |
+
"page_idx": 1
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"type": "text",
|
| 239 |
+
"text": "To address above issues, we propose a novel learning approach, Focus Learning (FocusL). Instead of impacting knowledge utilization implicitly, we directly scale the corresponding objective loss to adjust the contribution of each token to the optimization direction. Specifically, for the first challenge, we first define the desired learning focus in knowledge-grounded dialogue task as knowledge-aware tokens. Then we devise a positioning method to get the relevance score distribution between knowledge and each response token. For the second challenge, we explore a relevance-to-weight transformation method to provide dynamic token-level weights for the cross-entropy loss. Finally, we use the corrected learning focus to guide the model training. As we can see in Figure 2, the losses of knowledge-aware tokens do not gain a high proportion of the original loss distribution. In contrast, our approach can expand the gap between knowledge-aware tokens and the others, which increases the impact of the change of knowledge-aware tokens' loss on the final loss, thus affecting the optimization direction and guiding the model to pay more attention to knowledge utilization.",
|
| 240 |
+
"bbox": [
|
| 241 |
+
507,
|
| 242 |
+
134,
|
| 243 |
+
884,
|
| 244 |
+
533
|
| 245 |
+
],
|
| 246 |
+
"page_idx": 1
|
| 247 |
+
},
|
| 248 |
+
{
|
| 249 |
+
"type": "text",
|
| 250 |
+
"text": "Our main contributions are summarized as below:",
|
| 251 |
+
"bbox": [
|
| 252 |
+
507,
|
| 253 |
+
536,
|
| 254 |
+
882,
|
| 255 |
+
565
|
| 256 |
+
],
|
| 257 |
+
"page_idx": 1
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"type": "list",
|
| 261 |
+
"sub_type": "text",
|
| 262 |
+
"list_items": [
|
| 263 |
+
"- We rethink existing models and learning methods, and propose a novel learning approach to address the hallucination problem by adjusting the learning focus.",
|
| 264 |
+
"- We propose a positioning method and a relevance-to-weight transformation method to adaptively scale the loss of each token in the response.",
|
| 265 |
+
"- Experimental results demonstrate that our approach significantly outperforms the current state-of-the-art baselines, and effectively reduces hallucinations while maintaining high quality of responses."
|
| 266 |
+
],
|
| 267 |
+
"bbox": [
|
| 268 |
+
507,
|
| 269 |
+
580,
|
| 270 |
+
884,
|
| 271 |
+
814
|
| 272 |
+
],
|
| 273 |
+
"page_idx": 1
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"type": "text",
|
| 277 |
+
"text": "2 Related Work",
|
| 278 |
+
"text_level": 1,
|
| 279 |
+
"bbox": [
|
| 280 |
+
507,
|
| 281 |
+
828,
|
| 282 |
+
665,
|
| 283 |
+
843
|
| 284 |
+
],
|
| 285 |
+
"page_idx": 1
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"type": "text",
|
| 289 |
+
"text": "Knowledge-grounded Dialogue Generation",
|
| 290 |
+
"text_level": 1,
|
| 291 |
+
"bbox": [
|
| 292 |
+
507,
|
| 293 |
+
854,
|
| 294 |
+
882,
|
| 295 |
+
870
|
| 296 |
+
],
|
| 297 |
+
"page_idx": 1
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"type": "text",
|
| 301 |
+
"text": "Knowledge-grounded dialogue systems aim to alleviate vacuous responses by injecting external knowledge into the dialogue model. Recently,",
|
| 302 |
+
"bbox": [
|
| 303 |
+
507,
|
| 304 |
+
871,
|
| 305 |
+
882,
|
| 306 |
+
917
|
| 307 |
+
],
|
| 308 |
+
"page_idx": 1
|
| 309 |
+
},
|
| 310 |
+
{
|
| 311 |
+
"type": "page_number",
|
| 312 |
+
"text": "4555",
|
| 313 |
+
"bbox": [
|
| 314 |
+
480,
|
| 315 |
+
928,
|
| 316 |
+
519,
|
| 317 |
+
940
|
| 318 |
+
],
|
| 319 |
+
"page_idx": 1
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"type": "text",
|
| 323 |
+
"text": "various forms of external knowledge have been used in dialogue systems, such as tables (Moghe et al., 2018), graphs (Bollacker et al., 2008; Moon et al., 2019; Zhou et al., 2020; Peng et al., 2022), documents (Ghazvininejad et al., 2017; Zhou et al., 2018; Zhao et al., 2019). In spite of research on the forms of knowledge, most existing systems focus on knowledge selection (Lian et al., 2019; Kim et al., 2020; Zheng et al., 2020; Meng et al., 2020; Li et al., 2022) and response generation with given knowledge (Xu et al., 2020; Ma et al., 2020; Cai et al., 2020; Zhao et al., 2020). In this work, we mainly focus on avoiding models using unverifiable knowledge in response generation with given knowledge.",
|
| 324 |
+
"bbox": [
|
| 325 |
+
112,
|
| 326 |
+
84,
|
| 327 |
+
489,
|
| 328 |
+
325
|
| 329 |
+
],
|
| 330 |
+
"page_idx": 2
|
| 331 |
+
},
|
| 332 |
+
{
|
| 333 |
+
"type": "text",
|
| 334 |
+
"text": "Hallucinations in Text Generation Generating responses that are unfaithful to the provided knowledge, known as the hallucination, is a tricky problem in knowledge-grounded dialogue systems. Recently, the hallucination problem has attracted increasing attention because the generated text appears smooth and fluent but usually contains false knowledge, which significantly threatens the model's credibility. Some studies reduce hallucinations by introducing knowledge graph (Kang et al., 2022; Dziri et al., 2021), controllable generation (Rashkin et al., 2021), and contrastive learning (Sun et al., 2022). In a recent study, Dziri et al. (2022b) analyze the source of hallucination in detail and find that the most knowledge-grounded conversation datasets (Dinan et al., 2019; Zhou et al., 2018) inherently contain hallucinations, and models trained on such dataset further amplify hallucinations, which demonstrate that the pattern of hallucination responses is more likely to be learned by the model. To address this problem, Dziri et al. (2022a) further propose FaithDial, a new dataset that removes hallucinations in the Wizard of Wikipedia (Dinan et al., 2019). Different from these studies about models and datasets, we find that the training method with unexpected learning focus also plays a vital role in the hallucination problem and then present a method to adjust the original focus.",
|
| 335 |
+
"bbox": [
|
| 336 |
+
112,
|
| 337 |
+
338,
|
| 338 |
+
489,
|
| 339 |
+
806
|
| 340 |
+
],
|
| 341 |
+
"page_idx": 2
|
| 342 |
+
},
|
| 343 |
+
{
|
| 344 |
+
"type": "text",
|
| 345 |
+
"text": "3 Methods",
|
| 346 |
+
"text_level": 1,
|
| 347 |
+
"bbox": [
|
| 348 |
+
112,
|
| 349 |
+
819,
|
| 350 |
+
225,
|
| 351 |
+
834
|
| 352 |
+
],
|
| 353 |
+
"page_idx": 2
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"type": "text",
|
| 357 |
+
"text": "3.1 Our Approach",
|
| 358 |
+
"text_level": 1,
|
| 359 |
+
"bbox": [
|
| 360 |
+
112,
|
| 361 |
+
848,
|
| 362 |
+
275,
|
| 363 |
+
863
|
| 364 |
+
],
|
| 365 |
+
"page_idx": 2
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"type": "text",
|
| 369 |
+
"text": "The overview of FocusL is presented in Figure 3. Given the conversation context $C = (c_{1},\\dots,c_{n})$ consisting of a sequence of $n$ dialogue turns and",
|
| 370 |
+
"bbox": [
|
| 371 |
+
112,
|
| 372 |
+
870,
|
| 373 |
+
487,
|
| 374 |
+
917
|
| 375 |
+
],
|
| 376 |
+
"page_idx": 2
|
| 377 |
+
},
|
| 378 |
+
{
|
| 379 |
+
"type": "text",
|
| 380 |
+
"text": "the corresponding knowledge $K = (k_{1},\\dots,k_{m})$ for the current turn, where $m$ is the number of tokens in $K$ , the goal of our task is to generate responses $Y = (y_{1},\\dots,y_{T})$ where $T$ is the number of tokens in $Y$ . We first form the input $I$ with joint knowledge $K$ and conversation context $C$ as follows:",
|
| 381 |
+
"bbox": [
|
| 382 |
+
507,
|
| 383 |
+
84,
|
| 384 |
+
882,
|
| 385 |
+
195
|
| 386 |
+
],
|
| 387 |
+
"page_idx": 2
|
| 388 |
+
},
|
| 389 |
+
{
|
| 390 |
+
"type": "equation",
|
| 391 |
+
"text": "\n$$\nI = [ K; C ] \\tag {1}\n$$\n",
|
| 392 |
+
"text_format": "latex",
|
| 393 |
+
"bbox": [
|
| 394 |
+
650,
|
| 395 |
+
206,
|
| 396 |
+
880,
|
| 397 |
+
223
|
| 398 |
+
],
|
| 399 |
+
"page_idx": 2
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"type": "text",
|
| 403 |
+
"text": "where the utterances of $C$ are delimited by the speaker identifier (either $<\\text{user}>$ or $<\\text{bot}>$ ). Then we use T5 (Raffel et al., 2020) as the base model, which is a pre-trained encoder-decoder model that uses the transformer architecture (Vaswani et al., 2017). Taking $I$ as input, the base model outputs a logit distribution $h = (h_1, \\dots, h_T)$ , where $h_t$ is the corresponding logit distribution of the $t$ -th token in $Y$ . The positioning module locate knowledge-aware tokens in the response $Y$ and calculate the corresponding adjust weight. The focus shifting module adjusts the original logit distribution $h$ to obtain the final logit distribution $h_w$ . Finally, we train the model to produce the next conversation utterance $y_1 \\dots y_T$ by minimizing the cross-entropy loss.",
|
| 404 |
+
"bbox": [
|
| 405 |
+
507,
|
| 406 |
+
233,
|
| 407 |
+
882,
|
| 408 |
+
488
|
| 409 |
+
],
|
| 410 |
+
"page_idx": 2
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"type": "text",
|
| 414 |
+
"text": "In the following, we introduce three steps of the FocusL training process: (1) locate knowledge-aware tokens which used as the new learning focus ( $\\S 3.2$ ); (2) calculate adjust weights based on the relevance of knowledge with each token in the response ( $\\S 3.3$ ); (3) switch original learning focus to the knowledge-aware tokens ( $\\S 3.4$ ).",
|
| 415 |
+
"bbox": [
|
| 416 |
+
507,
|
| 417 |
+
491,
|
| 418 |
+
882,
|
| 419 |
+
602
|
| 420 |
+
],
|
| 421 |
+
"page_idx": 2
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"type": "text",
|
| 425 |
+
"text": "3.2 Learning Focus Positioning",
|
| 426 |
+
"text_level": 1,
|
| 427 |
+
"bbox": [
|
| 428 |
+
507,
|
| 429 |
+
614,
|
| 430 |
+
769,
|
| 431 |
+
630
|
| 432 |
+
],
|
| 433 |
+
"page_idx": 2
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"type": "text",
|
| 437 |
+
"text": "To adjust the learning focus of the model, we first define knowledge-aware tokens as the new learning focus which is more in line with the knowledge-grounded dialogue task. And then we use the distance between the response token and knowledge in semantic space to measure its relevance:",
|
| 438 |
+
"bbox": [
|
| 439 |
+
507,
|
| 440 |
+
634,
|
| 441 |
+
882,
|
| 442 |
+
730
|
| 443 |
+
],
|
| 444 |
+
"page_idx": 2
|
| 445 |
+
},
|
| 446 |
+
{
|
| 447 |
+
"type": "equation",
|
| 448 |
+
"text": "\n$$\n\\operatorname {r e l e v a n c e} \\left(y _ {t} ^ {r}, \\mathcal {K}\\right) = \\frac {y _ {t} ^ {r} \\cdot \\mathcal {K}}{\\| y _ {t} ^ {r} \\| \\cdot \\| \\mathcal {K} \\|} \\tag {2}\n$$\n",
|
| 449 |
+
"text_format": "latex",
|
| 450 |
+
"bbox": [
|
| 451 |
+
569,
|
| 452 |
+
739,
|
| 453 |
+
880,
|
| 454 |
+
772
|
| 455 |
+
],
|
| 456 |
+
"page_idx": 2
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "text",
|
| 460 |
+
"text": "To get the semantic representation of the token $y_{t}$ and the knowledge $K$ , we use the embedding layer $Emb(\\cdot)$ of the base model to obtain a dense representation:",
|
| 461 |
+
"bbox": [
|
| 462 |
+
507,
|
| 463 |
+
780,
|
| 464 |
+
880,
|
| 465 |
+
843
|
| 466 |
+
],
|
| 467 |
+
"page_idx": 2
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "equation",
|
| 471 |
+
"text": "\n$$\ny _ {t} ^ {r} = \\operatorname {E m b} (y _ {t}) \\tag {3}\n$$\n",
|
| 472 |
+
"text_format": "latex",
|
| 473 |
+
"bbox": [
|
| 474 |
+
611,
|
| 475 |
+
854,
|
| 476 |
+
880,
|
| 477 |
+
871
|
| 478 |
+
],
|
| 479 |
+
"page_idx": 2
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"type": "equation",
|
| 483 |
+
"text": "\n$$\n\\mathcal {K} = \\frac {1}{m} \\sum_ {i = 1} ^ {m} E m b \\left(k _ {i}\\right) \\tag {4}\n$$\n",
|
| 484 |
+
"text_format": "latex",
|
| 485 |
+
"bbox": [
|
| 486 |
+
611,
|
| 487 |
+
875,
|
| 488 |
+
880,
|
| 489 |
+
914
|
| 490 |
+
],
|
| 491 |
+
"page_idx": 2
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"type": "page_number",
|
| 495 |
+
"text": "4556",
|
| 496 |
+
"bbox": [
|
| 497 |
+
480,
|
| 498 |
+
927,
|
| 499 |
+
519,
|
| 500 |
+
940
|
| 501 |
+
],
|
| 502 |
+
"page_idx": 2
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"type": "image",
|
| 506 |
+
"img_path": "images/1b3b758d304af4e7138231e26ee099113c8d81df3498cdc88d63fd655c92b395.jpg",
|
| 507 |
+
"image_caption": [
|
| 508 |
+
"Figure 3: Training process of FocusL. We first calculate original model output based on the given knowledge and the context. Then we calculate the relevance score between each token in the response and knowledge, and further convert it to the adjust weight distribution. Finally, we use the adjust weight to scale the original loss."
|
| 509 |
+
],
|
| 510 |
+
"image_footnote": [],
|
| 511 |
+
"bbox": [
|
| 512 |
+
141,
|
| 513 |
+
82,
|
| 514 |
+
858,
|
| 515 |
+
287
|
| 516 |
+
],
|
| 517 |
+
"page_idx": 3
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"type": "text",
|
| 521 |
+
"text": "Note that we do not use the model's encoder to obtain the representation vector of knowledge and responses, we think that the output of the embedding layer is sufficient to provide the desired semantic information, and also has less impact on the training speed. Instead of outputting knowledge-aware tokens directly, the positioning method uses relevance matrix to provide more information for §3.3.",
|
| 522 |
+
"bbox": [
|
| 523 |
+
112,
|
| 524 |
+
357,
|
| 525 |
+
489,
|
| 526 |
+
502
|
| 527 |
+
],
|
| 528 |
+
"page_idx": 3
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"type": "text",
|
| 532 |
+
"text": "3.3 Adjust Weight",
|
| 533 |
+
"text_level": 1,
|
| 534 |
+
"bbox": [
|
| 535 |
+
112,
|
| 536 |
+
514,
|
| 537 |
+
275,
|
| 538 |
+
531
|
| 539 |
+
],
|
| 540 |
+
"page_idx": 3
|
| 541 |
+
},
|
| 542 |
+
{
|
| 543 |
+
"type": "text",
|
| 544 |
+
"text": "To adaptively assign a weight for each token to adjust the corresponding logit value, we can simply define adjust weight scalar $w_{t}^{a}$ as follows:",
|
| 545 |
+
"bbox": [
|
| 546 |
+
112,
|
| 547 |
+
537,
|
| 548 |
+
487,
|
| 549 |
+
586
|
| 550 |
+
],
|
| 551 |
+
"page_idx": 3
|
| 552 |
+
},
|
| 553 |
+
{
|
| 554 |
+
"type": "equation",
|
| 555 |
+
"text": "\n$$\nw _ {t} ^ {a} = \\left\\{ \\begin{array}{l l} 2, & \\text {i f r e l e v a n c e} \\left(y _ {t} ^ {r}, \\mathcal {K}\\right) \\geq \\theta \\\\ 1, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {5}\n$$\n",
|
| 556 |
+
"text_format": "latex",
|
| 557 |
+
"bbox": [
|
| 558 |
+
157,
|
| 559 |
+
596,
|
| 560 |
+
485,
|
| 561 |
+
640
|
| 562 |
+
],
|
| 563 |
+
"page_idx": 3
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"type": "text",
|
| 567 |
+
"text": "where $\\theta$ is a threshold value. We rigidly define knowledge-aware tokens by setting a specific $\\theta$ . The token with relevance greater than the threshold is regarded as knowledge-aware token and obtain a high adjust weight to increase corresponding loss. We keep the original logit value unchanged for tokens with relevance scores less than the threshold.",
|
| 568 |
+
"bbox": [
|
| 569 |
+
112,
|
| 570 |
+
651,
|
| 571 |
+
487,
|
| 572 |
+
762
|
| 573 |
+
],
|
| 574 |
+
"page_idx": 3
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"type": "text",
|
| 578 |
+
"text": "However, the boundaries of knowledge-aware tokens are difficult to define, and the threshold value easily influences the learning effect of the model. To solve this problem, we further propose two different methods for converting relevance scores into adjust weights.",
|
| 579 |
+
"bbox": [
|
| 580 |
+
112,
|
| 581 |
+
764,
|
| 582 |
+
487,
|
| 583 |
+
860
|
| 584 |
+
],
|
| 585 |
+
"page_idx": 3
|
| 586 |
+
},
|
| 587 |
+
{
|
| 588 |
+
"type": "text",
|
| 589 |
+
"text": "Liner Weight To make full use of the information in the relevance matrix, we propose to assign a different adjust weight to each token. We obtain a",
|
| 590 |
+
"bbox": [
|
| 591 |
+
112,
|
| 592 |
+
870,
|
| 593 |
+
489,
|
| 594 |
+
919
|
| 595 |
+
],
|
| 596 |
+
"page_idx": 3
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"type": "text",
|
| 600 |
+
"text": "non-negative distribution by the following formula:",
|
| 601 |
+
"bbox": [
|
| 602 |
+
507,
|
| 603 |
+
357,
|
| 604 |
+
882,
|
| 605 |
+
375
|
| 606 |
+
],
|
| 607 |
+
"page_idx": 3
|
| 608 |
+
},
|
| 609 |
+
{
|
| 610 |
+
"type": "equation",
|
| 611 |
+
"text": "\n$$\nw _ {t} ^ {a} = 1 + \\text {r e l e v a n c e} \\left(y _ {t} ^ {r}, \\mathcal {K}\\right) \\tag {6}\n$$\n",
|
| 612 |
+
"text_format": "latex",
|
| 613 |
+
"bbox": [
|
| 614 |
+
586,
|
| 615 |
+
387,
|
| 616 |
+
880,
|
| 617 |
+
405
|
| 618 |
+
],
|
| 619 |
+
"page_idx": 3
|
| 620 |
+
},
|
| 621 |
+
{
|
| 622 |
+
"type": "text",
|
| 623 |
+
"text": "This method adaptively scales up the loss of knowledge-aware tokens while scaling down the loss of rest tokens. Although this can adjust the weights of all tokens, a linear weight distribution is not a good simulation due to the complexity of focus changes in real-world human learning. Meanwhile, the weights between knowledge-aware and irrelevant tokens are not significantly different, which does not have a large enough impact on the loss.",
"bbox": [507, 417, 882, 576],
"page_idx": 3
},
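A one-line sketch of Eq. (6) under the same assumptions; note that the resulting weights lie in the narrow interval [1, 2], which is exactly the limitation discussed above:

```python
# Sketch of Eq. (6): linear adjust weights from relevance scores.
def linear_weights(relevance_scores):
    """w_t^a = 1 + relevance(y_t^r, K); knowledge-aware and irrelevant
    tokens end up at most a factor of 2 apart."""
    return [1.0 + r for r in relevance_scores]

print(linear_weights([0.1, 0.4, 0.8]))  # [1.1, 1.4, 1.8]
```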
{
"type": "text",
"text": "Non-linear Weight To ensure the stability of training, we aim to increase the loss of knowledge-aware tokens as much as possible while keeping the adjusted final loss close to the original loss. Therefore, the weight distribution should be smoother in the low-relevance interval and steeper in the high-relevance interval. We map the original relevance distribution to a logarithmic distribution with the following formula:",
"bbox": [507, 587, 882, 731],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\nw_{t}^{a} = -\\ln\\left(1 - \\operatorname{relevance}\\left(y_{t}^{r}, \\mathcal{K}\\right) + \\lambda\\right) + 1 \\tag{7}\n$$\n",
"text_format": "latex",
"bbox": [519, 744, 880, 762],
"page_idx": 3
},
{
"type": "text",
"text": "where $\\lambda \\in (0, e - 2)$ is a small constant that we call the smoothing factor. A larger smoothing factor corresponds to a smoother distribution of the obtained weights.",
"bbox": [507, 774, 880, 838],
"page_idx": 3
},
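A sketch of Eq. (7) with the smoothing factor; the printed values only illustrate the shape of the mapping:

```python
import math

# Sketch of Eq. (7): non-linear (logarithmic) adjust weights.
def nonlinear_weights(relevance_scores, lam=0.01):
    """w_t^a = -ln(1 - relevance + lam) + 1. The weight stays near 1 in the
    low-relevance interval and grows steeply as relevance approaches 1."""
    return [-math.log(1.0 - r + lam) + 1.0 for r in relevance_scores]

print([round(w, 2) for w in nonlinear_weights([0.1, 0.5, 0.99])])
# [1.09, 1.67, 4.91]
```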
{
"type": "text",
"text": "3.4 Focused Cross-Entropy Loss",
"text_level": 1,
"bbox": [507, 850, 781, 866],
"page_idx": 3
},
{
"type": "text",
"text": "After obtaining the adjust weight $w_{t}^{a}$, we scale the original logit and then use the new logit to calculate the probability of each token. At time step $t$,",
"bbox": [507, 871, 882, 919],
"page_idx": 3
},
{
"type": "page_number",
"text": "4557",
"bbox": [480, 927, 519, 940],
"page_idx": 3
},
{
"type": "text",
"text": "given original model outputs $h_{t}$, the probability of the token $y_{t}$ is calculated as follows:",
"bbox": [112, 84, 489, 116],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\np_{w}(y_{t} \\mid y_{<t}, \\mathcal{I}) = \\operatorname{softmax}\\left(w_{t}^{a} \\cdot h_{t}\\right) \\tag{8}\n$$\n",
"text_format": "latex",
"bbox": [164, 129, 487, 147],
"page_idx": 4
},
{
"type": "text",
"text": "We define the final loss for optimization as the Focused Cross-Entropy (FCE) loss:",
"bbox": [112, 159, 487, 191],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{\\mathrm{FCE}} = -\\frac{1}{T} \\sum_{t=1}^{T} \\log p_{w}\\left(y_{t} \\mid y_{<t}, \\mathcal{I}\\right) \\tag{9}\n$$\n",
"text_format": "latex",
"bbox": [164, 202, 487, 244],
"page_idx": 4
},
{
"type": "text",
"text": "where $T$ is the length of the response. FCE changes the original loss distribution, which leads the model to shift its original learning focus to the desired tokens. To minimize this loss, gradient descent is used to update all parameters.",
"bbox": [112, 255, 489, 336],
"page_idx": 4
},
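A minimal PyTorch sketch of Eqs. (8)-(9), assuming per-step decoder outputs and precomputed adjust weights; this is an illustration of the loss, not the authors' implementation:

```python
import torch
import torch.nn.functional as F

def focused_cross_entropy(logits, targets, adjust_weights):
    """Sketch of Eqs. (8)-(9): scale the logits of step t by w_t^a, then
    average the negative log-likelihood of the gold tokens.

    logits:         (T, V) decoder outputs h_t for one response
    targets:        (T,)   gold token ids y_t
    adjust_weights: (T,)   w_t^a from one of the schemes above
    """
    scaled = adjust_weights.unsqueeze(-1) * logits   # w_t^a * h_t
    log_probs = F.log_softmax(scaled, dim=-1)        # log p_w(y_t | y_<t, I)
    nll = -log_probs.gather(-1, targets.unsqueeze(-1)).squeeze(-1)
    return nll.mean()                                # L_FCE

# Toy example: 4 time steps, vocabulary of 10.
logits = torch.randn(4, 10, requires_grad=True)      # stand-in for h_t
loss = focused_cross_entropy(logits,
                             torch.tensor([1, 3, 5, 7]),
                             torch.tensor([1.0, 1.0, 2.0, 1.2]))
loss.backward()  # gradient descent then updates all parameters
```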
{
"type": "text",
"text": "4 Experiments",
"text_level": 1,
"bbox": [112, 348, 260, 363],
"page_idx": 4
},
{
"type": "text",
"text": "To evaluate the effectiveness of our method, we conduct experiments following the settings in (Dziri et al., 2022a). We use pre-trained T5 (Raffel et al., 2020)<sup>1</sup> from the HuggingFace library (Wolf et al., 2020) as our base language model and train for 10 epochs, accumulating gradients over 4 steps. We use AdamW (Loshchilov and Hutter, 2019) for optimization with a learning rate of 6.25E-5 and set the warmup ratio to $4\\%$, followed by a linear decay. The maximum lengths of the input and output are 256 and 128, respectively. We set the batch size to 8. For the adjust weights, we set the smoothing factor $\\lambda$ to 0.01 and the threshold value $\\theta$ to 0.5. For decoding, we use nucleus sampling with $p = 0.6$. We train our model on a single NVIDIA Tesla V100 GPU with 32GB memory. Each epoch takes about 130 minutes for WoW and 35 minutes for FaithDial. Our code is available at https://github.com/Mute-ZEN/AgileLightning.",
"bbox": [112, 373, 489, 678],
"page_idx": 4
},
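A hedged sketch of the optimization setup just described; the hyperparameters come from this paragraph, while `total_steps` and the input string are placeholders:

```python
import torch
from transformers import (AutoTokenizer, T5ForConditionalGeneration,
                          get_linear_schedule_with_warmup)

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = T5ForConditionalGeneration.from_pretrained("t5-base")

total_steps = 10_000  # placeholder; depends on dataset size and epoch count
optimizer = torch.optim.AdamW(model.parameters(), lr=6.25e-5)
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=int(0.04 * total_steps),  # 4% warmup, then linear decay
    num_training_steps=total_steps,
)

# Decoding with nucleus sampling (p = 0.6); input/output caps of 256/128.
inputs = tokenizer("knowledge: ... context: ...", return_tensors="pt",
                   truncation=True, max_length=256)
outputs = model.generate(**inputs, do_sample=True, top_p=0.6, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```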
{
"type": "text",
"text": "4.1 Datasets",
"text_level": 1,
"bbox": [112, 689, 228, 703],
"page_idx": 4
},
{
"type": "text",
"text": "We conduct experiments on two knowledge-grounded dialogue datasets: (1) Wizard of Wikipedia (WoW), published in (Dinan et al., 2019); (2) FaithDial, published in (Dziri et al., 2022a).",
"bbox": [112, 709, 489, 790],
"page_idx": 4
},
{
"type": "text",
"text": "WoW is a widely used dataset for knowledge-grounded dialogue based on Wikipedia. WoW was collected from pairs of crowdsourcing workers, one of whom is a knowledgeable wizard and the other an inquisitive apprentice. The wizard can access the knowledge of Wikipedia, while the apprentice",
"bbox": [112, 799, 489, 896],
"page_idx": 4
},
{
"type": "text",
"text": "cannot. The dataset includes 22,311 conversations with 201,999 turns, and the test set has two subsets: Test Seen and Test Unseen. Test Seen comprises 533 topics that overlap with the training set and contains new dialogues. Test Unseen contains 58 topics that have never been encountered in training or validation.",
"bbox": [507, 84, 884, 197],
"page_idx": 4
},
{
"type": "text",
"text": "FaithDial Since the existing knowledge-grounded conversation dataset (Dinan et al., 2019) contains a large number of hallucinated responses (Dziri et al., 2022b), Dziri et al. (2022a) propose FaithDial, which corrects the responses in WoW to be more faithful to the knowledge. The percentage of corrections to the original wizard's responses exceeds $80\\%$. The dataset contains a total of 5,649 conversations with 50,761 turns.",
"bbox": [507, 206, 885, 350],
"page_idx": 4
},
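For orientation, FaithDial can be loaded from the HuggingFace Hub; the dataset ID and field names below are assumptions about the hosted copy, and WoW is typically obtained through ParlAI:

```python
from datasets import load_dataset

# The Hub ID is an assumption about the hosted copy of FaithDial.
faithdial = load_dataset("McGill-NLP/FaithDial")
example = faithdial["train"][0]
print(example.keys())  # inspect the knowledge / history / response fields
```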
{
"type": "text",
"text": "4.2 Baselines",
"text_level": 1,
"bbox": [507, 363, 628, 378],
"page_idx": 4
},
{
"type": "text",
"text": "We compare our model with the following baselines:",
"bbox": [507, 385, 885, 414],
"page_idx": 4
},
{
"type": "text",
"text": "GPT2 (Radford et al., 2019) is an autoregressive model based on the transformer decoder architecture (Vaswani et al., 2017).",
"bbox": [507, 426, 884, 475],
"page_idx": 4
},
{
"type": "text",
"text": "DIALOGPT (Zhang et al., 2020) is pre-trained on large-scale dialogue datasets based on GPT2 to be more applicable to conversation generation.",
"bbox": [507, 485, 882, 533],
"page_idx": 4
},
{
"type": "text",
"text": "DOHA (Prabhumoye et al., 2021) equips the BART (Lewis et al., 2020) model with a knowledge-aware attention module, enabling specific attention to the information in the knowledge.",
"bbox": [507, 543, 882, 608],
"page_idx": 4
},
{
"type": "text",
"text": "CTRL (Rashkin et al., 2021) utilizes control codes to guide the model to generate responses that are more faithful to knowledge. Following (Dziri et al., 2022a), we use T5 as the base model of CTRL.",
"bbox": [507, 618, 882, 697],
"page_idx": 4
},
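The baseline families above can be instantiated from public checkpoints; the exact model sizes are not stated in this section, so the checkpoint names are illustrative:

```python
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM

# Decoder-only baselines (illustrative checkpoint sizes).
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
dialogpt = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

# Encoder-decoder backbones: BART for DOHA, T5 for CTRL and FocusL.
bart = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-base")
t5 = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
```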
{
"type": "text",
"text": "4.3 Evaluation Metrics",
"text_level": 1,
"bbox": [507, 709, 707, 724],
"page_idx": 4
},
{
"type": "text",
"text": "We aim to verify the effectiveness of our method in two aspects: fluency and faithfulness. We use both automatic metrics and human evaluations to compare all models.",
"bbox": [507, 731, 882, 796],
"page_idx": 4
},
{
"type": "text",
"text": "Automatic Metrics We use BLEU (Papineni et al., 2002) and ROUGE (Lin, 2004) to evaluate the fluency of the generated responses; they reflect the similarity of the generated responses to the reference responses, and both are widely used in text generation evaluation (Dziri et al., 2022a; Zhou et al., 2022). To evaluate the faithfulness",
"bbox": [507, 806, 884, 917],
"page_idx": 4
},
{
"type": "page_footnote",
"text": "1 https://huggingface.co/t5-base",
"bbox": [134, 903, 374, 917],
"page_idx": 4
},
{
"type": "page_number",
"text": "4558",
"bbox": [480, 927, 519, 940],
"page_idx": 4
},
{
"type": "text",
"text": "of the generated responses to knowledge, we use BERTScore (Zhang et al., 2019), F1, and $\\mathbf{Q}^2$ (Honovich et al., 2021). BERTScore can measure the semantic similarity of responses to knowledge with sentence embeddings from BERT (Devlin et al., 2019), while F1 measures the lexical overlap between responses and knowledge, and $\\mathbf{Q}^2$ uses an automated question-and-answer technique to evaluate the consistency of responses and knowledge.",
"bbox": [112, 84, 489, 230],
"page_idx": 5
},
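Of these metrics, the lexical-overlap F1 is simple enough to sketch directly; whitespace tokenization is a simplification, so the benchmark's exact preprocessing may differ:

```python
from collections import Counter

def knowledge_f1(response, knowledge):
    """Token-level F1 between a response and the grounding knowledge
    (whitespace tokenization; the benchmark's preprocessing may differ)."""
    resp, know = response.lower().split(), knowledge.lower().split()
    overlap = sum((Counter(resp) & Counter(know)).values())
    if overlap == 0:
        return 0.0
    precision = overlap / len(resp)
    recall = overlap / len(know)
    return 2 * precision * recall / (precision + recall)

print(round(knowledge_f1("the film was released in 2000",
                         "the worldwide release of the 2000 film"), 2))  # 0.46
```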
{
"type": "text",
"text": "Human Evaluation To mitigate the unreliability of automatic evaluation, we use more diverse evaluation methods to show the model performance as objectively as possible, and we further conduct a human evaluation to verify the effectiveness of our method. We randomly select 100 dialogues from the test set of FaithDial and ask three human evaluators to assess them. We ask the human evaluators to rate the fluency (Fluency), informativeness (Inform.) and faithfulness (Faithful.) of the generated responses on a 5-point scale, where 1, 3, and 5 indicate unacceptable, moderate, and perfect performance, respectively. Among the metrics, Fluency evaluates the response generation quality, Inform. evaluates whether the response is safe or vacuous, and Faithful. focuses on whether the knowledge used in the response comes from the given knowledge, which is stricter than Inform. We then calculate the average score of the three human evaluators as the final score.",
"bbox": [115, 237, 490, 558],
"page_idx": 5
},
{
"type": "text",
"text": "5 Results",
"text_level": 1,
"bbox": [112, 571, 213, 586],
"page_idx": 5
},
{
"type": "text",
"text": "The results on FaithDial and WoW are shown in Tables 1, 2 and 3. As can be seen, FocusL outperforms all baselines in both faithfulness and fluency.",
"bbox": [112, 596, 489, 645],
"page_idx": 5
},
{
"type": "text",
"text": "5.1 Automatic Evaluation",
"text_level": 1,
"bbox": [112, 657, 332, 671],
"page_idx": 5
},
{
"type": "text",
"text": "FCE vs CE To test the effectiveness of FocusL equipped with FCE, we compare our method with the baselines on the FaithDial dataset and report the results in Table 1. We use the test results of the baselines from (Dziri et al., 2022a) and keep the same metric calculation method to evaluate FocusL. We can see that our method outperforms the state-of-the-art baselines on all automatic metrics. In particular, FocusL achieves a significant improvement in BERTScore, F1, BLEU, and ROUGE, and a large improvement in $Q^2$ F1 and $Q^2$ NLI. We also find that the models based on the transformer decoder architecture (GPT2, DIALOGPT) perform worse than those based on the encoder-decoder architecture (T5, CTRL, DOHA). Noticeably, despite the fact that CTRL performs",
"bbox": [112, 677, 489, 919],
"page_idx": 5
},
{
"type": "image",
"img_path": "images/2a7cb25ddeb063dc3f85e75752d57998942d93a9cb7ff75ccaabeb1eb3d964a2.jpg",
"image_caption": ["Figure 4: The loss during training. FCE has almost no impact on training stability."],
"image_footnote": [],
"bbox": [526, 90, 862, 287],
"page_idx": 5
},
{
"type": "text",
"text": "well in terms of faithfulness, it doesn't improve fluency much. In contrast, FocusL achieves a significant improvement in both faithfulness and fluency. This indicates that FocusL reasonably utilizes knowledge during conversation.",
"bbox": [507, 362, 884, 443],
"page_idx": 5
},
{
"type": "text",
"text": "Moreover, to demonstrate the learning focus of our approach, we analyze the trend of the loss during training, as shown in Figure 4. FocusL achieves higher performance with nearly the same trend as the original CE loss variation, which also shows that our FCE loss does not destabilize training. Compared to the original CE loss, FCE has a higher loss at the beginning of training, which can be explained by our approach's more significant adjustment of the learning focus at the beginning of training.",
"bbox": [507, 444, 882, 605],
"page_idx": 5
},
{
"type": "text",
"text": "Robustness to Out-of-Domain Knowledge To evaluate the ability to apply out-of-domain knowledge, we further test our method on WoW and report the results in Table 2. We select the baselines that perform well on FaithDial and use T5 as the backbone for comparison. We train the model on the WoW training set and then test it on the two subsets separately. Results show that FocusL outperforms all baselines on the faithfulness metrics, significantly improving the model's reliability with only a slight impact on fluency. It is worth noting that our model improves more significantly in the out-of-domain setting, which indicates that our method is more robust to out-of-domain knowledge.",
"bbox": [507, 617, 882, 843],
"page_idx": 5
},
{
"type": "text",
"text": "Robustness to Data Size In order to verify the learning efficiency of our approach with an adjusted learning focus, we also conduct experiments in a low-resource setting. We randomly select 1/2, 1/4,",
"bbox": [507, 854, 884, 919],
"page_idx": 5
},
{
"type": "page_number",
"text": "4559",
"bbox": [480, 927, 519, 940],
"page_idx": 5
},
{
"type": "table",
"img_path": "images/814a9246a47b5ff612558bfa6f27f5726716be0f8dca892f6f00f4e50b9ba3dd.jpg",
"table_caption": ["Table 1: Automatic results on FaithDial to evaluate the Faithfulness and Fluency of the generated responses. The best performance is bolded. One \"*\" denotes a statistically significant improvement with $p < 0.05$, and \"**\" denotes a significant improvement with $p < 0.01$."],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Models</td><td colspan=\"4\">Faithfulness</td><td colspan=\"2\">Fluency</td></tr><tr><td>BERTScore</td><td>F1</td><td>Q2 F1</td><td>Q2 NLI</td><td>BLEU</td><td>ROUGE</td></tr><tr><td>GPT2</td><td>0.36</td><td>50.41</td><td>58.4</td><td>69.8</td><td>9.50</td><td>33.43</td></tr><tr><td>DIALOGPT</td><td>0.36</td><td>52.25</td><td>56.5</td><td>66.2</td><td>9.63</td><td>33.13</td></tr><tr><td>DOHA</td><td>0.39</td><td>58.32</td><td>69.1</td><td>78.3</td><td>9.89</td><td>31.78</td></tr><tr><td>T5</td><td>0.41</td><td>59.22</td><td>70.4</td><td>79.5</td><td>10.31</td><td>33.89</td></tr><tr><td>CTRL</td><td>0.46</td><td>62.21</td><td>72.4</td><td>81.5</td><td>10.41</td><td>33.97</td></tr><tr><td>FocusL</td><td>0.50**</td><td>65.07*</td><td>73.25</td><td>82.58</td><td>11.58**</td><td>35.41**</td></tr></table>",
"bbox": [191, 82, 803, 233],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/6a75676b5d2180a15f799b6928c5e2d0e80b976a57f8450756123f8113a9d017.jpg",
"table_caption": ["Table 2: Automatic results on WoW to evaluate the Faithfulness and Fluency of the generated responses. The best performance is bolded."],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Test Set Split</td><td rowspan=\"2\">Models</td><td colspan=\"4\">Faithfulness</td><td colspan=\"2\">Fluency</td></tr><tr><td>BERTScore</td><td>F1</td><td>Q2 F1</td><td>Q2 NLI</td><td>BLEU</td><td>ROUGE</td></tr><tr><td rowspan=\"3\">seen topic</td><td>T5</td><td>0.48</td><td>61.88</td><td>69.08</td><td>75.02</td><td>12.44</td><td>32.79</td></tr><tr><td>CTRL</td><td>0.49</td><td>62.99</td><td>70.56</td><td>76.35</td><td>12.61</td><td>33.20</td></tr><tr><td>FocusL</td><td>0.52</td><td>65.25</td><td>71.41</td><td>77.32</td><td>12.63</td><td>32.95</td></tr><tr><td rowspan=\"3\">unseen topic</td><td>T5</td><td>0.47</td><td>60.68</td><td>67.13</td><td>73.09</td><td>12.63</td><td>32.81</td></tr><tr><td>CTRL</td><td>0.46</td><td>59.81</td><td>66.70</td><td>72.59</td><td>12.30</td><td>32.73</td></tr><tr><td>FocusL</td><td>0.51</td><td>63.99</td><td>69.09</td><td>74.97</td><td>12.48</td><td>32.84</td></tr></table>",
"bbox": [157, 300, 838, 451],
"page_idx": 6
},
{
"type": "text",
"text": "1/8, 1/16, and 1/32 of the training data and report the results in Figure 5. We can see that our method achieves higher faithfulness even with 1/32 of the training data. Results also show that FocusL improves more significantly over the baselines in the low-resource setting, which demonstrates that our approach can learn how to use knowledge more efficiently. The faithfulness of both T5 and CTRL does not change significantly; however, their fluency decreases severely, which might be explained by the models' tendency to copy knowledge and ignore the fluency of the response. In comparison, FocusL can achieve a better trade-off between fluency and faithfulness with limited data.",
"bbox": [112, 517, 487, 741],
"page_idx": 6
},
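A sketch of how such training fractions can be drawn; the dataset ID and seed are assumptions:

```python
from datasets import load_dataset

# Illustrative construction of the 1/2 ... 1/32 training subsets.
train = load_dataset("McGill-NLP/FaithDial", split="train").shuffle(seed=42)
subsets = {f"1/{k}": train.select(range(len(train) // k))
           for k in (2, 4, 8, 16, 32)}
print({name: len(ds) for name, ds in subsets.items()})
```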
{
"type": "text",
"text": "Meanwhile, the performance of the model should weaken as the amount of data decreases; however, the experimental results are not what we expected. We can see that all models in Figure 5 have almost their highest BERTScore at 1/16 data, and our model FocusL even reaches its highest value on the BLEU metric as well. After rigorously repeating the experiment multiple times, the results remain the same. We argue that this may be related to the data distribution",
"bbox": [112, 758, 489, 917],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/ba7ae35ac5fb530211e25ef23f19972806fbfb3eaeb6465419cad670b1fb2d9f.jpg",
"table_caption": ["Table 3: Human evaluation on WoW. Bolded numbers indicate the best performance. Numbers marked with * indicate that the improvement is statistically significant (p-value < 0.05)."],
"table_footnote": [],
"table_body": "<table><tr><td>Models</td><td>Faithful.</td><td>Fluency</td><td>Inform.</td></tr><tr><td>T5</td><td>2.80</td><td>3.62</td><td>3.23</td></tr><tr><td>CTRL</td><td>2.98</td><td>3.53</td><td>3.14</td></tr><tr><td>FocusL</td><td>3.11*</td><td>3.59</td><td>3.44*</td></tr></table>",
"bbox": [531, 514, 860, 596],
"page_idx": 6
},
{
"type": "text",
"text": "characteristics of the dataset and deserves further study.",
"bbox": [507, 699, 880, 730],
"page_idx": 6
},
{
"type": "text",
"text": "5.2 Human Evaluation",
"text_level": 1,
"bbox": [507, 760, 705, 774],
"page_idx": 6
},
{
"type": "text",
"text": "In addition to automatic evaluation, we present human evaluation results in Table 3. We choose T5 and CTRL as baselines for comparison. Results show that FocusL receives higher scores on both Faithful. and Inform., while its fluency is slightly lower than T5's. Overall, our approach can make the model more reliable with almost as much fluency as the baselines.",
"bbox": [505, 790, 882, 917],
"page_idx": 6
},
{
"type": "page_number",
"text": "4560",
"bbox": [480, 928, 519, 940],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/fc46d7de17ac82689c5dbe10c99433fd044f779f5acbf56feed2a31db4b3c428.jpg",
"image_caption": ["(a)"],
"image_footnote": [],
"bbox": [122, 80, 310, 165],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/7d50664e5a8090c085b8c7d39b18a4bb6f7af691fda77197ced6f6272ccc4435.jpg",
"image_caption": ["(b)"],
"image_footnote": [],
"bbox": [315, 80, 500, 164],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/0883155b9a738bab2a3e74ae43c444e92f735dca475b7532686270185d2f2243.jpg",
"image_caption": ["(c)", "Figure 5: Automatic results on WoW with limited training data. (a) and (b) show the results of BERTScore on the seen and unseen test sets, respectively. (c) and (d) show the results of BLEU on the seen and unseen test sets, respectively."],
"image_footnote": [],
"bbox": [502, 80, 690, 165],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/37cdca860318d4fe5f42d99170a2e155d9e83f8a5d93ea99965279a5e7d5a048.jpg",
"image_caption": ["(d)"],
"image_footnote": [],
"bbox": [695, 80, 882, 165],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/1709082b640772e39969807671809ba528eee7a4d87554de938ef5e8612ff27a.jpg",
"table_caption": ["Table 4: The ablation study of various adjust weight distributions. Bolded numbers indicate the best performance."],
"table_footnote": [],
"table_body": "<table><tr><td>Model</td><td>BERTScore</td><td>F1</td><td>BLEU</td></tr><tr><td>FocusL</td><td>0.51</td><td>66.11</td><td>11.65</td></tr><tr><td>-TW</td><td>0.38</td><td>52.63</td><td>9.10</td></tr><tr><td>-LW</td><td>0.42</td><td>57.86</td><td>11.62</td></tr><tr><td>w/o FCE</td><td>0.40</td><td>57.17</td><td>11.89</td></tr></table>",
"bbox": [136, 250, 463, 357],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/5c91350fcbb8d1c09f32dcaa9c01221a6b0c1384cb0613f665b90b5e47f6aa2d.jpg",
"table_caption": ["Table 5: The ablation study of various $\\lambda$ for the non-linear adjust weight distribution. Bolded numbers indicate the best performance."],
"table_footnote": [],
"table_body": "<table><tr><td>Model</td><td>BERTScore</td><td>F1</td><td>BLEU</td></tr><tr><td>FocusL</td><td>0.51</td><td>66.11</td><td>11.65</td></tr><tr><td>λ = 0.05</td><td>0.45</td><td>61.51</td><td>11.78</td></tr><tr><td>λ = 0.1</td><td>0.50</td><td>64.55</td><td>11.53</td></tr><tr><td>λ = 0.2</td><td>0.44</td><td>60.88</td><td>12.08</td></tr><tr><td>λ = 0.4</td><td>0.43</td><td>60.19</td><td>12.13</td></tr><tr><td>λ = 0.7</td><td>0.43</td><td>60.81</td><td>12.53</td></tr></table>",
"bbox": [136, 423, 465, 560],
"page_idx": 7
},
{
"type": "text",
"text": "5.3 Ablation Study",
"text_level": 1,
"bbox": [112, 639, 278, 656],
"page_idx": 7
},
{
"type": "text",
"text": "Finally, we study the performance of the variations of FCE described in §3.3. Results for different adjust weight distributions are shown in Table 4, and Table 5 covers different $\\lambda$ in the non-linear weight. In Table 4, we compare the threshold weight distribution (TW), linear weight (LW), non-linear weight (FocusL), and training without FCE (w/o FCE). Among them, TW performs the worst, which may be influenced by the threshold. In contrast, the effect of LW is more stable than TW and does not suffer from hyperparameter effects. Even though its BLEU is slightly lower than with CE, FocusL significantly improves BERTScore and F1.",
"bbox": [112, 661, 487, 869],
"page_idx": 7
},
{
"type": "text",
"text": "To further study the effect of $\\lambda$ in the non-linear weight, we set $\\lambda$ to 0.05, 0.1, 0.2, 0.4, and 0.7, and present the results in Table 5. Note that FocusL",
"bbox": [112, 871, 487, 917],
"page_idx": 7
},
{
"type": "text",
"text": "Given Knowledge:",
"text_level": 1,
"bbox": [526, 256, 650, 269],
"page_idx": 7
},
{
"type": "text",
"text": "The global presentation of cheerleading was led by the 1997 broadcast of ESPN's International cheerleading competition, and the worldwide release of the 2000 film \"Bring It On\".",
"bbox": [526, 269, 863, 324],
"page_idx": 7
},
{
"type": "text",
"text": "Context:",
"text_level": 1,
"bbox": [527, 326, 588, 336],
"page_idx": 7
},
{
"type": "text",
"text": "<user> She has done a lot of dance and tumbling already. \nShe will try it out and see what works best for her.",
"bbox": [527, 337, 865, 361],
"page_idx": 7
},
{
"type": "text",
"text": "<bot> Got it, are you from the United States? Cheerleading is an activity that originated there, it is also predominantly in America.",
"bbox": [527, 361, 863, 395],
"page_idx": 7
},
{
"type": "text",
"text": "<user> Yes we are, she wants to be a cheerleader since she was a little kid, I am sure she will be fine. I could see her going on to do it in college as well.",
"bbox": [527, 395, 863, 432],
"page_idx": 7
},
{
"type": "text",
"text": "Gold Response: Nice, have you watched the film Bring It on ? it is from 2000 .",
"bbox": [527, 432, 863, 458],
"page_idx": 7
},
{
"type": "text",
"text": "T5: I see, did you know that the 1998 televised ESPN's International cheerleading competition led to the global presentation of cheerleading? That's interesting.",
"bbox": [527, 463, 863, 511],
"page_idx": 7
},
{
"type": "text",
"text": "CTRL: Yes, the world presented cheerleading in 1997",
"bbox": [527, 513, 863, 527],
"page_idx": 7
},
{
"type": "text",
"text": "FocusL: I see, did you know that the movie Bring It On was released in 2000?",
"bbox": [527, 527, 863, 556],
"page_idx": 7
},
{
"type": "text",
"text": "Table 6: An example case from FaithDial.",
"bbox": [551, 573, 836, 586],
"page_idx": 7
},
{
"type": "text",
"text": "uses $\\lambda = 0.01$ in the experiments. As $\\lambda$ increases, the faithfulness metrics of the model gradually decrease, and the fluency metrics gradually increase. This indicates that a smaller $\\lambda$ with a steeper weight distribution makes the model more sensitive to the losses of knowledge-aware tokens, which increases the accuracy of knowledge utilization. In contrast, a larger $\\lambda$ with a smoother weight distribution makes the model focus on the quality of the response.",
"bbox": [507, 613, 882, 758],
"page_idx": 7
},
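A small worked example of this trade-off: applying Eq. (7) at several $\lambda$ values shows how a smaller smoothing factor steepens the weights of highly relevant tokens:

```python
import math

# Eq. (7) evaluated over a lambda sweep (values match the ablation grid).
for lam in (0.01, 0.1, 0.4, 0.7):
    ws = [round(-math.log(1 - r + lam) + 1, 2) for r in (0.2, 0.6, 0.99)]
    print(f"lambda={lam}: weights for relevance (0.2, 0.6, 0.99) -> {ws}")
# Smaller lambda -> steeper weights near relevance 1; larger -> smoother.
```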
{
"type": "text",
"text": "5.4 Case Study",
"text_level": 1,
"bbox": [507, 769, 643, 784],
"page_idx": 7
},
{
"type": "text",
"text": "To better illustrate the advantage of our approach, we present an example case in Table 6. We randomly select one dialogue from the test set of FaithDial and compare the responses generated by T5, CTRL, and FocusL. It can be observed that the response generated by T5 uses a wrong year, \"1998\", while the given knowledge is about \"1997\", and its causality also cannot be inferred from the",
"bbox": [507, 790, 882, 917],
"page_idx": 7
},
{
"type": "page_number",
"text": "4561",
"bbox": [480, 928, 517, 940],
"page_idx": 7
},
{
"type": "text",
"text": "given knowledge. CTRL misunderstands the given knowledge and ignores the impact of \"Bring It On\" on the global presentation of cheerleading. In contrast, FocusL can generate a response that is more related to the given knowledge and closest to the gold response, containing all knowledge entities in the gold response.",
"bbox": [112, 84, 490, 197],
"page_idx": 8
},
{
"type": "text",
"text": "6 Conclusion",
"text_level": 1,
"bbox": [112, 211, 247, 227],
"page_idx": 8
},
{
"type": "text",
"text": "In this paper, we propose a novel learning approach with more direct guidance on the training process to improve the faithfulness of knowledge-grounded dialogue systems, referred to as FocusL. By leveraging the semantic relevance between the response and the knowledge, FocusL corrects the model's learning focus, leading to more consistent and fluent response generation. We empirically show that our approach achieves the best performance with a stable training process and is robust to data size and out-of-domain knowledge. FocusL is simple yet effective and achieves state-of-the-art results on two knowledge-grounded datasets.",
"bbox": [112, 239, 489, 448],
"page_idx": 8
},
{
"type": "text",
"text": "Limitations",
"text_level": 1,
"bbox": [114, 463, 220, 478],
"page_idx": 8
},
{
"type": "text",
"text": "As we have shown, there is much room to improve the learning approach, which incurs lower costs than increasing the model's parameters or elaborate data engineering. This paper is an exercise in guiding the learning focus, and we acknowledge that FocusL's positioning method and relevance-to-weight transformation are not perfect. For example, our positioning method may contain noise, and some words that are not important in the given knowledge may be used as our learning focus. We will continue to explore better methods to guide the model's learning focus. Meanwhile, our method is only tested on the basic cross-entropy loss, and it still needs to be explored with other learning approaches such as contrastive learning.",
"bbox": [112, 491, 489, 731],
"page_idx": 8
},
{
"type": "text",
"text": "Ethics Statement",
"text_level": 1,
"bbox": [114, 746, 265, 762],
"page_idx": 8
},
{
"type": "text",
"text": "FocusL aims to convey correct knowledge to users rather than misleading hallucinations. We hope to see a reliable and trustworthy dialogue system emerge from better guiding the model's learning focus. However, even if the dialogue system does not produce hallucinations, there is still a risk of potential misuse. For example, dialogue systems may be used to spread misinformation or to mislead users. If possible, we would prefer that the",
"bbox": [112, 774, 489, 917],
"page_idx": 8
},
{
"type": "text",
"text": "model itself has the ability to identify undesirable knowledge and block it.",
"bbox": [507, 84, 882, 116],
"page_idx": 8
},
{
"type": "text",
"text": "Acknowledgements",
"text_level": 1,
"bbox": [509, 127, 682, 143],
"page_idx": 8
},
{
"type": "text",
"text": "This work is supported by the National Natural Science Foundation of China (Grant No. U21B2009). This research is also supported by the Strategic Priority Research Program of the Chinese Academy of Sciences, Grant No. XDC02030400.",
"bbox": [507, 151, 885, 231],
"page_idx": 8
},
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [510, 256, 608, 272],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Kurt Bollacker, Colin Evans, Praveen Paritosh, Tim Sturge, and Jamie Taylor. 2008. Freebase: a collaboratively created graph database for structuring human knowledge. In Proceedings of the 2008 ACM SIGMOD international conference on Management of data, pages 1247-1250.",
"Yuanyuan Cai, Min Zuo, Qingchuan Zhang, Haitao Xiong, and Ke Li. 2020. A bichannel transformer with context encoding for document-driven conversation generation in social media. Complex., 2020:3710104:1-3710104:13.",
"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.",
"Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2019. Wizard of wikipedia: Knowledge-powered conversational agents. In International Conference on Learning Representations.",
"Nouha Dziri, Ehsan Kamalloo, Sivan Milton, Osmar Zaiane, Mo Yu, Edoardo Ponti, and Siva Reddy. 2022a. FaithDial: A faithful benchmark for information-seeking dialogue. arXiv preprint, arXiv:2204.10757.",
"Nouha Dziri, Andrea Madotto, Osmar Zaïane, and Avishek Joey Bose. 2021. Neural path hunter: Reducing hallucination in dialogue systems via path grounding. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 2197-2214, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.",
"Nouha Dziri, Sivan Milton, Mo Yu, Osmar Zaiane, and Siva Reddy. 2022b. On the origin of hallucinations in conversational models: Is it the datasets or the models? In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5271-5285, Seattle, United States. Association for Computational Linguistics."
],
"bbox": [509, 279, 885, 919],
"page_idx": 8
},
{
"type": "page_number",
"text": "4562",
"bbox": [480, 928, 519, 940],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Marjan Ghazvininejad, Chris Brockett, Ming-Wei Chang, William B. Dolan, Jianfeng Gao, Wen tau Yih, and Michel Galley. 2017. A knowledge-grounded neural conversation model. ArXiv, abs/1702.01932.",
"Or Honovich, Leshem Choshen, Roee Aharoni, Ella Neeman, Idan Szpektor, and Omri Abend. 2021. $q^2$ : Evaluating factual consistency in knowledge-grounded dialogues via question generation and question answering. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7856-7870, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.",
"Minki Kang, Jin Myung Kwak, Jinheon Baek, and Sung Ju Hwang. 2022. Knowledge-consistent dialogue generation with knowledge graphs. In ICML 2022 Workshop on Knowledge Retrieval and Language Models.",
"Byeongchang Kim, Jaewoo Ahn, and Gunhee Kim. 2020. Sequential latent knowledge selection for knowledge-grounded dialogue. In International Conference on Learning Representations.",
"Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.",
"Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity-promoting objective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 110-119, San Diego, California. Association for Computational Linguistics.",
"Sha Li, Mahdi Namazifar, Di Jin, Mohit Bansal, Heng Ji, Yang Liu, and Dilek Hakkani-Tur. 2022. Enhancing knowledge selection for grounded dialogues via document semantic graphs. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2810-2823, Seattle, United States. Association for Computational Linguistics.",
"Rongzhong Lian, Min Xie, Fan Wang, Jinhua Peng, and Hua Wu. 2019. Learning to select knowledge for response generation in dialog systems. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, pages 5081-5087. International Joint Conferences on Artificial Intelligence Organization.",
"Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics."
],
"bbox": [115, 85, 485, 917],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In International Conference on Learning Representations.",
"Longxuan Ma, Wei-Nan Zhang, Runxin Sun, and Ting Liu. 2020. A compare aggregate transformer for understanding document-grounded dialogue. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1358-1367, Online. Association for Computational Linguistics.",
"Chuan Meng, Pengjie Ren, Zhumin Chen, Weiwei Sun, Zhaochun Ren, Zhaopeng Tu, and M. de Rijke. 2020. Dukenet: A dual knowledge interaction network for knowledge-grounded conversation. Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval.",
"Nikita Moghe, Siddharth Arora, Suman Banerjee, and Mitesh M. Khapra. 2018. Towards exploiting background knowledge for building conversation systems. In Conference on Empirical Methods in Natural Language Processing.",
"Seungwhan Moon, Pararth Shah, Anuj Kumar, and Rajen Subba. 2019. OpenDialKG: Explainable conversational reasoning with attention-based walks over knowledge graphs. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 845-854.",
"Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Annual Meeting of the Association for Computational Linguistics.",
"Wei Peng, Yue Hu, Luxi Xing, Yuqiang Xie, Yajing Sun, and Yunpeng Li. 2022. Control globally, understand locally: A global-to-local hierarchical graph network for emotional support conversation. In Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI 2022, Vienna, Austria, 23-29 July 2022, pages 4324-4330. ijcai.org.",
"Shrimai Prabhumoye, Kazuma Hashimoto, Yingbo Zhou, Alan W Black, and Ruslan Salakhutdinov. 2021. Focused attention improves document-grounded generation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4274-4287, Online. Association for Computational Linguistics.",
"Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners.",
"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67."
],
"bbox": [510, 85, 880, 917],
"page_idx": 9
},
{
"type": "page_number",
"text": "4563",
"bbox": [480, 928, 519, 940],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Hannah Rashkin, David Reitter, Gaurav Singh Tomar, and Dipanjan Das. 2021. Increasing faithfulness in knowledge-grounded dialogue with controllable features. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 704-718, Online. Association for Computational Linguistics.",
"Weiwei Sun, Zhengliang Shi, Shen Gao, Pengjie Ren, M. de Rijke, and Zhaochun Ren. 2022. Contrastive learning reduces hallucination in conversations. ArXiv, abs/2212.10400.",
"Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. ArXiv, abs/1706.03762.",
"Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.",
"Jun Xu, Haifeng Wang, Zheng-Yu Niu, Hua Wu, Wanxiang Che, and Ting Liu. 2020. Conversational graph grounded policy learning for open-domain conversation generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1835-1845, Online. Association for Computational Linguistics.",
"Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2019. BERTScore: Evaluating text generation with BERT. ArXiv, abs/1904.09675.",
"Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, and Bill Dolan. 2020. DIALOGPT: Large-scale generative pre-training for conversational response generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 270-278, Online. Association for Computational Linguistics.",
"Xueliang Zhao, Chongyang Tao, Wei Wu, Can Xu, Dongyan Zhao, and Rui Yan. 2019. A document-grounded matching network for response selection in retrieval-based chatbots. In International Joint Conference on Artificial Intelligence.",
"Xueliang Zhao, Wei Wu, Chongyang Tao, Can Xu, Dongyan Zhao, and Rui Yan. 2020. Low-resource knowledge-grounded dialogue generation. In International Conference on Learning Representations."
],
"bbox": [115, 85, 489, 917],
"page_idx": 10
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Chujie Zheng, Yunbo Cao, Daxin Jiang, and Minlie Huang. 2020. Difference-aware knowledge selection for knowledge-grounded conversation generation. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 115-125, Online. Association for Computational Linguistics.",
"Hao Zhou, Chujie Zheng, Kaili Huang, Minlie Huang, and Xiaoyan Zhu. 2020. KdConv: A Chinese multi-domain dialogue dataset towards multi-turn knowledge-driven conversation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7098-7108, Online. Association for Computational Linguistics.",
"Kangyan Zhou, Shrimai Prabhumoye, and Alan W Black. 2018. A dataset for document grounded conversations. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 708-713, Brussels, Belgium. Association for Computational Linguistics.",
"Pei Zhou, Karthik Gopalakrishnan, Behnam Hedayatnia, Seokhwan Kim, Jay Pujara, Xiang Ren, Yang Liu, and Dilek Hakkani-Tur. 2022. Think before you speak: Explicitly generating implicit commonsense knowledge for response generation. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1237-1252, Dublin, Ireland. Association for Computational Linguistics."
],
"bbox": [510, 85, 882, 482],
"page_idx": 10
},
{
"type": "page_number",
"text": "4564",
"bbox": [480, 928, 519, 940],
"page_idx": 10
},
{
"type": "text",
"text": "A For every submission:",
"text_level": 1,
"bbox": [114, 107, 322, 122],
"page_idx": 11
},
{
"type": "text",
"text": "A1. Did you describe the limitations of your work?",
"bbox": [129, 126, 532, 143],
"page_idx": 11
},
{
"type": "text",
"text": "We discuss the limitations in the \"Limitations\" section before the \"Ethics Statement\" section.",
"bbox": [147, 143, 806, 159],
"page_idx": 11
},
{
"type": "text",
"text": "A2. Did you discuss any potential risks of your work?",
"bbox": [129, 168, 552, 186],
"page_idx": 11
},
{
"type": "text",
"text": "We discuss potential risks in the \"Ethics Statement\" section at the end of the paper.",
"bbox": [147, 186, 726, 202],
"page_idx": 11
},
{
"type": "text",
"text": "A3. Do the abstract and introduction summarize the paper's main claims?",
"bbox": [129, 212, 695, 228],
"page_idx": 11
},
{
"type": "text",
"text": "The abstract is in the \"Abstract\" section, and the introduction is in Section 1.",
|
| 1859 |
+
"bbox": [
|
| 1860 |
+
147,
|
| 1861 |
+
229,
|
| 1862 |
+
658,
|
| 1863 |
+
244
|
| 1864 |
+
],
|
| 1865 |
+
"page_idx": 11
|
| 1866 |
+
},
|
| 1867 |
+
{
|
| 1868 |
+
"type": "text",
|
| 1869 |
+
"text": "A4. Have you used AI writing assistants when working on this paper?",
|
| 1870 |
+
"bbox": [
|
| 1871 |
+
129,
|
| 1872 |
+
255,
|
| 1873 |
+
668,
|
| 1874 |
+
272
|
| 1875 |
+
],
|
| 1876 |
+
"page_idx": 11
|
| 1877 |
+
},
|
| 1878 |
+
{
|
| 1879 |
+
"type": "text",
|
| 1880 |
+
"text": "Left blank.",
|
| 1881 |
+
"bbox": [
|
| 1882 |
+
149,
|
| 1883 |
+
273,
|
| 1884 |
+
231,
|
| 1885 |
+
288
|
| 1886 |
+
],
|
| 1887 |
+
"page_idx": 11
|
| 1888 |
+
},
|
| 1889 |
+
{
|
| 1890 |
+
"type": "text",
|
| 1891 |
+
"text": "B Did you use or create scientific artifacts?",
|
| 1892 |
+
"text_level": 1,
|
| 1893 |
+
"bbox": [
|
| 1894 |
+
114,
|
| 1895 |
+
299,
|
| 1896 |
+
487,
|
| 1897 |
+
316
|
| 1898 |
+
],
|
| 1899 |
+
"page_idx": 11
|
| 1900 |
+
},
|
| 1901 |
+
{
|
| 1902 |
+
"type": "text",
|
| 1903 |
+
"text": "We show the artifacts in the section 4",
|
| 1904 |
+
"bbox": [
|
| 1905 |
+
132,
|
| 1906 |
+
321,
|
| 1907 |
+
411,
|
| 1908 |
+
336
|
| 1909 |
+
],
|
| 1910 |
+
"page_idx": 11
|
| 1911 |
+
},
|
| 1912 |
+
{
|
| 1913 |
+
"type": "text",
|
| 1914 |
+
"text": "B1. Did you cite the creators of artifacts you used?",
|
| 1915 |
+
"bbox": [
|
| 1916 |
+
129,
|
| 1917 |
+
346,
|
| 1918 |
+
529,
|
| 1919 |
+
363
|
| 1920 |
+
],
|
| 1921 |
+
"page_idx": 11
|
| 1922 |
+
},
|
| 1923 |
+
{
|
| 1924 |
+
"type": "text",
|
| 1925 |
+
"text": "We cite the creators in the section 4",
|
| 1926 |
+
"bbox": [
|
| 1927 |
+
149,
|
| 1928 |
+
363,
|
| 1929 |
+
415,
|
| 1930 |
+
378
|
| 1931 |
+
],
|
| 1932 |
+
"page_idx": 11
|
| 1933 |
+
},
|
| 1934 |
+
{
|
| 1935 |
+
"type": "text",
|
| 1936 |
+
"text": "B2. Did you discuss the license or terms for use and / or distribution of any artifacts?",
|
| 1937 |
+
"bbox": [
|
| 1938 |
+
127,
|
| 1939 |
+
390,
|
| 1940 |
+
778,
|
| 1941 |
+
406
|
| 1942 |
+
],
|
| 1943 |
+
"page_idx": 11
|
| 1944 |
+
},
|
| 1945 |
+
{
|
| 1946 |
+
"type": "text",
|
| 1947 |
+
"text": "Not applicable. Left blank.",
|
| 1948 |
+
"bbox": [
|
| 1949 |
+
149,
|
| 1950 |
+
407,
|
| 1951 |
+
349,
|
| 1952 |
+
422
|
| 1953 |
+
],
|
| 1954 |
+
"page_idx": 11
|
| 1955 |
+
},
|
| 1956 |
+
{
|
| 1957 |
+
"type": "text",
|
| 1958 |
+
"text": "B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)?",
|
| 1959 |
+
"bbox": [
|
| 1960 |
+
127,
|
| 1961 |
+
432,
|
| 1962 |
+
880,
|
| 1963 |
+
495
|
| 1964 |
+
],
|
| 1965 |
+
"page_idx": 11
|
| 1966 |
+
},
|
| 1967 |
+
{
|
| 1968 |
+
"type": "text",
|
| 1969 |
+
"text": "Not applicable. Left blank.",
|
| 1970 |
+
"bbox": [
|
| 1971 |
+
149,
|
| 1972 |
+
497,
|
| 1973 |
+
349,
|
| 1974 |
+
513
|
| 1975 |
+
],
|
| 1976 |
+
"page_idx": 11
|
| 1977 |
+
},
|
| 1978 |
+
{
|
| 1979 |
+
"type": "text",
|
| 1980 |
+
"text": "B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?",
|
| 1981 |
+
"bbox": [
|
| 1982 |
+
127,
|
| 1983 |
+
524,
|
| 1984 |
+
880,
|
| 1985 |
+
571
|
| 1986 |
+
],
|
| 1987 |
+
"page_idx": 11
|
| 1988 |
+
},
|
| 1989 |
+
{
|
| 1990 |
+
"type": "text",
|
| 1991 |
+
"text": "Not applicable. Left blank.",
|
| 1992 |
+
"bbox": [
|
| 1993 |
+
149,
|
| 1994 |
+
573,
|
| 1995 |
+
349,
|
| 1996 |
+
588
|
| 1997 |
+
],
|
| 1998 |
+
"page_idx": 11
|
| 1999 |
+
},
|
| 2000 |
+
{
|
| 2001 |
+
"type": "text",
|
| 2002 |
+
"text": "B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.?",
|
| 2003 |
+
"bbox": [
|
| 2004 |
+
129,
|
| 2005 |
+
598,
|
| 2006 |
+
880,
|
| 2007 |
+
631
|
| 2008 |
+
],
|
| 2009 |
+
"page_idx": 11
|
| 2010 |
+
},
|
| 2011 |
+
{
|
| 2012 |
+
"type": "text",
|
| 2013 |
+
"text": "We provide it in the section 4.",
|
| 2014 |
+
"bbox": [
|
| 2015 |
+
149,
|
| 2016 |
+
632,
|
| 2017 |
+
369,
|
| 2018 |
+
646
|
| 2019 |
+
],
|
| 2020 |
+
"page_idx": 11
|
| 2021 |
+
},
|
| 2022 |
+
{
|
| 2023 |
+
"type": "text",
|
| 2024 |
+
"text": "B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.",
|
| 2025 |
+
"bbox": [
|
| 2026 |
+
129,
|
| 2027 |
+
657,
|
| 2028 |
+
880,
|
| 2029 |
+
739
|
| 2030 |
+
],
|
| 2031 |
+
"page_idx": 11
|
| 2032 |
+
},
|
| 2033 |
+
{
|
| 2034 |
+
"type": "text",
|
| 2035 |
+
"text": "We report it in the section 4.",
|
| 2036 |
+
"bbox": [
|
| 2037 |
+
149,
|
| 2038 |
+
739,
|
| 2039 |
+
359,
|
| 2040 |
+
753
|
| 2041 |
+
],
|
| 2042 |
+
"page_idx": 11
|
| 2043 |
+
},
|
| 2044 |
+
{
|
| 2045 |
+
"type": "text",
|
| 2046 |
+
"text": "C Did you run computational experiments?",
|
| 2047 |
+
"text_level": 1,
|
| 2048 |
+
"bbox": [
|
| 2049 |
+
114,
|
| 2050 |
+
764,
|
| 2051 |
+
492,
|
| 2052 |
+
781
|
| 2053 |
+
],
|
| 2054 |
+
"page_idx": 11
|
| 2055 |
+
},
|
| 2056 |
+
{
|
| 2057 |
+
"type": "text",
|
| 2058 |
+
"text": "We present computational experiments in the section 4.",
|
| 2059 |
+
"bbox": [
|
| 2060 |
+
132,
|
| 2061 |
+
787,
|
| 2062 |
+
539,
|
| 2063 |
+
802
|
| 2064 |
+
],
|
| 2065 |
+
"page_idx": 11
|
| 2066 |
+
},
|
| 2067 |
+
{
|
| 2068 |
+
"type": "text",
|
| 2069 |
+
"text": "C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used?",
|
| 2070 |
+
"bbox": [
|
| 2071 |
+
129,
|
| 2072 |
+
812,
|
| 2073 |
+
880,
|
| 2074 |
+
845
|
| 2075 |
+
],
|
| 2076 |
+
"page_idx": 11
|
| 2077 |
+
},
|
| 2078 |
+
{
|
| 2079 |
+
"type": "text",
|
| 2080 |
+
"text": "We report it in the section 4.",
|
| 2081 |
+
"bbox": [
|
| 2082 |
+
149,
|
| 2083 |
+
846,
|
| 2084 |
+
359,
|
| 2085 |
+
860
|
| 2086 |
+
],
|
| 2087 |
+
"page_idx": 11
|
| 2088 |
+
},
|
| 2089 |
+
{
|
| 2090 |
+
"type": "text",
|
| 2091 |
+
"text": "The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.",
|
| 2092 |
+
"bbox": [
|
| 2093 |
+
112,
|
| 2094 |
+
868,
|
| 2095 |
+
877,
|
| 2096 |
+
892
|
| 2097 |
+
],
|
| 2098 |
+
"page_idx": 11
|
| 2099 |
+
},
|
| 2100 |
+
{
|
| 2101 |
+
"type": "header",
|
| 2102 |
+
"text": "ACL 2023 Responsible NLP Checklist",
|
| 2103 |
+
"bbox": [
|
| 2104 |
+
132,
|
| 2105 |
+
84,
|
| 2106 |
+
433,
|
| 2107 |
+
99
|
| 2108 |
+
],
|
| 2109 |
+
"page_idx": 11
|
| 2110 |
+
},
|
| 2111 |
+
{
|
| 2112 |
+
"type": "page_number",
|
| 2113 |
+
"text": "4565",
|
| 2114 |
+
"bbox": [
|
| 2115 |
+
480,
|
| 2116 |
+
928,
|
| 2117 |
+
519,
|
| 2118 |
+
940
|
| 2119 |
+
],
|
| 2120 |
+
"page_idx": 11
|
| 2121 |
+
},
|
| 2122 |
+
{
|
| 2123 |
+
"type": "text",
|
| 2124 |
+
"text": "C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values?",
|
| 2125 |
+
"bbox": [
|
| 2126 |
+
129,
|
| 2127 |
+
83,
|
| 2128 |
+
878,
|
| 2129 |
+
115
|
| 2130 |
+
],
|
| 2131 |
+
"page_idx": 12
|
| 2132 |
+
},
|
| 2133 |
+
{
|
| 2134 |
+
"type": "text",
|
| 2135 |
+
"text": "We discuss the experimental setup in section 4.",
|
| 2136 |
+
"bbox": [
|
| 2137 |
+
149,
|
| 2138 |
+
117,
|
| 2139 |
+
495,
|
| 2140 |
+
131
|
| 2141 |
+
],
|
| 2142 |
+
"page_idx": 12
|
| 2143 |
+
},
|
| 2144 |
+
{
|
| 2145 |
+
"type": "text",
|
| 2146 |
+
"text": "C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?",
|
| 2147 |
+
"bbox": [
|
| 2148 |
+
129,
|
| 2149 |
+
142,
|
| 2150 |
+
882,
|
| 2151 |
+
190
|
| 2152 |
+
],
|
| 2153 |
+
"page_idx": 12
|
| 2154 |
+
},
|
| 2155 |
+
{
|
| 2156 |
+
"type": "text",
|
| 2157 |
+
"text": "We report it in the section 5.",
|
| 2158 |
+
"bbox": [
|
| 2159 |
+
149,
|
| 2160 |
+
192,
|
| 2161 |
+
359,
|
| 2162 |
+
206
|
| 2163 |
+
],
|
| 2164 |
+
"page_idx": 12
|
| 2165 |
+
},
|
| 2166 |
+
{
|
| 2167 |
+
"type": "text",
|
| 2168 |
+
"text": "C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)?",
|
| 2169 |
+
"bbox": [
|
| 2170 |
+
129,
|
| 2171 |
+
217,
|
| 2172 |
+
882,
|
| 2173 |
+
265
|
| 2174 |
+
],
|
| 2175 |
+
"page_idx": 12
|
| 2176 |
+
},
|
| 2177 |
+
{
|
| 2178 |
+
"type": "text",
|
| 2179 |
+
"text": "We report it in the section 4.",
|
| 2180 |
+
"bbox": [
|
| 2181 |
+
149,
|
| 2182 |
+
267,
|
| 2183 |
+
359,
|
| 2184 |
+
282
|
| 2185 |
+
],
|
| 2186 |
+
"page_idx": 12
|
| 2187 |
+
},
|
| 2188 |
+
{
|
| 2189 |
+
"type": "text",
|
| 2190 |
+
"text": "D Did you use human annotators (e.g., crowdworkers) or research with human participants?",
|
| 2191 |
+
"bbox": [
|
| 2192 |
+
112,
|
| 2193 |
+
292,
|
| 2194 |
+
877,
|
| 2195 |
+
310
|
| 2196 |
+
],
|
| 2197 |
+
"page_idx": 12
|
| 2198 |
+
},
|
| 2199 |
+
{
|
| 2200 |
+
"type": "text",
|
| 2201 |
+
"text": "We report it in the section 4.3.",
|
| 2202 |
+
"bbox": [
|
| 2203 |
+
132,
|
| 2204 |
+
313,
|
| 2205 |
+
356,
|
| 2206 |
+
329
|
| 2207 |
+
],
|
| 2208 |
+
"page_idx": 12
|
| 2209 |
+
},
|
| 2210 |
+
{
|
| 2211 |
+
"type": "text",
|
| 2212 |
+
"text": "D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.?",
|
| 2213 |
+
"bbox": [
|
| 2214 |
+
127,
|
| 2215 |
+
340,
|
| 2216 |
+
882,
|
| 2217 |
+
372
|
| 2218 |
+
],
|
| 2219 |
+
"page_idx": 12
|
| 2220 |
+
},
|
| 2221 |
+
{
|
| 2222 |
+
"type": "text",
|
| 2223 |
+
"text": "Not applicable. Left blank.",
|
| 2224 |
+
"bbox": [
|
| 2225 |
+
149,
|
| 2226 |
+
373,
|
| 2227 |
+
349,
|
| 2228 |
+
388
|
| 2229 |
+
],
|
| 2230 |
+
"page_idx": 12
|
| 2231 |
+
},
|
| 2232 |
+
{
|
| 2233 |
+
"type": "text",
|
| 2234 |
+
"text": "D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)?",
|
| 2235 |
+
"bbox": [
|
| 2236 |
+
127,
|
| 2237 |
+
399,
|
| 2238 |
+
882,
|
| 2239 |
+
447
|
| 2240 |
+
],
|
| 2241 |
+
"page_idx": 12
|
| 2242 |
+
},
|
| 2243 |
+
{
|
| 2244 |
+
"type": "text",
|
| 2245 |
+
"text": "Not applicable. Left blank.",
|
| 2246 |
+
"bbox": [
|
| 2247 |
+
149,
|
| 2248 |
+
448,
|
| 2249 |
+
349,
|
| 2250 |
+
464
|
| 2251 |
+
],
|
| 2252 |
+
"page_idx": 12
|
| 2253 |
+
},
|
| 2254 |
+
{
|
| 2255 |
+
"type": "text",
|
| 2256 |
+
"text": "D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used?",
|
| 2257 |
+
"bbox": [
|
| 2258 |
+
127,
|
| 2259 |
+
475,
|
| 2260 |
+
882,
|
| 2261 |
+
521
|
| 2262 |
+
],
|
| 2263 |
+
"page_idx": 12
|
| 2264 |
+
},
|
| 2265 |
+
{
|
| 2266 |
+
"type": "text",
|
| 2267 |
+
"text": "Not applicable. Left blank.",
|
| 2268 |
+
"bbox": [
|
| 2269 |
+
149,
|
| 2270 |
+
523,
|
| 2271 |
+
349,
|
| 2272 |
+
539
|
| 2273 |
+
],
|
| 2274 |
+
"page_idx": 12
|
| 2275 |
+
},
|
| 2276 |
+
{
|
| 2277 |
+
"type": "text",
|
| 2278 |
+
"text": "D4. Was the data collection protocol approved (or determined exempt) by an ethics review board?",
|
| 2279 |
+
"bbox": [
|
| 2280 |
+
127,
|
| 2281 |
+
549,
|
| 2282 |
+
873,
|
| 2283 |
+
565
|
| 2284 |
+
],
|
| 2285 |
+
"page_idx": 12
|
| 2286 |
+
},
|
| 2287 |
+
{
|
| 2288 |
+
"type": "text",
|
| 2289 |
+
"text": "Not applicable. Left blank.",
|
| 2290 |
+
"bbox": [
|
| 2291 |
+
149,
|
| 2292 |
+
565,
|
| 2293 |
+
349,
|
| 2294 |
+
582
|
| 2295 |
+
],
|
| 2296 |
+
"page_idx": 12
|
| 2297 |
+
},
|
| 2298 |
+
{
|
| 2299 |
+
"type": "text",
|
| 2300 |
+
"text": "D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data?",
|
| 2301 |
+
"bbox": [
|
| 2302 |
+
127,
|
| 2303 |
+
592,
|
| 2304 |
+
880,
|
| 2305 |
+
623
|
| 2306 |
+
],
|
| 2307 |
+
"page_idx": 12
|
| 2308 |
+
},
|
| 2309 |
+
{
|
| 2310 |
+
"type": "text",
|
| 2311 |
+
"text": "Not applicable. Left blank.",
|
| 2312 |
+
"bbox": [
|
| 2313 |
+
149,
|
| 2314 |
+
625,
|
| 2315 |
+
349,
|
| 2316 |
+
640
|
| 2317 |
+
],
|
| 2318 |
+
"page_idx": 12
|
| 2319 |
+
},
|
| 2320 |
+
{
|
| 2321 |
+
"type": "page_number",
|
| 2322 |
+
"text": "4566",
|
| 2323 |
+
"bbox": [
|
| 2324 |
+
480,
|
| 2325 |
+
928,
|
| 2326 |
+
519,
|
| 2327 |
+
940
|
| 2328 |
+
],
|
| 2329 |
+
"page_idx": 12
|
| 2330 |
+
}
|
| 2331 |
+
]
|
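
For reference, a minimal sketch of consuming a `*_content_list.json` payload like the one above: each entry is a flat block carrying a `type` (`"text"`, `"list"`, `"header"`, `"page_number"`, ...), an optional `text` or `list_items` payload, a `bbox` in page coordinates, and a `page_idx`. The file name below is a stand-in for any content list in this commit.

```python
import json
from collections import defaultdict

# Load one content_list file from this commit (file name is illustrative).
with open("502ba00b-aedc-401c-9087-9dfb7448dbe0_content_list.json") as f:
    blocks = json.load(f)

# Group the recoverable text per page, flattening list blocks.
pages = defaultdict(list)
for block in blocks:
    if block["type"] == "text":
        pages[block["page_idx"]].append(block.get("text", ""))
    elif block["type"] == "list":
        pages[block["page_idx"]].extend(block.get("list_items", []))

for idx in sorted(pages):
    print(f"page {idx}: {len(pages[idx])} text blocks")
```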
2023/Towards Faithful Dialogues via Focus Learning/657e0968-cdfa-4f01-81a8-cef1a90aabec_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2023/Towards Faithful Dialogues via Focus Learning/657e0968-cdfa-4f01-81a8-cef1a90aabec_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:382a47f6c8d8971529a6230c37508a4ba334c54be0089453d6a5c22c36eaf2fa
+size 1497384
2023/Towards Faithful Dialogues via Focus Learning/full.md
ADDED
@@ -0,0 +1,429 @@
# Towards Faithful Dialogs via Focus Learning

Yifan Deng $^{1,2}$ Xingsheng Zhang $^{1,2*}$ Heyan Huang $^{3*}$ Yue Hu $^{1,2}$

$^{1}$ Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China
$^{2}$ School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China
$^{3}$ School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China

{dengyifan, zhangxingsheng, huyue}@iie.ac.cn hhy63@bit.edu.cn

# Abstract

Maintaining faithfulness between responses and knowledge is an important research topic for building reliable knowledge-grounded dialogue systems. Existing models rely heavily on elaborate data engineering and increased model parameters, while neglecting to track the tokens that significantly influence losses, which is decisive for the optimization direction of the model in each iteration. To address this issue, we propose Focus Learning (FocusL), a novel learning approach that adjusts the contribution of each token to the optimization direction by directly scaling the corresponding objective loss. Specifically, we first introduce a positioning method that utilizes relevance distributions between knowledge and each response token to locate knowledge-aware tokens. Then, we design a relevance-to-weight transformation to provide dynamic token-level weights for adjusting the cross-entropy loss. Finally, we use the weighted loss to encourage the model to pay special attention to knowledge utilization. Experimental results demonstrate that our method achieves new state-of-the-art results and generates more reliable responses while maintaining training stability.

# 1 Introduction

Although open-domain conversation systems can generate smooth and fluent responses with the help of large-scale pre-trained models (Raffel et al., 2020; Lewis et al., 2020), vacuous responses (Li et al., 2016) continue to be prevalent. To enrich the content of responses, an effective way is to introduce external knowledge (Dinan et al., 2019; Zhou et al., 2018). The knowledge-grounded model, however, frequently generates responses that appear knowledgeable but are not derived from the given knowledge. This means that the correctness of the knowledge used in responses cannot be guaranteed. As shown in Figure 1, the "Oklahoma" in the response is not present in the given knowledge, and the relevant knowledge is unverifiable. This phenomenon is known as the hallucination (Dziri et al., 2022a) problem. Since the knowledge cannot be verified, hallucinations can mislead users and reduce the model's credibility.

Figure 1: The different learning focus (i.e., the tokens whose losses significantly influence the total objective loss) between general learning and focus learning. The original learning focus, without guidance in general learning, is fragmented with no rules to follow. Our method makes the model focus on the knowledge-aware tokens (i.e., tokens that have high semantic relevance to knowledge) to alleviate hallucinations.

Numerous methods have been developed to tackle the hallucination problem via knowledge graphs (Kang et al., 2022; Dziri et al., 2021), contrastive learning (Sun et al., 2022), or control codes (Rashkin et al., 2021). These models enhance the model's attention to knowledge by increasing parameters and through elaborate data engineering.

Figure 2: Distributions of the loss and the relevance between knowledge and response tokens. We select a response as an example and visualize the loss, the semantic relevance to knowledge, and the adjusted loss (FocusL) at the beginning of training. With the original loss, the model is less sensitive to the optimization of knowledge-aware tokens. In contrast, the losses of knowledge-aware tokens in FocusL are larger than the others, and the losses of knowledge-irrelevant tokens are scaled down.

An important assumption for these methods is that the model has the ability to give more attention to knowledge during training, yet this does not always hold true. We consider this a common problem of general training methods, which neglect to track the tokens that significantly influence the objective loss (i.e., the learning focus). Different from the traditional concept of attention mechanisms, which primarily identify important information in the input, the learning focus emphasizes important information in the target response. As the example in Figure 1 shows, in the general learning scenario the learning focus is often out of control, and the model tends to focus on simple words (e.g., be, the), which leads to neglect of the tokens that have high relevance to knowledge, referred to as knowledge-aware tokens (e.g., polar or temperate zones). Intuitively, knowledge-aware tokens are even more critical for improving consistency, and focusing the model's attention on them makes the optimization goal better fit the task. Therefore, it is necessary to revise the original learning focus. However, there are two main challenges: (1) How to locate the desired learning focus? Because the learning focus falls on different words in each sentence, and token-level manual annotation of responses is extremely time-consuming and labor-intensive, existing datasets do not have fine-grained annotations of the key semantic words in responses. (2) Given the desired learning focus, how to correct the original learning focus? Existing training methods with cross-entropy loss lack direct guidance on the learning focus.
To address the above issues, we propose a novel learning approach, Focus Learning (FocusL). Instead of influencing knowledge utilization implicitly, we directly scale the corresponding objective loss to adjust the contribution of each token to the optimization direction. Specifically, for the first challenge, we first define the desired learning focus in the knowledge-grounded dialogue task as knowledge-aware tokens. Then we devise a positioning method to obtain the relevance score distribution between knowledge and each response token. For the second challenge, we explore a relevance-to-weight transformation method to provide dynamic token-level weights for the cross-entropy loss. Finally, we use the corrected learning focus to guide model training. As we can see in Figure 2, the losses of knowledge-aware tokens do not gain a high proportion of the original loss distribution. In contrast, our approach widens the gap between knowledge-aware tokens and the others, which increases the impact of changes in the knowledge-aware tokens' losses on the final loss, thus affecting the optimization direction and guiding the model to pay more attention to knowledge utilization.

Our main contributions are summarized below:

- We rethink existing models and learning methods, and propose a novel learning approach to address the hallucination problem by adjusting the learning focus.
- We propose a positioning method and a relevance-to-weight transformation method to adaptively scale the loss of each token in the response.
- Experimental results demonstrate that our approach significantly outperforms the current state-of-the-art baselines and effectively reduces hallucinations while maintaining high response quality.

# 2 Related Work

# Knowledge-grounded Dialogue Generation

Knowledge-grounded dialogue systems aim to alleviate vacuous responses by injecting external knowledge into the dialogue model. Recently, various forms of external knowledge have been used in dialogue systems, such as tables (Moghe et al., 2018), graphs (Bollacker et al., 2008; Moon et al., 2019; Zhou et al., 2020; Peng et al., 2022), and documents (Ghazvininejad et al., 2017; Zhou et al., 2018; Zhao et al., 2019). Beyond research on the forms of knowledge, most existing systems focus on knowledge selection (Lian et al., 2019; Kim et al., 2020; Zheng et al., 2020; Meng et al., 2020; Li et al., 2022) and response generation with given knowledge (Xu et al., 2020; Ma et al., 2020; Cai et al., 2020; Zhao et al., 2020). In this work, we mainly focus on preventing models from using unverifiable knowledge when generating responses with given knowledge.

Hallucinations in Text Generation Generating responses that are unfaithful to the provided knowledge, known as hallucination, is a tricky problem in knowledge-grounded dialogue systems. Recently, the hallucination problem has attracted increasing attention because the generated text appears smooth and fluent but often contains false knowledge, which significantly threatens the model's credibility. Some studies reduce hallucinations by introducing knowledge graphs (Kang et al., 2022; Dziri et al., 2021), controllable generation (Rashkin et al., 2021), and contrastive learning (Sun et al., 2022). In a recent study, Dziri et al. (2022b) analyze the source of hallucination in detail and find that most knowledge-grounded conversation datasets (Dinan et al., 2019; Zhou et al., 2018) inherently contain hallucinations, and models trained on such datasets further amplify hallucinations, which demonstrates that the pattern of hallucinated responses is likely to be learned by the model. To address this problem, Dziri et al. (2022a) further propose FaithDial, a new dataset that removes the hallucinations in the Wizard of Wikipedia (Dinan et al., 2019). Different from these studies of models and datasets, we find that a training method with an unexpected learning focus also plays a vital role in the hallucination problem, and we present a method to adjust the original focus.

# 3 Methods

# 3.1 Our Approach

The overview of FocusL is presented in Figure 3. Given the conversation context $C = (c_{1},\dots,c_{n})$ consisting of a sequence of $n$ dialogue turns and the corresponding knowledge $K = (k_{1},\dots,k_{m})$ for the current turn, where $m$ is the number of tokens in $K$, the goal of our task is to generate the response $Y = (y_{1},\dots,y_{T})$, where $T$ is the number of tokens in $Y$. We first form the input $I$ by joining the knowledge $K$ and the conversation context $C$ as follows:

$$
I = [K; C] \tag{1}
$$

where the utterances of $C$ are delimited by the speaker identifier (either $<\text{user}>$ or $<\text{bot}>$). Then we use T5 (Raffel et al., 2020) as the base model, a pre-trained encoder-decoder model built on the transformer architecture (Vaswani et al., 2017). Taking $I$ as input, the base model outputs a logit distribution $h = (h_1, \dots, h_T)$, where $h_t$ is the corresponding logit distribution of the $t$-th token in $Y$. The positioning module locates knowledge-aware tokens in the response $Y$ and calculates the corresponding adjust weights. The focus shifting module adjusts the original logit distribution $h$ to obtain the final logit distribution $h_w$. Finally, we train the model to produce the next conversation utterance $y_1 \dots y_T$ by minimizing the cross-entropy loss.
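
To make the concatenation concrete, here is a minimal sketch of forming the input $I = [K; C]$ (Eq. 1), assuming the context arrives as (speaker, utterance) pairs; the helper name and example strings are illustrative, and only the `<user>`/`<bot>` tags come from the paper.

```python
def build_input(knowledge: str, context: list[tuple[str, str]]) -> str:
    """Join knowledge and speaker-tagged context turns into one input string."""
    turns = " ".join(f"<{speaker}> {utterance}" for speaker, utterance in context)
    return f"{knowledge} {turns}"

# Hypothetical example input for the base model.
example = build_input(
    "Cheerleading originated in the United States.",
    [("user", "My daughter wants to try cheerleading."),
     ("bot", "Did you know it originated in the United States?")],
)
```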

In the following, we introduce the three steps of the FocusL training process: (1) locate knowledge-aware tokens, which are used as the new learning focus (§3.2); (2) calculate adjust weights based on the relevance of the knowledge to each token in the response (§3.3); (3) shift the original learning focus to the knowledge-aware tokens (§3.4).

# 3.2 Learning Focus Positioning

To adjust the learning focus of the model, we first define knowledge-aware tokens as the new learning focus, which is more in line with the knowledge-grounded dialogue task. Then we use the distance between the response token and the knowledge in semantic space to measure its relevance:

$$
\operatorname{relevance}\left(y_{t}^{r}, \mathcal{K}\right) = \frac{y_{t}^{r} \cdot \mathcal{K}}{\| y_{t}^{r} \| \cdot \| \mathcal{K} \|} \tag{2}
$$

To obtain the semantic representations of the token $y_{t}$ and the knowledge $K$, we use the embedding layer $\operatorname{Emb}(\cdot)$ of the base model to obtain dense representations:

$$
y_{t}^{r} = \operatorname{Emb}(y_{t}) \tag{3}
$$

$$
\mathcal{K} = \frac{1}{m} \sum_{i=1}^{m} \operatorname{Emb}\left(k_{i}\right) \tag{4}
$$

Figure 3: Training process of FocusL. We first compute the original model output based on the given knowledge and the context. Then we calculate the relevance score between each token in the response and the knowledge, and further convert it to the adjust weight distribution. Finally, we use the adjust weights to scale the original loss.

Note that we do not use the model's encoder to obtain the representation vectors of knowledge and responses: the output of the embedding layer is sufficient to provide the desired semantic information, and it also has less impact on training speed. Instead of outputting knowledge-aware tokens directly, the positioning method uses the relevance distribution to provide more information for §3.3.
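
A minimal sketch of this positioning step (Eqs. 2-4), assuming a PyTorch embedding layer and token-id tensors for the response and the knowledge; the function name and tensor shapes are our assumptions, not the authors' released code.

```python
import torch

def relevance_scores(emb: torch.nn.Embedding,
                     response_ids: torch.Tensor,    # shape (T,)
                     knowledge_ids: torch.Tensor    # shape (m,)
                     ) -> torch.Tensor:             # shape (T,)
    y = emb(response_ids)                  # per-token representations (Eq. 3)
    k = emb(knowledge_ids).mean(dim=0)     # mean-pooled knowledge vector (Eq. 4)
    # Cosine similarity of each response token to the knowledge vector (Eq. 2).
    y_norm = y / y.norm(dim=-1, keepdim=True)
    k_norm = k / k.norm()
    return y_norm @ k_norm
```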

# 3.3 Adjust Weight

To adaptively assign a weight to each token for adjusting the corresponding logit value, we can simply define the adjust weight scalar $w_{t}^{a}$ as follows:

$$
w_{t}^{a} = \begin{cases} 2, & \text{if } \operatorname{relevance}\left(y_{t}^{r}, \mathcal{K}\right) \geq \theta \\ 1, & \text{otherwise} \end{cases} \tag{5}
$$

where $\theta$ is a threshold value. We rigidly define knowledge-aware tokens by setting a specific $\theta$. A token with relevance greater than the threshold is regarded as a knowledge-aware token and obtains a high adjust weight to increase the corresponding loss. We keep the original logit value unchanged for tokens with relevance scores less than the threshold.

However, the boundaries of knowledge-aware tokens are difficult to define, and the threshold value easily influences the learning effect of the model. To solve this problem, we further propose two different methods for converting relevance scores into adjust weights (both sketched in the code after this subsection's formulas).

Linear Weight To make full use of the information in the relevance distribution, we propose assigning a different adjust weight to each token. We obtain a non-negative distribution with the following formula:

$$
w_{t}^{a} = 1 + \operatorname{relevance}\left(y_{t}^{r}, \mathcal{K}\right) \tag{6}
$$

This method adaptively scales up the loss of knowledge-aware tokens while scaling down the loss of the remaining tokens. Although this adjusts the weights of all tokens, a linear weight distribution is not a good simulation, given the complexity of focus changes in real-world human learning. Meanwhile, the weights of knowledge-aware and irrelevant tokens are not significantly different, which does not have a large enough impact on the loss.

Non-linear Weight To ensure training stability, we aim to increase the loss of knowledge-aware tokens as much as possible while keeping the adjusted final loss close to the original loss. Therefore, the distribution of weights should be smoother over the low-relevance interval and steeper over the high-relevance interval. We map the original relevance distribution to a logarithmic distribution with the following formula:

$$
w_{t}^{a} = -\ln\left(1 - \operatorname{relevance}\left(y_{t}^{r}, \mathcal{K}\right) + \lambda\right) + 1 \tag{7}
$$

where $\lambda \in (0, e - 2)$ is a small constant that we call the smoothing factor. A larger smoothing factor yields a smoother distribution of weights.
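
The three relevance-to-weight transformations (Eqs. 5-7) are small element-wise maps; a sketch under the assumption that `rel` holds per-token cosine relevance scores in $[-1, 1]$:

```python
import torch

def threshold_weight(rel: torch.Tensor, theta: float = 0.5) -> torch.Tensor:
    # Eq. 5: weight 2 for knowledge-aware tokens, 1 otherwise.
    return torch.where(rel >= theta, torch.full_like(rel, 2.0), torch.ones_like(rel))

def linear_weight(rel: torch.Tensor) -> torch.Tensor:
    # Eq. 6: non-negative, linearly increasing with relevance.
    return 1.0 + rel

def nonlinear_weight(rel: torch.Tensor, lam: float = 0.01) -> torch.Tensor:
    # Eq. 7: logarithmic map; valid since 1 - rel + lam > 0 for rel <= 1, lam > 0.
    return -torch.log(1.0 - rel + lam) + 1.0
```

A smaller `lam` makes `nonlinear_weight` grow sharply near `rel = 1`, matching the intent that steeper distributions emphasize knowledge-aware tokens.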

# 3.4 Focused Cross-Entropy Loss

After obtaining the adjust weight $w_{t}^{a}$, we scale the original logit and then use the new logit to calculate the probability of each token. At time step $t$, given the original model output $h_t$, the probability of the token $y_t$ is calculated as follows:

$$
p_{w}(y_{t} \mid y_{<t}, \mathcal{I}) = \operatorname{softmax}(w_{t}^{a} \cdot h_{t}) \tag{8}
$$

We define the final loss for optimization as the Focused Cross-Entropy (FCE) loss:

$$
\mathcal{L}_{FCE} = -\frac{1}{T} \sum_{t=1}^{T} \log p_{w}\left(y_{t} \mid y_{<t}, \mathcal{I}\right) \tag{9}
$$

where $T$ is the length of the response. FCE changes the original loss distribution, which leads the model to shift its original learning focus to the desired tokens. To minimize this loss, gradient descent is used to update all parameters.
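
Putting Eqs. 8-9 together, a sketch of the focused cross-entropy under assumed tensor shapes (`logits` of shape (T, V), `targets` and `weights` of shape (T,)); an illustration of the technique, not the authors' implementation:

```python
import torch
import torch.nn.functional as F

def focused_cross_entropy(logits: torch.Tensor,
                          targets: torch.Tensor,
                          weights: torch.Tensor) -> torch.Tensor:
    # Scale each step's logits by its adjust weight before the softmax (Eq. 8).
    scaled = weights.unsqueeze(-1) * logits
    log_probs = F.log_softmax(scaled, dim=-1)
    # Negative log-likelihood of the gold token at each step, averaged (Eq. 9).
    nll = -log_probs.gather(-1, targets.unsqueeze(-1)).squeeze(-1)
    return nll.mean()
```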

# 4 Experiments

To evaluate the effectiveness of our method, we conduct experiments following the settings in (Dziri et al., 2022a). We use pre-trained T5 (Raffel et al., 2020)<sup>1</sup> from the HuggingFace library (Wolf et al., 2020) as our base language model and train for 10 epochs, accumulating gradients for 4 steps. We use a learning rate of 6.25E-5 and AdamW (Loshchilov and Hutter, 2019) for optimization. We set the warmup ratio to $4\%$, followed by linear decay. The maximum lengths of the input and output are 256 and 128, respectively. We set the batch size to 8. For the adjust weights, we set the smoothing factor $\lambda$ to 0.01 and the threshold value $\theta$ to 0.5. For decoding, we use nucleus sampling with $p = 0.6$. We train our model on a single NVIDIA Tesla V100 GPU with 32GB memory. Each epoch takes about 130 minutes for WoW and 35 minutes for FaithDial. Our code is available at https://github.com/Mute-ZEN/AgileLightning.
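
For orientation, a sketch of the reported optimization setup (AdamW, learning rate 6.25E-5, 4% linear warmup followed by linear decay); the checkpoint name and `total_steps` value are placeholders, since the exact T5 size and step count are not restated here:

```python
import torch
from transformers import T5ForConditionalGeneration, get_linear_schedule_with_warmup

model = T5ForConditionalGeneration.from_pretrained("t5-base")  # checkpoint size assumed
optimizer = torch.optim.AdamW(model.parameters(), lr=6.25e-5)

total_steps = 10_000  # placeholder: derive from dataset size, epochs, accumulation
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=int(0.04 * total_steps),  # 4% warmup ratio
    num_training_steps=total_steps,            # then linear decay
)
```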

# 4.1 Datasets

We conduct experiments on two knowledge-grounded dialogue datasets: (1) Wizard of Wikipedia (WoW), published in (Dinan et al., 2019); (2) FaithDial, published in (Dziri et al., 2022a).

WoW is a widely used dataset for knowledge-grounded dialogue based on Wikipedia. WoW is collected from two crowdsourcing workers, one of whom is a knowledgeable wizard and the other an inquisitive apprentice. The wizard can access the knowledge of Wikipedia, while the apprentice cannot. The dataset includes 22,311 conversations with 201,999 turns, and the test set has two subsets: Test Seen and Test Unseen. Test Seen comprises 533 topics that overlap with the training set and contain new dialogues. Test Unseen contains 58 topics that are never encountered in training or validation.

FaithDial Since the current knowledge-grounded conversation dataset (Dinan et al., 2019) contains a large number of hallucinated responses (Dziri et al., 2022b), Dziri et al. (2022a) propose FaithDial, which corrects the responses in WoW to be more faithful to knowledge. The percentage of corrections to the original wizard's responses exceeds $80\%$. The dataset contains a total of 5,649 conversations with 50,761 turns.

# 4.2 Baselines

We compare our model with the following baselines:

GPT2 (Radford et al., 2019) is an autoregressive model based on the transformer decoder architecture (Vaswani et al., 2017).

DIALOGPT (Zhang et al., 2020) is pre-trained on large-scale dialogue datasets on top of GPT2 to be more applicable to conversation generation.

DOHA (Prabhumoye et al., 2021) equips the BART (Lewis et al., 2020) model with a knowledge-aware attention module, enabling specific attention to the information in the knowledge.

CTRL (Rashkin et al., 2021) utilizes control codes to guide the model to generate responses that are more faithful to knowledge. Following (Dziri et al., 2022a), we use T5 as the base model of CTRL.

# 4.3 Evaluation Metrics

We aim to verify the effectiveness of our method in two aspects: fluency and faithfulness. We use both automatic metrics and human evaluation to compare all models.

Automatic Metrics We use BLEU (Papineni et al., 2002) and ROUGE (Lin, 2004) to evaluate the fluency of the generated responses; they reflect the similarity of the generated responses to the reference responses, and both are widely used in text generation evaluation (Dziri et al., 2022a; Zhou et al., 2022). To evaluate the faithfulness of the generated responses to the knowledge, we use BERTScore (Zhang et al., 2019), F1, and $\mathbf{Q}^2$ (Honovich et al., 2021). BERTScore measures the semantic similarity of responses to knowledge with sentence embeddings from BERT (Devlin et al., 2019), F1 measures the lexical overlap between responses and knowledge, and $\mathbf{Q}^2$ uses an automated question-and-answer technique to evaluate the consistency of responses with the knowledge.
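
As a reference point, knowledge F1 is commonly computed as unigram overlap between the response and the grounding knowledge; a sketch of that common formulation (not necessarily the exact script used in the paper):

```python
from collections import Counter

def knowledge_f1(response: str, knowledge: str) -> float:
    """Harmonic mean of unigram precision/recall against the knowledge."""
    r = Counter(response.lower().split())
    k = Counter(knowledge.lower().split())
    overlap = sum((r & k).values())  # min counts of shared tokens
    if overlap == 0:
        return 0.0
    precision = overlap / sum(r.values())
    recall = overlap / sum(k.values())
    return 2 * precision * recall / (precision + recall)
```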

Human Evaluation To mitigate the unreliability of automatic evaluation, we use more diverse evaluation methods to show model performance as objectively as possible, and we further conduct a human evaluation to verify the effectiveness of our method. We randomly select 100 dialogues from the test set of FaithDial and ask three human evaluators to rate the fluency (Fluency), informativeness (Inform.), and faithfulness (Faithful.) of the generated responses on a 5-point scale, where 1, 3, and 5 indicate unacceptable, moderate, and perfect performance, respectively. Among the metrics, Fluency evaluates response generation quality, Inform. evaluates whether the response is safe or vacuous, and Faithful. focuses on whether the knowledge used in the response comes from the given knowledge, which is stricter than Inform. We then take the average score of the three human evaluators as the final score.

# 5 Results

The results on FaithDial and WoW are shown in Tables 1, 2, and 3. As can be seen, FocusL outperforms all baselines in both faithfulness and fluency.

# 5.1 Automatic Evaluation

FCE vs CE To test the effectiveness of FocusL equipped with FCE, we compare our method with the baselines on the FaithDial dataset and report the results in Table 1. We use the test results of the baselines from (Dziri et al., 2022a) and keep the same metric calculation method to evaluate FocusL. We can see that our method outperforms the state-of-the-art baselines on all automatic metrics. In particular, FocusL achieves a significant improvement in BERTScore, F1, BLEU, and ROUGE, and a large improvement in $Q^2$ F1 and $Q^2$ NLI. We also find that models based on the transformer decoder architecture (GPT2, DIALOGPT) perform worse than encoder-decoder architectures (T5, CTRL, DOHA). Noticeably, although CTRL performs well in terms of faithfulness, it does not improve fluency much. In contrast, FocusL achieves a significant improvement in both faithfulness and fluency. This indicates that FocusL reasonably utilizes knowledge during conversation.

| Models | BERTScore | F1 | $Q^2$ F1 | $Q^2$ NLI | BLEU | ROUGE |
|---|---|---|---|---|---|---|
| GPT2 | 0.36 | 50.41 | 58.4 | 69.8 | 9.50 | 33.43 |
| DIALOGPT | 0.36 | 52.25 | 56.5 | 66.2 | 9.63 | 33.13 |
| DOHA | 0.39 | 58.32 | 69.1 | 78.3 | 9.89 | 31.78 |
| T5 | 0.41 | 59.22 | 70.4 | 79.5 | 10.31 | 33.89 |
| CTRL | 0.46 | 62.21 | 72.4 | 81.5 | 10.41 | 33.97 |
| FocusL | 0.50** | 65.07* | 73.25 | 82.58 | 11.58** | 35.41** |

Table 1: Automatic results on FaithDial evaluating the Faithfulness and Fluency of the generated responses. The best performance is bolded in the original paper. One "*" denotes statistical significance with $p < 0.05$, and "**" denotes significant improvement with $p < 0.01$.

Figure 4: The loss during training. FCE has almost no impact on training stability.

Moreover, to demonstrate the learning focus of our approach, we analyze the trend of the loss during training, as shown in Figure 4. FocusL achieves higher performance with nearly the same trend as the original CE loss variation, which also shows that our FCE loss does not destabilize training. Compared to the original CE loss, FCE has a higher loss at the beginning of training, which can be explained by the more significant adjustment our approach makes to the learning focus at that stage.

Robustness to Out-of-Domain Knowledge To evaluate the ability to apply out-of-domain knowledge, we further test our method on WoW and report the results in Table 2. We select the baselines that perform well on FaithDial and use T5 as the backbone for comparison. We train the model on the WoW training set and then test it on the two subsets separately. Results show that FocusL outperforms all baselines on the faithfulness metrics, significantly improving the model's reliability with only a slight impact on fluency. It is worth noting that our model improves more significantly in the out-of-domain setting, which indicates that our method is more robust to out-of-domain knowledge.

| Test Set Split | Models | BERTScore | F1 | $Q^2$ F1 | $Q^2$ NLI | BLEU | ROUGE |
|---|---|---|---|---|---|---|---|
| seen topic | T5 | 0.48 | 61.88 | 69.08 | 75.02 | 12.44 | 32.79 |
| | CTRL | 0.49 | 62.99 | 70.56 | 76.35 | 12.61 | 33.20 |
| | FocusL | 0.52 | 65.25 | 71.41 | 77.32 | 12.63 | 32.95 |
| unseen topic | T5 | 0.47 | 60.68 | 67.13 | 73.09 | 12.63 | 32.81 |
| | CTRL | 0.46 | 59.81 | 66.70 | 72.59 | 12.30 | 32.73 |
| | FocusL | 0.51 | 63.99 | 69.09 | 74.97 | 12.48 | 32.84 |

Table 2: Automatic results on WoW evaluating the Faithfulness and Fluency of the generated responses. The best performance is bolded in the original paper.

Robustness to Data Size To verify the learning efficiency of our approach with an adjusted learning focus, we also conduct experiments in a low-resource setting. We randomly select 1/2, 1/4, 1/8, 1/16, and 1/32 of the training data and report the results in Figure 5. We can see that our method achieves higher faithfulness even with 1/32 of the training data. The results also show that FocusL improves more significantly over the baselines in the low-resource setting, which demonstrates that our approach learns how to use knowledge more efficiently. The faithfulness of both T5 and CTRL does not change significantly, but their fluency decreases severely, which might be explained by the models tending to copy knowledge while ignoring the fluency of the response. In comparison, FocusL achieves a better trade-off between fluency and faithfulness with limited data.

Meanwhile, the performance of the model should weaken as the amount of data decreases; however, the experimental results are not what we expected. We can see that all models in Figure 5 have almost their highest BERTScore at 1/16 of the data, and our model FocusL even reaches its highest BLEU value there as well. After rigorously repeating the experiment multiple times, the results remain the same. We argue that this may be related to the data distribution characteristics of the dataset and deserves further study.

| Models | Faithful. | Fluency | Inform. |
|---|---|---|---|
| T5 | 2.80 | 3.62 | 3.23 |
| CTRL | 2.98 | 3.53 | 3.14 |
| FocusL | 3.11* | 3.59 | 3.44* |

Table 3: Human evaluation on WoW. The best performance is bolded in the original paper. Numbers marked with * indicate that the improvement is statistically significant (p-value < 0.05).

# 5.2 Human Evaluation

In addition to automatic evaluation, we present human evaluation results in Table 3. We choose T5 and CTRL as baselines for comparison. Results show that FocusL receives higher scores on both Faithful. and Inform., while its fluency is slightly lower than T5's. Overall, our approach makes the model more reliable with almost as much fluency as the baselines.

Figure 5: Automatic results on WoW with limited training data. (a) and (b) show BERTScore on the seen and unseen test sets, respectively. (c) and (d) show BLEU on the seen and unseen test sets, respectively.
| Model | BERTScore | F1 | BLEU |
|---|---|---|---|
| FocusL | 0.51 | 66.11 | 11.65 |
| -TW | 0.38 | 52.63 | 9.10 |
| -LW | 0.42 | 57.86 | 11.62 |
| w/o FCE | 0.40 | 57.17 | 11.89 |

Table 4: Ablation study of different adjust weight distributions. The best performance is bolded in the original paper.

| Model | BERTScore | F1 | BLEU |
|---|---|---|---|
| FocusL | 0.51 | 66.11 | 11.65 |
| λ = 0.05 | 0.45 | 61.51 | 11.78 |
| λ = 0.1 | 0.50 | 64.55 | 11.53 |
| λ = 0.2 | 0.44 | 60.88 | 12.08 |
| λ = 0.4 | 0.43 | 60.19 | 12.13 |
| λ = 0.7 | 0.43 | 60.81 | 12.53 |

Table 5: Ablation study of various $\lambda$ for the non-linear adjust weight distribution. The best performance is bolded in the original paper.

# 5.3 Ablation Study

Finally, we study the performance of the FCE variants described in §3.3. Results for the different adjust weight distributions are shown in Table 4, and Table 5 covers different $\lambda$ values in the non-linear weight. In Table 4, we compare the threshold weight (TW), the linear weight (LW), the non-linear weight (FocusL), and training without FCE (w/o FCE). Among them, TW performs the worst, which may be influenced by the threshold. In contrast, LW is more stable than TW and does not suffer from hyperparameter effects. Even though its BLEU is slightly lower than CE's, FocusL significantly improves BERTScore and F1.

To further study the effect of $\lambda$ in the non-linear weight, we set $\lambda$ to 0.05, 0.1, 0.2, 0.4, and 0.7, and present the results in Table 5. Note that FocusL uses $\lambda = 0.01$ in the main experiments. As $\lambda$ increases, the faithfulness metrics of the model gradually decrease, and the fluency metrics gradually increase. This indicates that a smaller $\lambda$ with a steeper weight distribution makes the model more sensitive to the knowledge-aware tokens' losses, which increases the accuracy of knowledge utilization. In contrast, a larger $\lambda$ with a smoother weight distribution makes the model focus on the quality of the response.
Given Knowledge: The global presentation of cheerleading was led by the 1997 broadcast of ESPN's International cheerleading competition, and the worldwide release of the 2000 film "Bring It On".

Context:
<user> She has done a lot of dance and tumbling already. She will try it out and see what works best for her.
<bot> Got it, are you from the United States? Cheerleading is an activity that originated there, it is also predominantly in America.
<user> Yes we are, she wants to be a cheerleader since she was a little kid, I am sure she will be fine. I could see her going on to do it in college as well.

Gold Response: Nice, have you watched the film Bring It On? It is from 2000.

T5: I see, did you know that the 1998 televised ESPN's International cheerleading competition led to the global presentation of cheerleading? That's interesting.

CTRL: Yes, the world presented cheerleading in 1997

FocusL: I see, did you know that the movie Bring It On was released in 2000?

Table 6: An example case from FaithDial.

# 5.4 Case Study

To better illustrate the advantage of our approach, we present an example case in Table 6. We randomly select one dialogue from the test set of FaithDial and compare the responses generated by T5, CTRL, and FocusL. It can be observed that the response generated by T5 uses the wrong year "1998" while the given knowledge mentions "1997", and its causal claim also cannot be inferred from the given knowledge. CTRL misunderstands the given knowledge and ignores the impact of "Bring It On" on the global presentation of cheerleading. In contrast, FocusL generates a response that is more related to the given knowledge and closest to the gold response, containing all knowledge entities in the gold response.

# 6 Conclusion

In this paper, we propose a novel learning approach, referred to as FocusL, that gives more direct guidance during training to improve the faithfulness of knowledge-grounded dialogue systems. By leveraging the semantic relevance between the response and the knowledge, FocusL corrects the model's learning focus, leading to more consistent and fluent response generation. We empirically show that our approach achieves the best performance with a stable training process and is robust to data size and out-of-domain knowledge. FocusL is simple yet effective and achieves state-of-the-art results on two knowledge-grounded datasets.

# Limitations

As we have shown, there is much room to improve the learning approach, which incurs lower costs than increasing the model's parameters or elaborate data engineering. This paper is an exercise in guiding the learning focus, and we acknowledge that FocusL's positioning method and relevance-to-weight transformation method are not perfect. For example, our positioning method may contain noise, and some words that are not important in the given knowledge may be used as our learning focus. We will continue to explore better methods to guide the model's learning focus. Meanwhile, our method is only tested with the basic cross-entropy loss, and it still needs to be explored with other learning approaches such as contrastive learning.

# Ethics Statement

FocusL aims to convey correct knowledge to users rather than misleading hallucinations. We hope that better guiding the model's learning focus will lead to reliable and trustworthy dialogue systems. However, even if a dialogue system does not produce hallucinations, there is still a risk of potential misuse. For example, dialogue systems may be used to spread misinformation or to mislead users. If possible, we would prefer that the model itself has the ability to identify undesirable knowledge and block it.

# Acknowledgements

This work is supported by the National Natural Science Foundation of China (Grant No. U21B2009). This research is also supported by the Strategic Priority Research Program of the Chinese Academy of Sciences, Grant No. XDC02030400.
| 292 |
+
# References

Kurt Bollacker, Colin Evans, Praveen Paritosh, Tim Sturge, and Jamie Taylor. 2008. Freebase: a collaboratively created graph database for structuring human knowledge. In Proceedings of the 2008 ACM SIGMOD International Conference on Management of Data, pages 1247-1250.

Yuanyuan Cai, Min Zuo, Qingchuan Zhang, Haitao Xiong, and Ke Li. 2020. A bichannel transformer with context encoding for document-driven conversation generation in social media. Complexity, 2020:3710104:1-3710104:13.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.

Emily Dinan, Stephen Roller, Kurt Shuster, Angela Fan, Michael Auli, and Jason Weston. 2019. Wizard of Wikipedia: Knowledge-powered conversational agents. In International Conference on Learning Representations.

Nouha Dziri, Ehsan Kamalloo, Sivan Milton, Osmar Zaiane, Mo Yu, Edoardo Ponti, and Siva Reddy. 2022a. FaithDial: A faithful benchmark for information-seeking dialogue. arXiv preprint arXiv:2204.10757.

Nouha Dziri, Andrea Madotto, Osmar Zaïane, and Avishek Joey Bose. 2021. Neural path hunter: Reducing hallucination in dialogue systems via path grounding. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 2197-2214, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.

Nouha Dziri, Sivan Milton, Mo Yu, Osmar Zaiane, and Siva Reddy. 2022b. On the origin of hallucinations in conversational models: Is it the datasets or the models? In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5271-5285, Seattle, United States. Association for Computational Linguistics.

Marjan Ghazvininejad, Chris Brockett, Ming-Wei Chang, William B. Dolan, Jianfeng Gao, Wen-tau Yih, and Michel Galley. 2017. A knowledge-grounded neural conversation model. ArXiv, abs/1702.01932.

Or Honovich, Leshem Choshen, Roee Aharoni, Ella Neeman, Idan Szpektor, and Omri Abend. 2021. $Q^2$: Evaluating factual consistency in knowledge-grounded dialogues via question generation and question answering. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7856-7870, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.

Minki Kang, Jin Myung Kwak, Jinheon Baek, and Sung Ju Hwang. 2022. Knowledge-consistent dialogue generation with knowledge graphs. In ICML 2022 Workshop on Knowledge Retrieval and Language Models.

Byeongchang Kim, Jaewoo Ahn, and Gunhee Kim. 2020. Sequential latent knowledge selection for knowledge-grounded dialogue. In International Conference on Learning Representations.

Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.

Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity-promoting objective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 110-119, San Diego, California. Association for Computational Linguistics.

Sha Li, Mahdi Namazifar, Di Jin, Mohit Bansal, Heng Ji, Yang Liu, and Dilek Hakkani-Tur. 2022. Enhancing knowledge selection for grounded dialogues via document semantic graphs. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2810-2823, Seattle, United States. Association for Computational Linguistics.

Rongzhong Lian, Min Xie, Fan Wang, Jinhua Peng, and Hua Wu. 2019. Learning to select knowledge for response generation in dialog systems. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, pages 5081-5087. International Joint Conferences on Artificial Intelligence Organization.

Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.

Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In International Conference on Learning Representations.

Longxuan Ma, Wei-Nan Zhang, Runxin Sun, and Ting Liu. 2020. A compare aggregate transformer for understanding document-grounded dialogue. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1358-1367, Online. Association for Computational Linguistics.

Chuan Meng, Pengjie Ren, Zhumin Chen, Weiwei Sun, Zhaochun Ren, Zhaopeng Tu, and M. de Rijke. 2020. DukeNet: A dual knowledge interaction network for knowledge-grounded conversation. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval.

Nikita Moghe, Siddharth Arora, Suman Banerjee, and Mitesh M. Khapra. 2018. Towards exploiting background knowledge for building conversation systems. In Conference on Empirical Methods in Natural Language Processing.

Seungwhan Moon, Pararth Shah, Anuj Kumar, and Rajen Subba. 2019. OpenDialKG: Explainable conversational reasoning with attention-based walks over knowledge graphs. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 845-854.

Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. BLEU: a method for automatic evaluation of machine translation. In Annual Meeting of the Association for Computational Linguistics.

Wei Peng, Yue Hu, Luxi Xing, Yuqiang Xie, Yajing Sun, and Yunpeng Li. 2022. Control globally, understand locally: A global-to-local hierarchical graph network for emotional support conversation. In Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI 2022, Vienna, Austria, 23-29 July 2022, pages 4324-4330. ijcai.org.

Shrimai Prabhumoye, Kazuma Hashimoto, Yingbo Zhou, Alan W Black, and Ruslan Salakhutdinov. 2021. Focused attention improves document-grounded generation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4274-4287, Online. Association for Computational Linguistics.

Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners.

Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67.

Hannah Rashkin, David Reitter, Gaurav Singh Tomar, and Dipanjan Das. 2021. Increasing faithfulness in knowledge-grounded dialogue with controllable features. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 704-718, Online. Association for Computational Linguistics.

Weiwei Sun, Zhengliang Shi, Shen Gao, Pengjie Ren, M. de Rijke, and Zhaochun Ren. 2022. Contrastive learning reduces hallucination in conversations. ArXiv, abs/2212.10400.

Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. ArXiv, abs/1706.03762.

Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.

Jun Xu, Haifeng Wang, Zheng-Yu Niu, Hua Wu, Wanxiang Che, and Ting Liu. 2020. Conversational graph grounded policy learning for open-domain conversation generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1835-1845, Online. Association for Computational Linguistics.

Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2019. BERTScore: Evaluating text generation with BERT. ArXiv, abs/1904.09675.

Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, and Bill Dolan. 2020. DIALOGPT: Large-scale generative pre-training for conversational response generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 270-278, Online. Association for Computational Linguistics.

Xueliang Zhao, Chongyang Tao, Wei Wu, Can Xu, Dongyan Zhao, and Rui Yan. 2019. A document-grounded matching network for response selection in retrieval-based chatbots. In International Joint Conference on Artificial Intelligence.

Xueliang Zhao, Wei Wu, Chongyang Tao, Can Xu, Dongyan Zhao, and Rui Yan. 2020. Low-resource knowledge-grounded dialogue generation. In International Conference on Learning Representations.

Chujie Zheng, Yunbo Cao, Daxin Jiang, and Minlie Huang. 2020. Difference-aware knowledge selection for knowledge-grounded conversation generation. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 115-125, Online. Association for Computational Linguistics.

Hao Zhou, Chujie Zheng, Kaili Huang, Minlie Huang, and Xiaoyan Zhu. 2020. KdConv: A Chinese multi-domain dialogue dataset towards multi-turn knowledge-driven conversation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7098-7108, Online. Association for Computational Linguistics.

Kangyan Zhou, Shrimai Prabhumoye, and Alan W Black. 2018. A dataset for document grounded conversations. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 708-713, Brussels, Belgium. Association for Computational Linguistics.

Pei Zhou, Karthik Gopalakrishnan, Behnam Hedayatnia, Seokhwan Kim, Jay Pujara, Xiang Ren, Yang Liu, and Dilek Hakkani-Tur. 2022. Think before you speak: Explicitly generating implicit commonsense knowledge for response generation. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1237-1252, Dublin, Ireland. Association for Computational Linguistics.

# A For every submission:

A1. Did you describe the limitations of your work?
We discuss the limitations in the "Limitations" section, before the "Ethics Statement" section.

A2. Did you discuss any potential risks of your work?
We discuss potential risks in the "Ethics Statement" section at the end of the paper.

A3. Do the abstract and introduction summarize the paper's main claims?
The abstract is in the "Abstract" section, and the introduction is in Section 1.

A4. Have you used AI writing assistants when working on this paper?
Left blank.

# B Did you use or create scientific artifacts?

We show the artifacts in Section 4.

B1. Did you cite the creators of artifacts you used?
We cite the creators in Section 4.

B2. Did you discuss the license or terms for use and / or distribution of any artifacts?
Not applicable. Left blank.

B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)?
Not applicable. Left blank.

B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it?
Not applicable. Left blank.

B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.?
We provide it in Section 4.

B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.
We report it in Section 4.

# C Did you run computational experiments?

We present computational experiments in Section 4.

C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used?
We report it in Section 4.

C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values?
We discuss the experimental setup in Section 4.

C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?
We report it in Section 5.

C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)?
We report it in Section 4.

# D Did you use human annotators (e.g., crowdworkers) or research with human participants?

We report it in Section 4.3.

D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.?
Not applicable. Left blank.

D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)?
Not applicable. Left blank.

D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used?
Not applicable. Left blank.

D4. Was the data collection protocol approved (or determined exempt) by an ethics review board?
Not applicable. Left blank.

D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data?
Not applicable. Left blank.

2023/Towards Faithful Dialogues via Focus Learning/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1fadd99bcd9ffa056a2be213efda9866ed2b31073484af4da71ec740cad045f
size 407163

2023/Towards Faithful Dialogues via Focus Learning/layout.json
ADDED
The diff for this file is too large to render. See raw diff.

2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/46a16516-75c3-4923-9a1f-8ba83ee5b355_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.

2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/46a16516-75c3-4923-9a1f-8ba83ee5b355_model.json
ADDED
The diff for this file is too large to render. See raw diff.

2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/46a16516-75c3-4923-9a1f-8ba83ee5b355_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e53aeb9c1fa5e0d4f623af14a960221d2750c9d48a2b52dc7aa2f0d686879e84
size 2235488

2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/full.md
ADDED
@@ -0,0 +1,489 @@

# Towards Higher Pareto Frontier in Multilingual Machine Translation

Yichong Huang†, Xiaocheng Feng†‡, Xinwei Geng†, Baohang Li†, Bing Qin†‡

†Harbin Institute of Technology  ‡Peng Cheng Laboratory

{ychuang,xcfeng,xwgeng,baohangli,qinb}@ir.hit.edu.cn

# Abstract

Multilingual neural machine translation has witnessed remarkable progress in recent years. However, the long-tailed distribution of multilingual corpora poses a challenge of Pareto optimization, i.e., optimizing for some languages may come at the cost of degrading the performance of others. Existing balancing training strategies are equivalent to a series of Pareto optimal solutions, which trade off on a Pareto frontier<sup>1</sup>. In this work, we propose a new training framework, Pareto Mutual Distillation (Pareto-MD), towards pushing the Pareto frontier outwards rather than making trade-offs. Specifically, Pareto-MD collaboratively trains two Pareto optimal solutions that favor different languages and allows them to learn from the strengths of each other via knowledge distillation. Furthermore, we introduce a novel strategy to enable stronger communication between Pareto optimal solutions and broaden the applicability of our approach. Experimental results on the widely-used WMT and TED datasets show that our method significantly pushes the Pareto frontier and outperforms baselines by up to +2.46 BLEU<sup>2</sup>.


# 1 Introduction

Multilingual neural machine translation (MNMT) is a popular paradigm that uses a unified model to handle the entire translation process for multiple language pairs (Ha et al., 2016; Firat et al., 2016; Johnson et al., 2017). This paradigm is particularly effective at improving the performance of low-resource languages through transfer learning (Aharoni et al., 2019; Dabre et al., 2020; Siddhant et al., 2022). Besides, MNMT is highly deployable since only one model is required (Fan et al., 2021; Yang et al., 2021; NLLB Team et al., 2022).



Figure 1: Multilingual performance frontier shifts outwards. The X-axis and Y-axis indicate the performance on Low-Resource Languages and High-Resource Languages, respectively. Existing methods reflect a trade-off on the Pareto frontier (the gray curve). Our work aims to push the original Pareto frontier outwards, i.e., to the blue dotted curve. To this end, we ameliorate each individual model's shortcoming while retaining its strengths, e.g., moving solution $A$ right to $A'$ and moving solution $B$ up to $B'$, via our Pareto Mutual Distillation.

However, the severely imbalanced distribution of multilingual training data puts MNMT in a situation of Pareto optimization (also known as multi-objective optimization): when some languages are optimized, others degenerate. Existing methods can be considered a set of Pareto optimal solutions that trade off on a Pareto frontier; they focus on balancing performance across different languages by adjusting the sampling distribution (Arivazhagan et al., 2019; Wang et al., 2020; Wu et al., 2021). The widely-used temperature-based sampling (Arivazhagan et al., 2019) is typical evidence of this claim: it uses a hyper-parameter to smooth the training distribution over all language pairs, enhancing the representation of Low-Resource Languages (LRLs) while sacrificing that of High-Resource Languages (HRLs). Despite the emergence of several sophisticated dynamic sampling technologies designed to overcome the inflexibility of temperature-based sampling, their performance remains restricted to this Pareto frontier (Wang et al., 2020; Zhou et al., 2021; Zhang et al., 2021).

In this work, we propose a novel training framework, named Pareto Mutual Distillation (Pareto-MD), to push the Pareto frontier of multilingual models. Specifically, Pareto-MD uses different training distributions that favor dissimilar subsets of languages to train two multilingual models simultaneously. These two models learn from each other at each training step via knowledge distillation. The underlying idea of Pareto-MD is to address the shortcomings of each individual Pareto optimal solution via access to a solution that is stronger in that respect, thereby raising the Pareto frontier, as Fig. 1 depicts. To fully exploit the potential of our approach in multilingual settings, we further propose Automatic Pareto Mutual Distillation, which dynamically determines the contribution of the distillation loss to each objective. These contributions, controlled by a set of distillation weights, adapt automatically to the evolving models, eliminating the need for a manual hyper-parameter search.

While our method applies essentially to any multi-objective optimization problem, we specifically demonstrate its benefit on multilingual machine translation. The experimental results on two widely-used datasets demonstrate the effectiveness of our method, which improves performance by up to +2.46 BLEU, and further analysis shows that the Pareto frontier is pushed outwards visibly.

# 2 Preliminaries

Neural machine translation (NMT) is a classic NLP task that translates a sentence $x$ in a source language into a sentence $y$ in a target language (Kalchbrenner and Blunsom, 2013; Sutskever et al., 2014; Bahdanau et al., 2015; Vaswani et al., 2017). Given a parallel corpus $D = \{(x,y) \in \mathcal{X} \times \mathcal{Y}\}$, the NMT model is commonly trained by minimizing the negative log-likelihood loss:

$$
\mathcal{L}_{ce} = \sum_{(x, y) \sim D} \sum_{i \leqslant |y|} -\log p\left(y_i \mid x, y_{<i}; \theta\right), \tag{1}
$$

where $p(\cdot \mid \cdot; \theta)$ maps the source sentence and the previously generated tokens to the next target token.

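
To make the objective concrete, here is a minimal PyTorch sketch of the per-sentence loss in Eq. 1; the `(seq_len, vocab_size)` logits layout and the helper name are assumptions of this illustration, not part of the paper:

```python
import torch
import torch.nn.functional as F

def nll_loss(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """Eq. 1 for one sentence pair: sum of -log p(y_i | x, y_<i) over positions.
    logits: (seq_len, vocab_size) next-token scores; targets: (seq_len,) gold ids."""
    log_probs = F.log_softmax(logits, dim=-1)
    # pick the log-probability of each gold token y_i and negate
    return -log_probs.gather(1, targets.unsqueeze(1)).sum()
```
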
# 2.1 Multilingual Machine Translation

Given a set of language pairs $L$, the MNMT model is trained on the combination of $|L|$ parallel datasets $\{D_{\ell}^{train}\}_{\ell = 1}^{|L|}$, where $D_{\ell}^{train}$ is the dataset of language pair $(S_{\ell}, T_{\ell})$. In order to encode and decode text in various languages into and from a universal semantic space, a large multilingual vocabulary $\mathcal{V}$ is constructed. A language tag is appended to the beginning of each source sentence as a hint of the target language. The MNMT model is also trained with the loss function of Eq. 1 over the multilingual datasets.

Temperature-based Sampling. The multilingual datasets form a distribution $P$, where $P(\ell) = \frac{N_{\ell}}{\sum_{j} N_{j}}$ is the sampling probability of language pair $\ell$ and $N_{\ell}$ denotes the size of $D_{\ell}^{train}$. Since the sampling probabilities of LRLs are substantially lower than those of HRLs, the optimization towards LRLs can be overwhelmed by that of HRLs. To resolve this issue, Arivazhagan et al. (2019) propose temperature-based sampling, introducing a hyper-parameter $\tau$ to re-scale the smoothness of the training distribution. Concretely, the sampling probability of each language pair $\ell$ is set to:

$$
P(\ell) = \frac{N_{\ell}^{1/\tau}}{\sum_{j} N_{j}^{1/\tau}}, \tag{2}
$$

where increasing the value of $\tau$ produces smoother training distributions and a stronger preference for LRLs.

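
As a quick illustration of Eq. 2, the sketch below computes the sampling distribution for hypothetical corpus sizes (the sizes are invented for the example):

```python
import numpy as np

def temperature_sampling_probs(sizes, tau: float = 1.0) -> np.ndarray:
    """Eq. 2: P(l) = N_l^(1/tau) / sum_j N_j^(1/tau)."""
    sizes = np.asarray(sizes, dtype=np.float64)
    scaled = sizes ** (1.0 / tau)
    return scaled / scaled.sum()

# Hypothetical corpus sizes: three HRLs followed by three LRLs.
sizes = [10_000_000, 4_500_000, 2_000_000, 200_000, 180_000, 150_000]
print(temperature_sampling_probs(sizes, tau=1.0))  # proportional: HRLs dominate
print(temperature_sampling_probs(sizes, tau=5.0))  # smoother: LRLs up-weighted
```
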
# 2.2 Mutual Distillation

Knowledge Distillation (KD) is a popular technology for knowledge transfer, which originates from compressing a static high-capacity model (the teacher) into a small compact model (the student) (Hinton et al., 2015). Mutual distillation is a variant of KD (Zhang et al., 2018; Guo et al., 2020). Instead of using a pre-trained teacher model, mutual distillation trains more than one model simultaneously, with each model teaching the other throughout the training process. Mutual distillation takes the same loss function as vanilla knowledge distillation, that is:

$$
\mathcal{L}_{kd} = \sum_{i \leqslant |y|} \sum_{w \in \mathcal{V}} - p\left(w \mid x, y_{<i}; \theta^{T}\right) \cdot \log p\left(w \mid x, y_{<i}; \theta^{S}\right), \tag{3}
$$

where $\mathcal{V}$ is the target-side vocabulary, and $\theta^S$ and $\theta^T$ are the student and teacher models. The major difference of Pareto-MD from vanilla mutual distillation is that we train the two models with different sampling distributions so that they favor different sets of objectives.
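
A minimal sketch of Eq. 3 in PyTorch, assuming both models expose per-token logits of shape `(seq_len, vocab_size)`; detaching the teacher matches the one-way gradient flow of distillation:

```python
import torch
import torch.nn.functional as F

def kd_loss(student_logits: torch.Tensor, teacher_logits: torch.Tensor) -> torch.Tensor:
    """Eq. 3: cross-entropy between the teacher's distribution over the
    vocabulary and the student's, summed over all target positions."""
    teacher_probs = F.softmax(teacher_logits, dim=-1).detach()  # teacher gets no gradient
    student_log_probs = F.log_softmax(student_logits, dim=-1)
    return -(teacher_probs * student_log_probs).sum()
```
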



Figure 2: Illustration of Pareto-MD, which uses different sampling distributions to train two models. At each step, each model additionally mimics the output of the other via knowledge distillation. The distillation learning of each model is weighted by language-specific distillation weights $\alpha_{i}[\ell]$ determined by specific strategies.

# 3 Pareto Mutual Distillation

In this section, we first introduce our training framework Pareto-MD (§3.1). Next, two strategies that determine the important distillation weights, UNI-PMD and BI-PMD, are presented (§3.2). To overcome the flaws of these two strategies, AUTO-PMD is further proposed (§3.3).

# 3.1 Framework

We illustrate our Pareto-MD in Fig. 2. Pareto-MD simultaneously trains two models, denoted by $\theta_{1}$ and $\theta_{2}$, using different sampling distributions, $P_{1}$ and $P_{2}$, that make each model favor a different set of language pairs. To obtain the expected distributions, we adopt temperature-based sampling, as shown in Eq. 2, and set $\tau = 1$ for $P_{1}$ and $\tau > 1$ (commonly $\tau = 5$) for $P_{2}$. In this way, $\theta_{1}$ prefers HRLs, and $\theta_{2}$ prefers LRLs.

At each training step, for each model $\theta_{i}$ where $i \in \{1,2\}$, Pareto-MD first draws a language pair $\ell$ from the training distribution $P_{i}$, then samples a mini-batch of sentence pairs $B_{\ell} = \{x_{\ell}, y_{\ell}\}$ from $D_{\ell}^{train}$. Next, the model $\theta_{i}$ is trained to fit $B_{\ell}$ and to match the output of the other model, i.e., $\theta_{3-i}$. The overall loss function for model $\theta_{i}$ is defined as:

$$
\mathcal{L}_{PMD} = \left(1 - \boldsymbol{\alpha}_{i}[\ell]\right) \times \mathcal{L}_{ce}\left(B_{\ell}; \theta_{i}\right) + \boldsymbol{\alpha}_{i}[\ell] \times \mathcal{L}_{kd}\left(B_{\ell}; \theta_{i}, \theta_{3-i}\right), \tag{4}
$$

where $\boldsymbol{\alpha}_{i} \in \mathbb{R}^{|L|}$ is the multilingual distillation weight vector of $\theta_{i}$ and $\boldsymbol{\alpha}_{i}[\ell] \in [0,1]$ is the distillation weight for language pair $\ell$. $\boldsymbol{\alpha}_{i}[\ell]$ is crucial, as it controls the extent to which $\theta_{i}$ learns from $\theta_{3-i}$ in direction $\ell$: when $\boldsymbol{\alpha}_{i}[\ell] = 0$, $\theta_{i}$ acquires no information from $\theta_{3-i}$ in $\ell$. The values of $\boldsymbol{\alpha}_{i}$ are determined by the specific strategy. We summarize the whole training framework in Alg. 1.

Algorithm 1: Pareto-MD

Input: Datasets $\{D_{\ell}^{train}\}_{\ell = 1}^{|L|}$, two training distributions $P_{1}, P_{2}$, learning rate $\eta$, distillation-weight updating strategy $S$, updating interval $\mathcal{T}$
Initialize: Randomly initialize models $\theta_{1}$ and $\theta_{2}$; set multilingual distillation weights $\alpha_{1}, \alpha_{2} = \mathbf{0}$; training step $t = 0$
while not converged do
    $t \gets t + 1$
    for $i \gets 1$ to $2$ do
        Sample a language pair $\ell$ from $P_{i}$
        Draw a batch of samples $B_{\ell}$ from $D_{\ell}^{train}$
        $\theta_{i} \gets \theta_{i} - \eta \nabla_{\theta_{i}} \mathcal{L}_{PMD}(B_{\ell}; \theta_{i}, \theta_{3-i}, \alpha_{i}[\ell])$
    end
    if $t \% \mathcal{T} = 0$ then
        Update $\alpha_{1}, \alpha_{2}$ with the specific strategy $S$
    end
end

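
Putting Eq. 4 and Alg. 1 together, one inner-loop step might look like the sketch below; `model(src, tgt)` returning per-token logits, the `batch_iters` iterators, and the inlined distillation term (Eq. 3) are assumptions of this illustration rather than the authors' implementation:

```python
import random
import torch
import torch.nn.functional as F

def pareto_md_step(model, peer, P_i, batch_iters, alpha_i, optimizer):
    """One update of theta_i under Pareto-MD (Eq. 4): sample a language pair
    from P_i, then mix cross-entropy on the batch with distillation from the peer."""
    l = random.choices(range(len(P_i)), weights=P_i, k=1)[0]  # draw pair l ~ P_i
    src, tgt = next(batch_iters[l])                           # mini-batch B_l
    logits = model(src, tgt)
    ce = F.cross_entropy(logits, tgt, reduction="sum")        # L_ce(B_l; theta_i)
    with torch.no_grad():
        teacher_logits = peer(src, tgt)                       # theta_{3-i} as teacher
    kd = -(F.softmax(teacher_logits, -1) * F.log_softmax(logits, -1)).sum()
    loss = (1.0 - alpha_i[l]) * ce + alpha_i[l] * kd          # Eq. 4
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```
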
# 3.2 UNI-PMD and BI-PMD

Multilingual distillation weights $\alpha_{i}$ play important roles in Pareto-MD. We present two strategies, unidirectional Pareto mutual distillation (UNI-PMD) and bidirectional Pareto mutual distillation (BI-PMD), for determining the values of $\alpha_{i}$ based on different design philosophies.

UNI-PMD. UNI-PMD is designed on the intuition that each model should only learn from the strengths, and avoid mimicking the shortcomings, of the other model. Therefore, in each translation direction $\ell$, UNI-PMD lets the model that performs less well, denoted by $\theta_{\ell}^{worse}$, be distilled by the model that performs better in this direction, denoted by $\theta_{\ell}^{better}$, via a positive distillation weight. Conversely, UNI-PMD zeroes the weight to forbid $\theta_{\ell}^{better}$ from being influenced by $\theta_{\ell}^{worse}$.

Formally, given multilingual validation datasets $\{D_{\ell}^{valid}\}_{\ell = 1}^{|L|}$ and a pre-defined hyper-parameter $\alpha \in [0,1]$, in each direction $\ell \in L$, UNI-PMD sets the distillation weight of $\theta_{i}$ as:

$$
\boldsymbol{\alpha}_{i}[\ell] = \alpha \times \mathbb{1}\left\{ i = \underset{j \in \{1, 2\}}{\arg\max}\, \mathcal{L}_{ce}\left(D_{\ell}^{valid}; \theta_{j}\right) \right\}, \tag{5}
$$

where $\mathbb{1}\{\cdot\}$ is an indicator function, indicating whether the model $\theta_{i}$ performs less well on the translation of $\ell$. UNI-PMD updates the distillation weights every $\mathcal{T}$ steps.

BI-PMD. Besides, we design another strategy, BI-PMD, based on the hypothesis that, among the two models trained with Pareto-MD, in each translation direction $\ell$, $\theta_{\ell}^{worse}$ may also improve $\theta_{\ell}^{better}$ via knowledge distillation. This hypothesis is motivated by the recently proposed theoretical framework of Multi-View Data (Allen-Zhu and Li, 2020; He and Ozay, 2021), which theoretically reveals that each well-trained network only captures a different subset of relevant features, limiting its generalization. The mechanism of knowledge distillation is to help one model learn the relevant features of another model.



Figure 3: Process of AUTO-PMD updating the distillation weights. At the $k$-th update, AUTO-PMD makes three trials that perform three actions on all language pairs' weights and then train the current model. Finally, the language-specific optimal actions are selected to update the previous weights. Note that the value of each weight changes by different magnitudes when increased or decreased, due to the non-linear nature of the sigmoid function.

This discovery motivates us to suspect that $\theta_{\ell}^{worse}$ can also improve $\theta_{\ell}^{better}$ via distillation, as $\theta_{\ell}^{worse}$ may possess relevant features that $\theta_{\ell}^{better}$ lacks. Therefore, BI-PMD allows $\theta_{\ell}^{worse}$ to affect $\theta_{\ell}^{better}$ in direction $\ell$. Our implementation is simple: BI-PMD sets all distillation weights to a positive value. Formally, given a hyper-parameter $\alpha$, the distillation weight of $\theta_{i}$ in direction $\ell$ is:

$$
\boldsymbol{\alpha}_{i}[\ell] = \alpha, \tag{6}
$$

meaning that each model affects the other equally.

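
The two strategies differ only in how the weight vectors are filled. A sketch, under the assumption that `dev_losses[i][l]` holds $\mathcal{L}_{ce}(D_{\ell}^{valid}; \theta_i)$ for the two models:

```python
def uni_pmd_weights(dev_losses, alpha: float):
    """Eq. 5: per direction, only the model with the higher dev loss
    (the worse one) receives a positive distillation weight."""
    num_pairs = len(dev_losses[0])
    alphas = [[0.0] * num_pairs for _ in range(2)]
    for l in range(num_pairs):
        worse = 0 if dev_losses[0][l] > dev_losses[1][l] else 1
        alphas[worse][l] = alpha  # the worse model learns from the better one
    return alphas

def bi_pmd_weights(num_pairs: int, alpha: float):
    """Eq. 6: both models distill from each other with the same weight."""
    return [[alpha] * num_pairs for _ in range(2)]
```
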
# 3.3 AUTO-PMD

Desiderata. Both UNI-PMD and BI-PMD determine the distillation weights of all translation directions based on a pre-defined hyper-parameter $\alpha$, which fails to satisfy the following three expected properties of distillation weights: 1) Language-Adaptability: the optimal distillation weights for different language pairs vary, yet the current strategies set a uniform weight for all language pairs, resulting in sub-optimal performance; 2) Dynamics: existing research on mutual distillation uses a fixed distillation weight throughout the training process, which fails to adapt to the evolving models; 3) Generality: it is empirically discovered that the optimal value of the distillation weight varies across different datasets, incurring the extra cost of a manual hyper-parameter search. To satisfy these three properties, we propose Automatic Pareto Mutual Distillation (AUTO-PMD), which automatically decides the value of each direction's distillation weight according to training dynamics.

Approach. AUTO-PMD updates the multilingual distillation weight vector $\alpha_{i}$ every $\mathcal{T}$ steps. We denote the values of $\alpha_{i}$ after the $k$-th update by $\alpha^{k}$; the subscript $i$ of $\alpha_{i}$ is omitted for clarity. The update process is modeled as a Markov chain (Norris and Norris, 1998). All distillation weights are initialized at the beginning of training to a small value, i.e., $\alpha^{0}[\ell] = 0.1$. Three actions on a distillation weight are defined:

$$
\mathcal{F} = \left\{ f_{\uparrow}(\cdot),\ f_{\downarrow}(\cdot),\ f_{=}(\cdot) \right\}, \tag{7}
$$

which increase, decrease, and keep unchanged the value of the distillation weight, respectively. At the $k$-th update, AUTO-PMD decides the values of $\alpha^{k}$ according to the previous state $\alpha^{k-1}$. We exemplify each update step in Fig. 3 and describe it precisely in Alg. 2. As illustrated in Fig. 3, the update process is divided into three steps.

In the first step, given the previous distillation weights $\alpha^{k-1}$, AUTO-PMD makes three trials, generating three multilingual distillation weight vectors for the trial training of the next step. Each vector is obtained by performing one action (e.g., increasing) on all values of $\alpha^{k-1}$. These three vectors, corresponding to the three colored vectors in Fig. 3, form a set referred to as the search space $\widetilde{O}^{k}$. In principle, the trial training of the next step should be conducted over the entire search space $O^{k}$, which is the Cartesian product of the possible subsequent states of each language-specific distillation weight $\alpha^{k-1}[\ell]$:

$$
O^{k} = \times_{\ell \in L} \left\{ f\left(\boldsymbol{\alpha}^{k-1}[\ell]\right) \mid f \in \mathcal{F} \right\}. \tag{8}
$$

However, this search space grows exponentially as the number of languages increases, that is, $|O^{k}| = |\mathcal{F}|^{|L|}$. To avoid this non-trivial cost, the subspace $\widetilde{O}^{k}$ is adopted. Furthermore, we prove that, under the Distillation Weights Independence assumption, the optimal solution searched in $\widetilde{O}^{k}$ is equivalent to that of $O^{k}$. The mathematical description of this assumption and the proof are given in §A.

Algorithm 2: AUTO-PMD

Input: Multilingual trial datasets $\{D_{\ell}^{trial}\}_{\ell = 1}^{|L|}$, validation datasets $\{D_{\ell}^{valid}\}_{\ell = 1}^{|L|}$, the training models $\theta_{1}$ and $\theta_{2}$, search spaces $\widetilde{O}_{1}^{k}$, $\widetilde{O}_{2}^{k}$, distillation weights $\alpha_{1}^{k-1}, \alpha_{2}^{k-1}$
Output: $\alpha_{1}^{k}, \alpha_{2}^{k}$
Initialize: Initialize trial results $\mathcal{R} \in \mathbb{R}^{|L| \times |\widetilde{O}_{i}^{k}|}$ to a zero matrix
for $i \gets 1$ to $2$ do
    for $j \gets 1$ to $|\widetilde{O}_{i}^{k}|$ do
        $\alpha_{i}^{\prime} \gets \widetilde{O}_{i}^{k}[j]$
        Copy model $\theta_{i}^{\prime} \gets \theta_{i}$
        Train $\theta_{i}^{\prime}$ on $D^{trial}$ for one epoch using teacher model $\theta_{3-i}$ and $\alpha_{i}^{\prime}$ with Eq. 4
        for $\ell \gets 1$ to $|L|$ do
            $\mathcal{R}[\ell][j] \gets \mathcal{L}_{ce}(D_{\ell}^{valid}; \theta_{i}^{\prime})$
        end
    end
    for $\ell \gets 1$ to $|L|$ do
        $\hat{j} \gets \operatorname{argmin}_{j} \mathcal{R}[\ell][j]$
        $\alpha_{i}^{k}[\ell] \gets \widetilde{O}_{i}^{k}[\hat{j}][\ell]$
    end
end

Next, AUTO-PMD uses each distillation weight vector in $\widetilde{O}^{k}$ to train the current model for one epoch on a trial set $D^{trial}$, which is constructed by sampling a fraction $\rho$ of $D^{train}$. The three trained models are evaluated on the validation set, and their language-specific dev losses form a matrix of trial results $\mathcal{R} \in \mathbb{R}^{|L| \times |\widetilde{O}^{k}|}$. The model training of this step incurs an overhead proportional to $\rho \times |\widetilde{O}^{k}|$. In this work, we set $\rho = 0.1$; the extra overhead is thereby $30\%$ of the actual model training.

Finally, the language-specific optimal actions are selected according to the trial results and then performed on $\alpha^{k-1}[\ell]$, yielding $\alpha^{k}[\ell]$. We exemplify this step with Fig. 3: the red model, trained with the increased version of $\alpha^{k-1}$ (the vector in red), achieves the best performance on $Fr \to En$; thus, the $\alpha^{k}[\ell]$ of $Fr \to En$ is obtained by increasing the $\alpha^{k-1}[\ell]$ of $Fr \to En$.

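
In code, one AUTO-PMD update could be sketched as follows; `train_one_epoch` (trial training with Eq. 4) and `dev_loss` are hypothetical helpers, and only the restricted space $\widetilde{O}^{k}$ of three vectors is searched:

```python
import copy

def auto_pmd_update(alpha_prev, model, peer, trial_data, valid_sets, actions):
    """Alg. 2 for one model: try each action applied uniformly to all weights,
    then pick, per language pair, the action whose trial had the lowest dev loss."""
    trial_vectors = [[f(a) for a in alpha_prev] for f in actions]  # O~^k: |F| vectors
    results = []  # results[j][l] = L_ce on D_l^valid after trial j
    for alpha_trial in trial_vectors:
        candidate = copy.deepcopy(model)          # leave the real model untouched
        train_one_epoch(candidate, peer, trial_data, alpha_trial)  # hypothetical helper
        results.append([dev_loss(candidate, d) for d in valid_sets])
    best = [min(range(len(actions)), key=lambda j: results[j][l])
            for l in range(len(alpha_prev))]      # optimal action index per pair
    return [trial_vectors[best[l]][l] for l in range(len(alpha_prev))]
```
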
Implementation of Actions. As aforementioned, three actions for updating distillation weights are defined (in Eq. 7). The $f_{=}(\cdot)$ is simple:

$$
f_{=}(\boldsymbol{\alpha}[\ell]) = \boldsymbol{\alpha}[\ell]. \tag{9}
$$

For $f_{\uparrow}(\cdot)$ and $f_{\downarrow}(\cdot)$, the output must always lie in $[0, 1]$. Therefore, the input is first mapped into $(-\infty, +\infty)$ using the inverse of the sigmoid function, then increased/decreased by a value $\mu$, named the step size, and finally mapped back into $[0, 1]$ using the sigmoid function. Formally:

$$
f_{\uparrow}(\boldsymbol{\alpha}[\ell]) = \sigma\left(\sigma^{-1}(\boldsymbol{\alpha}[\ell]) + \mu\right) \tag{10}
$$

$$
f_{\downarrow}(\boldsymbol{\alpha}[\ell]) = \sigma\left(\sigma^{-1}(\boldsymbol{\alpha}[\ell]) - \mu\right) \tag{11}
$$

where $\sigma(\cdot)$ is the sigmoid function. The step size $\mu$ is crucial for the weight search: a smaller step size improves the precision of the searched weights but may delay convergence to the optimal weight. Therefore, we design a step size scheduler, setting a large step size in the early training stage and then reducing it:

$$
\mu = \sqrt{\frac{\mathcal{T}_{max} - t}{\mathcal{T}_{max}}}, \tag{12}
$$

where $\mathcal{T}_{max}$ is the maximum number of training steps.

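
Eqs. 9-12 translate directly into a few lines; a sketch, assuming weights strictly inside $(0, 1)$, which holds given the initialization $\alpha^{0}[\ell] = 0.1$:

```python
import math

def sigmoid(x: float) -> float:
    return 1.0 / (1.0 + math.exp(-x))

def logit(p: float) -> float:
    """Inverse sigmoid; requires 0 < p < 1."""
    return math.log(p / (1.0 - p))

def f_up(alpha: float, mu: float) -> float:
    """Eq. 10: step up in logit space, so the result stays in (0, 1)."""
    return sigmoid(logit(alpha) + mu)

def f_down(alpha: float, mu: float) -> float:
    """Eq. 11: step down in logit space."""
    return sigmoid(logit(alpha) - mu)

def step_size(t: int, t_max: int) -> float:
    """Eq. 12: decay the search step size from 1 to 0 over training."""
    return math.sqrt((t_max - t) / t_max)
```
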
# 4 Experiments

# 4.1 Settings

Datasets. We conduct experiments on two datasets: the WMT-6 dataset provided by Huang et al. (2022) and the widely-used TED-8-Diverse dataset constructed by Wang et al. (2020). The WMT-6 dataset involves the language pairs of 3 LRLs (et, ro, tr) and 3 HRLs (fr, de, zh) to English; it has around 5M training sentences from parallel corpora provided by WMT over multiple years, and the corresponding validation and test sets are used. The TED-8-Diverse dataset contains the language pairs of 4 LRLs (bos, mar, hin, mkd) and 4 HRLs (ell, bul, fra, kor) to English and comprises around 570K sentence pairs. The data statistics and the interpretation of language codes are given in Appendix B. Compared to TED-8-Diverse, the WMT-6 dataset is considerably larger and more unevenly distributed.

<table><tr><td rowspan="2">Method</td><td rowspan="2">Sampling</td><td colspan="2">WMT-6</td><td colspan="2">TED-8-DIVERSE</td></tr><tr><td>Many-to-One</td><td>One-to-Many</td><td>Many-to-One</td><td>One-to-Many</td></tr><tr><td colspan="6">Existing Balancing Training Strategies</td></tr><tr><td>TEMPERATURE SAMPLING</td><td>τ = 1</td><td>20.57</td><td>18.92</td><td>29.00</td><td>22.75</td></tr><tr><td>TEMPERATURE SAMPLING</td><td>τ > 1</td><td>19.93</td><td>18.63</td><td>28.35</td><td>22.23</td></tr><tr><td>MULTIDDS-S (Wang et al., 2020)*</td><td>dyn.</td><td>-</td><td>-</td><td>27.00</td><td>18.24</td></tr><tr><td>MULTIUAT (Wu et al., 2021)*</td><td>dyn.</td><td>-</td><td>-</td><td>27.83</td><td>19.76</td></tr><tr><td>CCL-M (Zhang et al., 2021)*</td><td>dyn.</td><td>-</td><td>-</td><td>28.34</td><td>19.53</td></tr><tr><td>χ-IBR (Zhou et al., 2021)*</td><td>dyn.</td><td>-</td><td>-</td><td>29.74</td><td>23.44</td></tr><tr><td colspan="6">Existing Knowledge Distillation-based Strategies</td></tr><tr><td>MULTI-DISTILL (Tan et al., 2019)</td><td>τ = 1</td><td>20.18</td><td>18.57</td><td>29.52</td><td>22.31</td></tr><tr><td>LSSD (Huang et al., 2022)</td><td>τ = 1</td><td>21.17</td><td>19.76</td><td>30.77</td><td>23.55</td></tr><tr><td colspan="6">Our Pareto Mutual Distillation</td></tr><tr><td rowspan="2">UNI-PMD</td><td>τ = 1</td><td>20.76†</td><td>18.96</td><td>29.76†</td><td>22.92</td></tr><tr><td>τ > 1</td><td>21.74†</td><td>19.76†</td><td>29.97†</td><td>22.91</td></tr><tr><td rowspan="2">Bi-PMD</td><td>τ = 1</td><td>21.61†</td><td>19.53†</td><td>30.31†</td><td>23.00†</td></tr><tr><td>τ > 1</td><td>21.92†</td><td>20.09†</td><td>30.42†</td><td>22.77</td></tr><tr><td rowspan="2">AUTO-PMD</td><td>τ = 1</td><td>21.89†</td><td>20.16†</td><td>31.05†</td><td>23.31†</td></tr><tr><td>τ > 1</td><td>22.39†</td><td>20.48†</td><td>30.71†</td><td>23.28†</td></tr></table>

Table 1: BLEU scores on the WMT-6 and TED-8-Diverse datasets. Bold indicates the highest BLEU score in each setting. * means results taken from the original paper. † indicates significantly better than temperature-based sampling with t-test $p < 0.001$. Temperature-based sampling is tried with $\tau = \{1, 5\}$ on WMT-6 and $\tau = \{1, 3\}$ on TED-8-Diverse. For each of our approaches, the first row is the result of model-1 and the second row is the result of model-2. "dyn." is the abbreviation for "dynamic sampling."

For each dataset, our approach is evaluated in two multilingual translation scenarios: 1) MANY-TO-ONE (M2O): translating multiple languages to English; 2) ONE-TO-MANY (O2M): translating English to the other languages.

Hyper-parameters. Although our training framework can be applied to any model architecture, we verify its effectiveness on the popular Transformer (Vaswani et al., 2017), base version, as implemented in fairseq (Ott et al., 2019). We use the same model configuration, hyper-parameters, and preprocessing procedure as Huang et al. (2022) for all baselines and our method; the only difference is that the dropout rate is changed to 0.2 on WMT-6 to accelerate convergence without performance loss. The complete set of hyper-parameters is given in Appendix C. Performance is evaluated with the BLEU score (Papineni et al., 2002) using the SacreBLEU toolkit (Post, 2018).

As illustrated in §3.1, Pareto-MD trains two models using different sampling distributions, $P_{1}$ and $P_{2}$, and we adopt temperature-based sampling with different values of $\tau$ to produce these two distributions. On WMT-6, we set $\tau = 1$ for $P_{1}$ and $\tau = 5$ for $P_{2}$. On TED-8-Diverse, we set $\tau = 1$ for model-1 and $\tau = 3$ for model-2, since an overly large value leads to poor performance. For UNI-PMD and BI-PMD, we manually search for the optimal $\alpha$ (in Eq. 5 and Eq. 6) among $\{0.2, 0.4, 0.6, 0.8\}$. The update interval of distillation weights $\mathcal{T}$ is set to the number of steps in one epoch.

Baselines. We primarily compare Pareto-MD with: 1) Temperature-based Sampling, the method most related to our work; 2) $\chi$-IBR (Zhou et al., 2021), the state-of-the-art (SOTA) dynamic sampling method, which enables balanced training based on distributionally robust optimization; 3) LSSD (Huang et al., 2022), another distillation-based training strategy, which achieves SOTA performance on TED-8-Diverse and WMT-6 by alleviating the convergence inconsistency problem of MNMT via self-distillation. More details of the baselines are given in Appendix D.

# 4.2 Main Results

We summarize the main results in Table 1. As observed, our methods significantly outperform temperature-based sampling under the M2O and O2M settings on both datasets; model-2 trained with AUTO-PMD improves by up to +2.46 BLEU under the M2O setting of WMT-6.


(a) Many-to-One


(b) One-to-Many

Figure 4: Multilingual performance Pareto frontier on the WMT-6 dataset. Gray dotted curves indicate the Pareto frontier of the baselines, and the colored ones mark the frontier achieved by AUTO-PMD. The figure shows that the Pareto frontier is pushed outwards significantly.

Furthermore, Pareto-MD achieves higher BLEU scores than previous methods in most settings. At best, AUTO-PMD outperforms the previous SOTA (LSSD) by +1.22 BLEU under the M2O setting of WMT-6. Comparing UNI-PMD and BI-PMD, BI-PMD consistently exceeds UNI-PMD, verifying the motivation that the worse model can also improve the better model via knowledge distillation. AUTO-PMD further surpasses BI-PMD by +0.3~0.5 BLEU, which shows that our automatic search of distillation weights is indeed reliable. Moreover, AUTO-PMD is more general than UNI-PMD and BI-PMD since it eliminates the need to search for the hyper-parameter $\alpha$ manually<sup>3</sup>.

# 5 Analysis

# 5.1 Visualization of Pareto Frontier

In order to clearly assess the impact of our methods on HRLs and LRLs, we visualize the Pareto frontier in Fig. 4. Three important observations can be drawn: 1) overall, model-1 is shifted significantly to the right and model-2 is shifted upwards, proving that Pareto-MD effectively alleviates the shortcomings of each model as expected; 2) both model-1 and model-2 are shifted right beyond the original model-2, indicating that the performance of LRLs is improved beyond the original performance bound; the reason may be that transfer learning from HRLs to LRLs is more effective when the model achieves high performance on both HRLs and LRLs; 3) model-1 degenerates on the translation of HRLs in the O2M setting; one potential cause is that the representation space of HRLs undergoes more intense squeezing in the O2M setting than in the M2O setting when the model learns well on LRLs.

<table><tr><td>Method</td><td>Sampling</td><td>BLEU</td></tr><tr><td rowspan="2">Vanilla MD</td><td>τ = 1</td><td>20.93</td></tr><tr><td>τ = 1</td><td>20.97</td></tr><tr><td rowspan="2">Vanilla MD</td><td>τ = 5</td><td>21.13</td></tr><tr><td>τ = 5</td><td>21.29</td></tr><tr><td rowspan="2">BI-PMD</td><td>τ = 1</td><td>21.61</td></tr><tr><td>τ = 5</td><td>21.92</td></tr><tr><td rowspan="2">AUTO-PMD</td><td>τ = 1</td><td>21.89</td></tr><tr><td>τ = 5</td><td>22.39</td></tr></table>

Table 2: Comparison between our method and vanilla mutual distillation (Vanilla MD) under the Many-to-One setting of the WMT-6 dataset.

# 5.2 Effect of Diverse Sampling Strategies

In the Pareto-MD training framework, two models corresponding to different Pareto optimal solutions are trained collaboratively using distinct training distributions. A natural question arises: how would the performance be affected if we trained the two models with the same training distribution? This setup, in fact, degenerates into vanilla mutual distillation. We therefore conduct a comparison experiment on the WMT-6 dataset (M2O setting), shown in Table 2. The results indicate that vanilla mutual distillation underperforms our BI-PMD by about 0.6 BLEU, which supports the effectiveness of using different sampling distributions in Pareto-MD. Moreover, our AUTO-PMD improves over vanilla mutual distillation by +1.1 BLEU in total.

# 5.3 Evolution of Distillation Weights

To better understand the process of AUTO-PMD, we visualize the automatically searched distillation weights in Fig. 5. As it depicts, the distillation weights constantly vary to adapt to the evolving models, with a decreasing variance caused by the decay of the search step size (Eq. 12). Besides, we observe that the low-resource $\mathrm{Tr} \rightarrow \mathrm{En}$ direction favors a higher distillation weight than the high-resource $\mathrm{Fr} \rightarrow \mathrm{En}$ direction. This makes sense, since LRLs suffer from more serious over-fitting (Huang et al., 2022) and thus require stronger distillation learning.


(a) $\mathrm{Fr}\rightarrow \mathrm{En}$


(b) $\mathrm{Tr}\rightarrow \mathrm{En}$

Figure 5: Visualization of the automatically searched distillation weights in the many-to-one setting of the WMT-6 dataset. Due to space limitations, we only show the weights of one HRL ($\mathrm{Fr}\rightarrow \mathrm{En}$) and one LRL ($\mathrm{Tr}\rightarrow \mathrm{En}$).

# 5.4 Effect of Step Size Scheduler $\mu$

The performance of different step size schedulers is listed in Table 3. The simple scheduler-1 fixes the step size to 1.0 and performs relatively poorly. Scheduler-2 decreases the step size from 1.0 to 0.2. Scheduler-4 decreases the step size from 1.0 to 0.0 and achieves the best performance. Scheduler-3 also decreases the step size from 1.0 to 0.0, but reaches zero before training ends, so no distillation-weight search is performed at the end of training. We finally adopt scheduler-4 in AUTO-PMD.

# 6 Related Work

For a long time, data imbalance has been a problem hindering multilingual models from performing evenly across different languages. Existing methods pursue balanced performance by designing heuristic (Arivazhagan et al., 2019) or automatic sampling strategies (Arivazhagan et al., 2019; Wang et al., 2020; Zhou et al., 2021; Wu et al., 2021; Zhang et al., 2021). For example, Wang et al. (2020) design a Reinforcement Learning based method to automatically adjust the sampling probability of each language pair towards an overall optimal solution. Zhou et al. (2021) vary the distribution via distributionally robust optimization. However, their improvement is limited, since increasing the training weights of some languages leads to relative decreases in the weights of other languages, resulting in a trade-off on the Pareto frontier. Different from these methods, we overcome this issue by training two models collaboratively.

<table><tr><td>#</td><td>Scheduler</td><td>BLEU (τ = 1 / τ = 5)</td></tr><tr><td>1</td><td>$\mu = 1$</td><td>20.71 / 21.80</td></tr><tr><td>2</td><td>$\mu = \sqrt{(\mathcal{T}_{max} - 0.8t)/\mathcal{T}_{max}}$</td><td>21.90 / 22.21</td></tr><tr><td>3</td><td>$\mu = \max\left(\sqrt{(\mathcal{T}_{max} - 1.2t)/\mathcal{T}_{max}},\ 0\right)$</td><td>21.74 / 22.31</td></tr><tr><td>4</td><td>$\mu = \sqrt{(\mathcal{T}_{max} - t)/\mathcal{T}_{max}}$</td><td>21.89 / 22.39</td></tr></table>

Table 3: Effect of the step size scheduler $\mu$ in the many-to-one translation of the WMT-6 dataset. We tried four implementations of the step size scheduler.

Before our work, two approaches in MNMT were also based on knowledge distillation. Tan et al. (2019) use pre-trained bilingual models to teach the multilingual model via knowledge distillation. Huang et al. (2022) propose language-specific self-distillation to remedy the convergence inconsistency problem in MNMT. Our Pareto-MD is an extension of mutual distillation to Pareto optimization problems.

# 7 Conclusion
|
| 280 |
+
|
| 281 |
+
In this work, we propose a training framework, Pareto-MD, to reach a higher Pareto frontier for MNMT. The core of Pareto-MD is the synergy between diverse Pareto optimal solutions via mutual distillation. Besides, we design a novel strategy for deducing distillation weights automatically, achieving better performance and dispensing with hyperparameter search. Experimental results on the WMT and TED datasets show the effectiveness of our method. Even though we experiment with training two models in this work, our method naturally applies to training more models.
|
| 282 |
+
|
| 283 |
+
In the future, we are interested in exploring how to apply our Pareto-MD to the training of large language models (Zhao et al., 2023).
|
| 284 |
+
|
| 285 |
+
# Limitations
|
| 286 |
+
|
| 287 |
+
Our Pareto-MD doubles the computational cost by training two models simultaneously, which can be a limitation of our approach. However, Pareto-MD obtains significant improvements that are hard to achieve with previous methods that train individual models, and is thus worthwhile. Besides, our approach does not necessarily double the training time because the two models can be trained in parallel, as implemented by Guo et al. (2020). Moreover, Pareto-MD does not affect inference efficiency.
|
| 288 |
+
|
| 289 |
+
# Acknowledgements
|
| 290 |
+
|
| 291 |
+
Xiaocheng Feng is the corresponding author of this work. We thank the anonymous reviewers for their insightful comments. This work was supported by the National Key R&D Program of China via grant 2020AAA0106502, National Natural Science Foundation of China (NSFC) via grant 62276078, the Key R&D Program of Heilongjiang via grant 2022ZX01A32 and the International Cooperation Project of PCL, PCL2022D01.
|
| 292 |
+
|
| 293 |
+
# References
|
| 294 |
+
|
| 295 |
+
Roee Aharoni, Melvin Johnson, and Orhan Firat. 2019. Massively multilingual neural machine translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3874-3884, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 296 |
+
Zeyuan Allen-Zhu and Yuanzhi Li. 2020. Towards understanding ensemble, knowledge distillation and self-distillation in deep learning.
|
| 297 |
+
Naveen Arivazhagan, Ankur Bapna, Orhan Firat, Dmitry Lepikhin, Melvin Johnson, Maxim Krikun, Mia Xu Chen, Yuan Cao, George F. Foster, Colin Cherry, Wolfgang Macherey, Zhifeng Chen, and Yonghui Wu. 2019. Massively multilingual neural machine translation in the wild: Findings and challenges. CoRR, abs/1907.05019.
|
| 298 |
+
Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In ICLR.
|
| 299 |
+
Raj Dabre, Chenhui Chu, and Anoop Kunchukuttan. 2020. A survey of multilingual neural machine translation. ACM Computing Surveys (CSUR), 53(5):1-38.
|
| 300 |
+
|
| 301 |
+
Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, et al. 2021. Beyond english-centric multilingual machine translation. J. Mach. Learn. Res., 22(107):1-48.
|
| 302 |
+
Orhan Firat, Kyunghyun Cho, and Yoshua Bengio. 2016. Multi-way, multilingual neural machine translation with a shared attention mechanism. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 866-875, San Diego, California. Association for Computational Linguistics.
|
| 303 |
+
Qiushan Guo, Xinjiang Wang, Yichao Wu, Zhipeng Yu, Ding Liang, Xiaolin Hu, and Ping Luo. 2020. Online knowledge distillation via collaborative learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11020-11029.
|
| 304 |
+
Thanh-Le Ha, Jan Niehues, and Alex Waibel. 2016. Toward multilingual neural machine translation with universal encoder and decoder. In Proceedings of the 13th International Conference on Spoken Language Translation, Seattle, Washington D.C. International Workshop on Spoken Language Translation.
|
| 305 |
+
Bobby He and Mete Ozay. 2021. Feature kernel distillation. In International Conference on Learning Representations.
|
| 306 |
+
Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network.
|
| 307 |
+
Yichong Huang, Xiaocheng Feng, Xinwei Geng, and Bing Qin. 2022. Unifying the convergences in multilingual neural machine translation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.
|
| 308 |
+
Melvin Johnson, Mike Schuster, Quoc V. Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Viégas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: Enabling zero-shot translation. Transactions of the Association for Computational Linguistics, 5:339-351.
|
| 309 |
+
Nal Kalchbrenner and Phil Blunsom. 2013. Recurrent continuous translation models. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1700-1709, Seattle, Washington, USA. Association for Computational Linguistics.
|
| 310 |
+
Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In ICLR.
|
| 311 |
+
Taku Kudo and John Richardson. 2018. SentencePiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical
|
| 312 |
+
|
| 313 |
+
Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.
|
| 314 |
+
NLLB Team, Marta R. Costa-jussà, James Cross, Onur Celebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. 2022. No language left behind: Scaling human-centered machine translation.
|
| 315 |
+
James R. Norris. 1998. Markov Chains. Cambridge University Press.
|
| 316 |
+
Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations), pages 48-53, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 317 |
+
Myle Ott, Sergey Edunov, David Grangier, and Michael Auli. 2018. Scaling neural machine translation. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 1-9, Brussels, Belgium. Association for Computational Linguistics.
|
| 318 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.
|
| 319 |
+
Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191, Brussels, Belgium. Association for Computational Linguistics.
|
| 320 |
+
Aditya Siddhant, Ankur Bapna, Orhan Firat, Yuan Cao, Mia Xu Chen, Isaac Caswell, and Xavier Garcia. 2022. Towards the next 1000 languages in multilingual machine translation: Exploring the synergy between supervised and self-supervised learning. arXiv preprint arXiv:2201.03110.
|
| 321 |
+
Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: A simple way to prevent neural networks from overfitting. JMLR.
|
| 322 |
+
|
| 323 |
+
Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural networks. In NeurIPS.
|
| 324 |
+
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. 2016. Rethinking the inception architecture for computer vision. In CVPR.
|
| 325 |
+
Xu Tan, Yi Ren, Di He, Tao Qin, and Tie-Yan Liu. 2019. Multilingual neural machine translation with knowledge distillation. In International Conference on Learning Representations.
|
| 326 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In NeurIPS.
|
| 327 |
+
Xinyi Wang, Yulia Tsvetkov, and Graham Neubig. 2020. Balancing training for multilingual neural machine translation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8526-8537, Online. Association for Computational Linguistics.
|
| 328 |
+
Minghao Wu, Yitong Li, Meng Zhang, Liangyou Li, Gholamreza Haffari, and Qun Liu. 2021. Uncertainty-aware balancing for multilingual and multi-domain neural machine translation training. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7291-7305, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 329 |
+
Jian Yang, Shuming Ma, Haoyang Huang, Dongdong Zhang, Li Dong, Shaohan Huang, Alexandre Muzio, Saksham Singhal, Hany Hassan, Xia Song, and Furu Wei. 2021. Multilingual machine translation systems from Microsoft for WMT21 shared task. In Proceedings of the Sixth Conference on Machine Translation, pages 446-455, Online. Association for Computational Linguistics.
|
| 330 |
+
Mingliang Zhang, Fandong Meng, Yunhai Tong, and Jie Zhou. 2021. Competence-based curriculum learning for multilingual machine translation. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 2481-2493, Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 331 |
+
Ying Zhang, Tao Xiang, Timothy M Hospedales, and Huchuan Lu. 2018. Deep mutual learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4320-4328.
|
| 332 |
+
Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, Yifan Du, Chen Yang, Yushuo Chen, Zhipeng Chen, Jinhao Jiang, Ruiyang Ren, Yifan Li, Xinyu Tang, Zikang Liu, Peiyu Liu, Jian-Yun Nie, and Ji-Rong Wen. 2023. A survey of large language models.
|
| 333 |
+
|
| 334 |
+
Chunting Zhou, Daniel Levy, Xian Li, Marjan Ghazvininejad, and Graham Neubig. 2021. Distributionally robust multilingual machine translation. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 5664-5674, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 335 |
+
|
| 336 |
+
# A Equivalence Between Searching in $O^k$ and $\widetilde{O}^k$
|
| 337 |
+
|
| 338 |
+
As illustrated in §3.3, our strategy AUTO-PMD first searches for the language-specific optimal multilingual distillation weight vector $\hat{\alpha}^{\ell}$ for each translation direction $\ell$ from a search space, and then takes $\hat{\alpha}^{\ell}[\ell]$ as the search result for $\alpha^{k}[\ell]$. To find the optimal solution, the search space should be the entire space $O^{k}$, which is formalized as:
|
| 339 |
+
|
| 340 |
+
$$
|
| 341 |
+
O^{k} = \underset{\ell \in L}{\times} \left\{ f\left(\boldsymbol{\alpha}^{k-1}[\ell]\right) \mid f \in \mathcal{F} \right\},
|
| 342 |
+
$$
|
| 343 |
+
|
| 344 |
+
However, the size of $O^k$ grows exponentially as the number of languages increases. Therefore, we instead search in $\widetilde{O}^k$ , a subset of $O^k$ , which is formalized as:
|
| 345 |
+
|
| 346 |
+
$$
|
| 347 |
+
\widetilde{O}^{k} = \Big\{ \{f_{\uparrow}(\boldsymbol{\alpha}^{k-1}[\ell])\}_{\ell \in L},\ \{f_{\downarrow}(\boldsymbol{\alpha}^{k-1}[\ell])\}_{\ell \in L},\ \{f_{=}(\boldsymbol{\alpha}^{k-1}[\ell])\}_{\ell \in L} \Big\}.
|
| 348 |
+
$$
|
| 349 |
+
|
| 350 |
+
In this section, we first give a formal definition of the searching process, then introduce the Distillation Weights Independence (DWI) assumption, and finally prove the equivalence between searching in $O^k$ and $\widetilde{O}^k$ under the DWI assumption.
|
| 351 |
+
|
| 352 |
+
Definition A.1 (Searching Process). Given the multilingual trial set $D^{trial} = \{D_{\ell}^{trial}\}_{\ell=1}^{|L|}$, validation set $D^{valid} = \{D_{\ell}^{valid}\}_{\ell=1}^{|L|}$, student model $\theta^S$, teacher model $\theta^T$, and the search space $O$, for each translation direction $\ell$, the searching process of $\alpha^k[\ell]$ is:
|
| 353 |
+
|
| 354 |
+
$$
|
| 355 |
+
\begin{aligned} \boldsymbol{\alpha}^{k}[\ell] &= \hat{\boldsymbol{\alpha}}^{\ell}[\ell] \\ \hat{\boldsymbol{\alpha}}^{\ell} &= \underset{\boldsymbol{\alpha} \in O}{\arg\min}\ \mathcal{L}_{ce}(D_{\ell}^{valid}; \hat{\theta}(\boldsymbol{\alpha})) \\ \hat{\theta}(\boldsymbol{\alpha}) &= \underset{\theta}{\arg\min}\ \mathcal{L}_{PMD}(D^{trial}; \theta^{S}, \theta^{T}, \boldsymbol{\alpha}). \end{aligned}
|
| 356 |
+
$$
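Restricted to $\widetilde{O}^k$, this searching process needs only three trial trainings (one per uniform action) instead of the $|\mathcal{F}|^{|L|}$ trainings required by the full space $O^k$. A minimal sketch, where `train_pmd` and `valid_ce` are hypothetical stand-ins for a short trial training with the PMD loss and the per-direction validation cross-entropy:

```python
def search_weights(alpha_prev, actions, train_pmd, valid_ce, directions):
    """Per-direction distillation-weight search over the reduced space O~^k.

    alpha_prev: dict direction -> current distillation weight
    actions:    dict name -> weight-update function (f_up, f_down, f_eq)
    """
    trial_losses = {}  # action name -> {direction: validation cross-entropy}
    for name, f in actions.items():
        alpha_trial = {l: f(alpha_prev[l]) for l in directions}
        theta_trial = train_pmd(alpha_trial)  # short trial training on D^trial
        trial_losses[name] = {l: valid_ce(theta_trial, l) for l in directions}
    # for each direction, keep the action whose trial model had the lowest CE
    return {
        l: actions[min(trial_losses, key=lambda a: trial_losses[a][l])](alpha_prev[l])
        for l in directions
    }
```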
|
| 357 |
+
|
| 358 |
+
Hypothesis A.1 (Distillation Weights Independence). Given two multilingual distillation weight vectors $\alpha_{1}$ and $\alpha_{2}$ :
|
| 359 |
+
|
| 360 |
+
$$
|
| 361 |
+
\begin{aligned} &\exists \ell \in L,\ \boldsymbol{\alpha}_{1}[\ell] = \boldsymbol{\alpha}_{2}[\ell] \\ &\Rightarrow \mathcal{L}_{ce}(D_{\ell}^{valid}; \hat{\theta}(\boldsymbol{\alpha}_{1})) = \mathcal{L}_{ce}(D_{\ell}^{valid}; \hat{\theta}(\boldsymbol{\alpha}_{2})) \end{aligned}
|
| 362 |
+
$$
|
| 363 |
+
|
| 364 |
+
Theorem A.1. Let $\hat{\alpha}^{\ell}[\ell]$ denote the search result in the search space $O^{k}$ for direction $\ell$, and let $\widetilde{\alpha}^{\ell}[\ell]$ denote the search result in the search space $\widetilde{O}^{k}$ for direction $\ell$. Under the Distillation Weights Independence assumption, it holds that:
|
| 365 |
+
|
| 366 |
+
$$
|
| 367 |
+
\hat{\boldsymbol{\alpha}}^{\ell}[\ell] = \widetilde{\boldsymbol{\alpha}}^{\ell}[\ell].
|
| 368 |
+
$$
|
| 369 |
+
|
| 370 |
+
Proof. Let $\hat{\alpha}^{\ell}[\ell] = \hat{f}^{\ell}(\alpha^{k-1}[\ell])$, where $\hat{f}^{\ell} \in \mathcal{F}$ is the language-specific action. Then the following equation holds:
|
| 371 |
+
|
| 372 |
+
$$
|
| 373 |
+
\mathcal{L}_{ce}\big(D_{\ell}^{valid}; \hat{\theta}(\hat{\boldsymbol{\alpha}}^{\ell})\big) = \mathcal{L}_{ce}\big(D_{\ell}^{valid}; \hat{\theta}(\{\hat{f}^{\ell}(\boldsymbol{\alpha}^{k-1}[\ell'])\}_{\ell' \in L})\big),
|
| 374 |
+
$$
|
| 375 |
+
|
| 376 |
+
which follows from Hypothesis A.1. Because $\{\hat{f}^{\ell}(\boldsymbol{\alpha}^{k-1}[\ell'])\}_{\ell' \in L} \in \widetilde{O}^k$ and $\widetilde{O}^k \subseteq O^k$, we can infer that:
|
| 377 |
+
|
| 378 |
+
$$
|
| 379 |
+
\begin{aligned} &\Rightarrow \mathcal{L}_{ce}\big(D_{\ell}^{valid}; \hat{\theta}(\{\hat{f}^{\ell}(\boldsymbol{\alpha}^{k-1}[\ell'])\}_{\ell' \in L})\big) = \min_{\boldsymbol{\alpha} \in \widetilde{O}^{k}} \mathcal{L}_{ce}\big(D_{\ell}^{valid}; \hat{\theta}(\boldsymbol{\alpha})\big) \\ &\Rightarrow \{\hat{f}^{\ell}(\boldsymbol{\alpha}^{k-1}[\ell'])\}_{\ell' \in L} = \underset{\boldsymbol{\alpha} \in \widetilde{O}^{k}}{\arg\min}\ \mathcal{L}_{ce}\big(D_{\ell}^{valid}; \hat{\theta}(\boldsymbol{\alpha})\big) \\ &\Rightarrow \hat{f}^{\ell}(\boldsymbol{\alpha}^{k-1}[\ell]) = \widetilde{\boldsymbol{\alpha}}^{\ell}[\ell] \\ &\Rightarrow \hat{\boldsymbol{\alpha}}^{\ell}[\ell] = \widetilde{\boldsymbol{\alpha}}^{\ell}[\ell] \end{aligned}
|
| 380 |
+
$$
|
| 381 |
+
|
| 382 |
+

|
| 383 |
+
|
| 384 |
+
# B Data Statistics
|
| 385 |
+
|
| 386 |
+
We list the data statistics of the TED-8-Diverse dataset in Table 4 and those of the WMT-6 dataset in Table 5.
|
| 387 |
+
|
| 388 |
+
<table><tr><td>Language</td><td>Num</td></tr><tr><td>bos (Bosnian)</td><td>5,664</td></tr><tr><td>mar (Marathi)</td><td>9,840</td></tr><tr><td>hin (Hindi)</td><td>18,798</td></tr><tr><td>mkd (Macedonian)</td><td>25,335</td></tr><tr><td>ell (Greek)</td><td>134,327</td></tr><tr><td>bul (Bulgarian)</td><td>174,444</td></tr><tr><td>fra (French)</td><td>192,304</td></tr><tr><td>kor (Korean)</td><td>205,640</td></tr></table>
|
| 389 |
+
|
| 390 |
+
Table 4: Data statistics for the TED-8-Diverse dataset. 'Num' refers to the number of sentence pairs in the training set.
|
| 391 |
+
|
| 392 |
+
<table><tr><td>Language</td><td>Data Source</td><td>Num</td></tr><tr><td>tr (Turkish)</td><td>WMT17</td><td>5,000</td></tr><tr><td>ro (Romanian)</td><td>WMT16</td><td>10,000</td></tr><tr><td>et (Estonian)</td><td>WMT18</td><td>80,000</td></tr><tr><td>zh (Chinese)</td><td>WMT17</td><td>400,000</td></tr><tr><td>de (German)</td><td>WMT14</td><td>1,500,000</td></tr><tr><td>fr (French)</td><td>WMT14</td><td>3,000,000</td></tr></table>
|
| 393 |
+
|
| 394 |
+
Table 5: Data statistics for the WMT-6 dataset. 'Num' refers to the number of sentence pairs in the training set.
|
| 395 |
+
|
| 396 |
+
# C Hyper-parameters
|
| 397 |
+
|
| 398 |
+
In this section, we report the hyper-parameters used in our experiments.
|
| 399 |
+
|
| 400 |
+
- We adopt the base version of the Transformer architecture with 6 encoder layers, 6 decoder layers, and 8 attention heads.
|
| 401 |
+
- The embedding dimension is 512 and the Feed-Forward Network has a dimension of 2048.
|
| 402 |
+
- We train models with learning rate $\eta = 0.0015$ and use Adam optimizer (Kingma and Ba, 2015) with $\beta_{1} = 0.9$ , $\beta_{2} = 0.98$ , and the same learning rate schedule as Vaswani et al. (2017).
|
| 403 |
+
- Batch size is set to 64K and half-precision training is adopted (Ott et al., 2018).
|
| 404 |
+
|
| 405 |
+
Figure 6: Effect of different values of $\alpha$ on the WMT-6 dataset, with panel (a) showing the many-to-one setting and panel (b) the one-to-many setting. For clarity, we only depict the results of model 2 trained with $\tau = 5$.
|
| 411 |
+
|
| 412 |
+
- For regularization, we use the label smoothing as 0.1 (Szegedy et al., 2016). We set the dropout as 0.3 (Srivastava et al., 2014) on the TED-8-Diverse dataset and as 0.2 on the WMT-6 dataset.
|
| 413 |
+
- Models are trained for 70 epochs on WMT-6 and 300 epochs on TED-8-Diverse, chosen according to convergence.
|
| 414 |
+
- For TED-8-Diverse, we preprocess sentences using SentencePiece (Kudo and Richardson, 2018) with a vocabulary size of $8K$ for each language. For WMT-6, the vocabulary size is $64K$ for all languages.
|
| 415 |
+
- For inference, we use beam search with beam size 5.
|
| 416 |
+
|
| 417 |
+
All models are trained on Tesla V100 GPUs.
|
| 418 |
+
|
| 419 |
+
# D Details about Baselines
|
| 420 |
+
|
| 421 |
+
For temperature-based sampling (Arivazhagan et al., 2019), we adopt the official implementation in fairseq. We re-implement LSSD using the code released by Huang et al. (2022).
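For reference, temperature-based sampling draws training examples for language $\ell$ with probability proportional to $(n_{\ell} / \sum_{\ell'} n_{\ell'})^{1/\tau}$, where $n_{\ell}$ is the size of that language's training set; $\tau = 1$ reproduces the data distribution and larger $\tau$ upsamples LRLs. A minimal sketch (the example sizes are the WMT-6 training sizes from Table 5):

```python
import numpy as np

def temperature_sampling_probs(sizes, tau=5.0):
    """p_l proportional to (n_l / sum(n)) ** (1 / tau)."""
    p = np.asarray(sizes, dtype=float)
    p = p / p.sum()          # data distribution
    p = p ** (1.0 / tau)     # flatten it with temperature tau
    return p / p.sum()

# tr, ro, et, zh, de, fr
print(temperature_sampling_probs([5e3, 1e4, 8e4, 4e5, 1.5e6, 3e6], tau=5.0))
```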
|
| 422 |
+
|
| 423 |
+
<table><tr><td>Setting</td><td>Method</td><td>Sampling</td><td>fr</td><td>de</td><td>zh</td><td>et</td><td>ro</td><td>tr</td><td>Avg.</td></tr><tr><td rowspan="4">M2O</td><td rowspan="2">Temperature Sampling</td><td>τ = 1</td><td>34.40</td><td>28.70</td><td>13.27</td><td>16.41</td><td>22.65</td><td>7.99</td><td>20.57</td></tr><tr><td>τ > 1</td><td>31.59</td><td>26.61</td><td>12.56</td><td>16.48</td><td>23.06</td><td>9.29</td><td>19.93</td></tr><tr><td rowspan="2">AUTO-PMD</td><td>τ = 1</td><td>34.96</td><td>28.79</td><td>13.81</td><td>17.9</td><td>25.22</td><td>10.65</td><td>21.89</td></tr><tr><td>τ > 1</td><td>34.09</td><td>28.77</td><td>14.05</td><td>19.22</td><td>26.62</td><td>11.60</td><td>22.39</td></tr><tr><td rowspan="4">O2M</td><td rowspan="2">Temperature Sampling</td><td>τ = 1</td><td>36.16</td><td>23.89</td><td>21.49</td><td>11.53</td><td>14.85</td><td>5.58</td><td>18.92</td></tr><tr><td>τ > 1</td><td>31.21</td><td>20.76</td><td>20.76</td><td>13.28</td><td>17.54</td><td>8.20</td><td>18.63</td></tr><tr><td rowspan="2">AUTO-PMD</td><td>τ = 1</td><td>35.38</td><td>23.12</td><td>20.84</td><td>13.2</td><td>18.79</td><td>9.65</td><td>20.16</td></tr><tr><td>τ > 1</td><td>34.47</td><td>23.00</td><td>21.51</td><td>14.15</td><td>19.54</td><td>10.23</td><td>20.48</td></tr></table>
|
| 424 |
+
|
| 425 |
+
Table 6: BLEU score per language pair on the WMT-6 dataset. 'Avg.' is the abbreviation of "average values". Bold indicates the best performance of each language pair. Languages are sorted in decreasing order from left to right according to data size.
|
| 426 |
+
|
| 427 |
+
<table><tr><td>Setting</td><td>Method</td><td>Sampling</td><td>kor</td><td>fra</td><td>bul</td><td>ell</td><td>mkd</td><td>hin</td><td>mar</td><td>bos</td><td>Avg.</td></tr><tr><td rowspan="4">M2O</td><td rowspan="2">Temperature Sampling</td><td>τ = 1</td><td>19.73</td><td>40.73</td><td>39.74</td><td>38.71</td><td>34.34</td><td>23.38</td><td>11.13</td><td>24.88</td><td>29.08</td></tr><tr><td>τ > 1</td><td>18.79</td><td>40.1</td><td>39.00</td><td>38.11</td><td>32.89</td><td>22.55</td><td>10.36</td><td>24.98</td><td>28.35</td></tr><tr><td rowspan="2">AUTO-PMD</td><td>τ = 1</td><td>21.14</td><td>42.41</td><td>41.52</td><td>40.67</td><td>36.49</td><td>25.9</td><td>12.32</td><td>27.94</td><td>31.05</td></tr><tr><td>τ > 1</td><td>20.51</td><td>42.03</td><td>40.93</td><td>40.00</td><td>36.04</td><td>25.71</td><td>12.44</td><td>28.02</td><td>30.71</td></tr><tr><td rowspan="4">O2M</td><td rowspan="2">Temperature Sampling</td><td>τ = 1</td><td>9.06</td><td>40.26</td><td>36.10</td><td>33.63</td><td>25.67</td><td>15.56</td><td>4.90</td><td>16.82</td><td>22.75</td></tr><tr><td>τ > 1</td><td>8.87</td><td>39.96</td><td>35.91</td><td>33.31</td><td>24.35</td><td>14.81</td><td>4.75</td><td>15.87</td><td>22.23</td></tr><tr><td rowspan="2">AUTO-PMD</td><td>τ = 1</td><td>9.13</td><td>40.94</td><td>36.56</td><td>34.03</td><td>27.15</td><td>15.89</td><td>5.13</td><td>17.64</td><td>23.31</td></tr><tr><td>τ > 1</td><td>8.90</td><td>40.65</td><td>36.55</td><td>33.64</td><td>27.44</td><td>16.29</td><td>4.90</td><td>17.89</td><td>23.28</td></tr></table>
|
| 428 |
+
|
| 429 |
+
Table 7: BLEU score per language pair on the TED-8-DIVERSE dataset. 'Avg.' is the abbreviation of "average values". Bold indicates the best performance of each language pair. Languages are sorted in decreasing order from left to right according to data size.
|
| 430 |
+
|
| 431 |
+
We tried dropout rates of $\{0.2, 0.3\}$ for LSSD and report the best BLEU results for a fair comparison. The code of $\chi$-IBR (Zhou et al., 2021) is also publicly released. However, the $\chi$-IBR results evaluated in our experiments are lower than those in the original paper; therefore, we report the results from the original paper.
|
| 432 |
+
|
| 433 |
+
# E BLEU scores on Individual Languages
|
| 434 |
+
|
| 435 |
+
In this section, we report the BLEU scores for individual language pairs. For clarity, we only show the results of temperature-based sampling and our AUTO-PMD. As illustrated in Table 6 and Table 7, our method achieves consistent improvements in 3 out of 4 settings.
|
| 436 |
+
|
| 437 |
+
In the one-to-many setting of the WMT-6 dataset, the performance of the HRLs (i.e., $fr$ and $de$) drops by about 0.7 BLEU. This may be due to parameter interference from the significantly improved LRLs.
|
| 438 |
+
|
| 439 |
+
# F Effect of $\alpha$ for UNI-PMD and BI-PMD
|
| 440 |
+
|
| 441 |
+
In this section, we show the experimental results of UNI-PMD and BI-PMD with different values of $\alpha$ in Fig. 6. As demonstrated, the value of $\alpha$ is crucial for performance, and its optimal value varies across settings. This conclusion is consistent with prior work on knowledge distillation (Huang et al., 2022) and highlights the importance of deducing distillation weights automatically.
|
| 442 |
+
|
| 443 |
+
# G Other Variants of Mutual Distillation
|
| 444 |
+
|
| 445 |
+
In this work, we design two further mutual distillation-based strategies beyond AUTO-PMD: Dynamic Mutual Distillation (DYNAMIC-MD) and Language-Specific Mutual Distillation (LSMD). DYNAMIC-MD adopts the same update process for distillation weights as AUTO-PMD; that is, it also makes three trials and uses the
|
| 446 |
+
|
| 447 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Sampling</td><td colspan="2">BLEU</td></tr><tr><td>M2O</td><td>O2M</td></tr><tr><td rowspan="2">AUTO-PMD</td><td>τ = 1</td><td>21.89</td><td>20.16</td></tr><tr><td>τ = 5</td><td>22.39</td><td>20.48</td></tr><tr><td rowspan="2">DYNAMIC-MD</td><td>τ = 1</td><td>22.06</td><td>20.33</td></tr><tr><td>τ = 5</td><td>22.11</td><td>20.24</td></tr><tr><td rowspan="2">LSMD</td><td>τ = 1</td><td>21.47</td><td>18.94</td></tr><tr><td>τ = 5</td><td>21.03</td><td>19.46</td></tr></table>
|
| 448 |
+
|
| 449 |
+
Table 8: Other variants of mutual distillation designed by us. DYNAMIC-MD is the abbreviation of Dynamic Mutual Distillation. LSMD is the abbreviation of Language-Specific Mutual Distillation.
|
| 450 |
+
|
| 451 |
+
optimal action to update the distillation weight. Differently, DYNAMIC-MD selects a uniform optimal action instead of language-specific optimal actions. LSMD sets fixed, language-specific distillation weights for each language pair. To obtain suitable language-specific distillation weights, we use the distillation weights searched by AUTO-PMD at its last update. The results of these two strategies are listed in Table 8. As the results show, AUTO-PMD achieves a higher performance upper bound than these two strategies.
|
| 452 |
+
|
| 453 |
+
A For every submission:
|
| 454 |
+
|
| 455 |
+
A1. Did you describe the limitations of your work? 8
|
| 456 |
+
A2. Did you discuss any potential risks of your work? We have not found any risks in our work.
|
| 457 |
+
A3. Do the abstract and introduction summarize the paper's main claims?
|
| 458 |
+
A4. Have you used AI writing assistants when working on this paper? Left blank.
|
| 459 |
+
|
| 460 |
+
B Did you use or create scientific artifacts?
|
| 461 |
+
|
| 462 |
+
Left blank.
|
| 463 |
+
|
| 464 |
+
B1. Did you cite the creators of artifacts you used? Left blank.
|
| 465 |
+
B2. Did you discuss the license or terms for use and / or distribution of any artifacts? No response.
|
| 466 |
+
B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)? 4
|
| 467 |
+
B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it? We use widely-used and open datasets.
|
| 468 |
+
B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.? 4
|
| 469 |
+
B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be.
|
| 470 |
+
|
| 471 |
+
C Did you run computational experiments?
|
| 472 |
+
|
| 473 |
+
4
|
| 474 |
+
|
| 475 |
+
C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used? 8
|
| 476 |
+
|
| 477 |
+
C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? 4
|
| 478 |
+
C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?
|
| 479 |
+
C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)?
|
| 480 |
+
|
| 481 |
+
# D Did you use human annotators (e.g., crowdworkers) or research with human participants?
|
| 482 |
+
|
| 483 |
+
Left blank.
|
| 484 |
+
|
| 485 |
+
D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? No response.
|
| 486 |
+
D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? No response.
|
| 487 |
+
D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? No response.
|
| 488 |
+
D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? No response.
|
| 489 |
+
D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? No response.
|
2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:de28448cc42c77652eb878a4c0e3808b88b6c363502f5e05ede1cbb8266166b1
|
| 3 |
+
size 721305
|
2023/Towards Higher Pareto Frontier in Multilingual Machine Translation/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/c7e93c62-eea3-4f99-ab1c-c34da618031b_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/c7e93c62-eea3-4f99-ab1c-c34da618031b_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/c7e93c62-eea3-4f99-ab1c-c34da618031b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e3e3c1af1be999441cc0a6015f1bef230c0df6dba7e7715c9069681545ad16bc
|
| 3 |
+
size 7675123
|
2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/full.md
ADDED
|
@@ -0,0 +1,389 @@
|
| 1 |
+
# Towards Identifying Fine-Grained Depression Symptoms from Memes
|
| 2 |
+
|
| 3 |
+
Shweta Yadav*, Cornelia Caragea*, Chenye Zhao*, Naincy Kumari†, Marvin Solberg*, and Tanmay Sharma‡
|
| 4 |
+
|
| 5 |
+
*Department of Computer Science, University of Illinois Chicago, USA
|
| 6 |
+
|
| 7 |
+
†Central University of Rajasthan, India
|
| 8 |
+
|
| 9 |
+
\*Wayne State University, USA
|
| 10 |
+
|
| 11 |
+
$^{\ddagger}$ Indian Institute of Technology Gandhinagar, India
|
| 12 |
+
|
| 13 |
+
*{shwetay, cornelia, czhao43}@uic.edu, †knaincy818@gmail.com,
|
| 14 |
+
|
| 15 |
+
*marvin.solberg@wayne.edu, ‡tanmay.sharma@iitgn.ac.in
|
| 16 |
+
|
| 17 |
+
# Abstract
|
| 18 |
+
|
| 19 |
+
The past decade has observed significant attention toward developing computational methods for classifying social media data based on the presence or absence of mental health conditions. In the context of mental health, for clinicians to make an accurate diagnosis or provide personalized intervention, it is crucial to identify fine-grained mental health symptoms. To this end, we conduct a focused study on depression disorder and introduce a new task of identifying fine-grained depressive symptoms from memes. Toward this, we create a high-quality dataset (RESTORE) annotated with 8 fine-grained depression symptoms based on the clinically adopted PHQ-9 questionnaire. We benchmark RESTORE on 20 strong monomodal and multimodal methods. Additionally, we show how imposing orthogonal constraints on textual and visual feature representations in a multimodal setting can enforce the model to learn non-redundant and de-correlated features leading to a better prediction of fine-grained depression symptoms. Further, we conduct an extensive human analysis and elaborate on the limitations of existing multimodal models that often overlook the implicit connection between visual and textual elements of a meme.
|
| 20 |
+
|
| 21 |
+
# 1 Introduction
|
| 22 |
+
|
| 23 |
+
Mental health disorders have a profound impact on society. Almost 1 billion people worldwide suffer from mental health disorders, predominantly depression, anxiety, mood, and substance use disorders (WHO, 2022). Two of the most common mental health disorders, depression and anxiety, account for US $1 trillion in economic losses worldwide annually (Health, 2020). This cost is projected to rise to a staggering US $6 trillion by 2030 (Bloom et al., 2012). Apart from the economic burden, the social burden of mental health disorders is huge. Suicide is now the fourth leading cause of death among those aged 15 to 29 years old (Fleischmann
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
Figure 1: Example of a depressive meme. If we merely evaluate the textual content ("These would give me a peaceful scene in a land of trouble"), it is difficult to establish the author's true feelings. However, the meme-image can provide complementary information to help recognize the depression symptom (self-harm) correctly.
|
| 27 |
+
|
| 28 |
+
et al., 2021). However, despite its preponderance and global burden, depression continues to be significantly undertreated in all practice settings: fewer than one-third of adults with depression receive effective treatment. Denial of illness and stigma are the two most common obstacles to the appropriate diagnosis and treatment of depression (Sirey et al., 2001).
|
| 29 |
+
|
| 30 |
+
Recently, social media data has emerged as a powerful "lens" for tracking and detecting depression (De Choudhury et al., 2013; Yates et al., 2017). The vast majority of the existing works on depression have utilized the textual or multi-modal information available in the social media data to primarily classify the posts based on the perceived depressive behavior (depressive or non-depressive) (De Choudhury et al., 2014; Coppersmith et al., 2015; Gui et al., 2019). However, for healthcare professionals to intervene and provide effective treatment, it is crucial for them to understand the leading symptoms of that depressive behavior.
|
| 31 |
+
|
| 32 |
+
Motivated by this, we aim to develop a practical decision support system that sifts through social media posts and can provide healthcare professionals with deeper insights into one's depressive behaviors by capturing fine-grained depressive symptoms. In the past, there have been a few attempts (Yadav et al., 2020; Yazdavar et al., 2017) to capture depression symptoms; however, they are confined to textual information only. Recently, a new form of communication has emerged on social media: the 'meme'. A meme usually consists of an expressive image embedded with a short block of text. It is designed to convey a complex idea or emotional state of mind, which is far easier to understand than a textual description of thoughts. Memes acknowledge a shared experience between the creator and the viewer and have therefore become a fast way of communicating on social media. Outside the mental health domain, there are numerous studies on meme processing and understanding for emotion detection (Sharma et al., 2020; Pramanick et al., 2021a), cyberbullying (Maity et al., 2022), and hateful meme detection (Zhou et al., 2021; Pramanick et al., 2021b).
|
| 33 |
+
|
| 34 |
+
However, to our knowledge, none of the existing studies have yet leveraged the visual information available in memes specifically to capture fine-grained depression symptoms. There are two main reasons to consider visual information: (i) according to a recent survey<sup>1</sup>, images appear in more than $42\%$ of tweets; and (ii) textual information alone cannot capture the overall semantic meaning. For example, in Figure 1, considering only the text, "These would give me a peaceful scene in a land of trouble", would not be sufficient to identify the depressive symptoms or even to distinguish whether the meme is depressive or not. However, it is evident from the image that the meme expresses suicidal thoughts/intent. Therefore, in order to obtain a holistic view of a post and accurately determine the depression symptoms, it is necessary to take into account the visual information available in social media posts.
|
| 35 |
+
|
| 36 |
+
To this end, we propose a new task - Fine-Grained Depression Symptom Identification from Memes - and hypothesize that leveraging the multi-modal information available in memes can more effectively help identify depression symptoms from social media posts. Towards this, we utilize the clinically established 9-scale
|
| 37 |
+
|
| 38 |
+
Patient Health Questionnaire (PHQ-9) (Kroenke and Spitzer, 2002) depression symptom categories to classify the depressive memes that we collected from two popular social media forums - Reddit and Twitter. In particular, we make the following contributions:
|
| 39 |
+
|
| 40 |
+
(1) We create a high-quality dataset (RESTORE) consisting of 9,837 depression memes, annotated with 8 fine-grained PHQ-9 symptom categories: Feeling Down, Lack of Interest, Eating Disorder, Sleeping Disorder, Concentration Problem, Lack of Energy, Low Self Esteem, and Self Harm.
|
| 41 |
+
(2) We perform extensive experiments with 20 state-of-the-art monomodal and multimodal approaches to benchmark our dataset and introduce orthogonality constraints in a multimodal setting to incorporate multiple perspectives present in the meme.
|
| 42 |
+
(3) We conduct a thorough human analysis and highlight the major findings and limitations of the monomodal and multimodal models. Our best-performing model obtains an F1-Score of only 65.01, demonstrating the challenge involved with meme processing for depression symptom identification task, and we believe that our dataset will promote further research in this direction.
|
| 43 |
+
|
| 44 |
+
# 2 Related Works
|
| 45 |
+
|
| 46 |
+
Based on the data modalities, we categorize the existing works on depression detection as follows:
|
| 47 |
+
|
| 48 |
+
Language As highlighted in the study of Fine (2006), people's thoughts are frequently reflected in their language, and linguistic cues, such as informal language usage, first-person referencing, and greater usage of negative emotion words, generally typify psychiatric disorders (Ramirez-Esparza et al., 2008; Resnik et al., 2015). Numerous studies in computational linguistics have modeled the language usage in mental health-related discourse to predict mental health states (Tsugawa et al., 2015; Harman and Dredze, 2014) and infer risk to various mental disorders using social media data (Benton et al., 2017b; Coppersmith et al., 2016; Huang et al., 2015; Yadav et al., 2018, 2021). Most of the earlier works utilized a feature-driven approach (Resnik et al., 2015; Karmen et al., 2015) to detect depression. Recently, with the availability of multiple benchmark datasets (Yates et al., 2017; Coppersmith et al., 2015), existing methods are designed using neural models (Orabi et al., 2018; Zhang et al., 2020). While most of these existing works study depression at a coarser level, there
|
| 49 |
+
|
| 50 |
+
have been only a few efforts towards inferring depressive symptoms by analyzing the textual information in social media posts (Yadav et al., 2020).
|
| 51 |
+
|
| 52 |
+
Vision The visual information available in shared images offers valuable psychological cues for understanding a user's depression status. Previous studies (Girard et al., 2014; Scherer et al., 2013; Zhu et al., 2017) conducted in a clinical setting have established that certain non-verbal behaviors such as downward gaze angle, dull smiles, and shorter average lengths of a smile characterize depressive behaviors. Recently, the popularity of photo- and video-sharing social networking services such as Instagram has piqued the interest of researchers in investigating people's depressive behavior from their visual narratives. Reece and Danforth (2017) and Manikonda and De Choudhury (2017) investigated the role of public Instagram profiles in identifying depressive users.
|
| 53 |
+
|
| 54 |
+
Multimodal (Language+Vision+Speech) In recent years, there has been growing attention towards exploiting multimodal information such as speech, vision and text for depression detection (Valstar et al., 2013, 2014; Ringeval et al., 2018). Existing studies have devised several neural approaches to effectively combine the information from various modalities. For instance, Yin et al. (2019) utilized the hierarchical bidirectional LSTM network to extract and fuse the local video and audio features to predict the degree of depression. Gui et al. (2019) proposed a multi-agent reinforcement learning method for identifying depressive users. An et al. (2020) developed the topic-enriched multi-task learning framework that achieved state-of-the-art performance on multimodal depression detection tasks. In contrast to the above approaches, our study aims to find the fine-grained depression symptom from memes that have not yet been explored before.
|
| 55 |
+
|
| 56 |
+
# 3 Dataset Creation
|
| 57 |
+
|
| 58 |
+
In this section, we present a new benchmark dataset, RESTORE², for identifying fine-grained depRESsive SympTOms fRom mEmes, which was created following a clinically guided approach and includes contributions from medical informatics experts and psychologists at each phase.
|
| 59 |
+
|
| 60 |
+
# 3.1 Task Structure
|
| 61 |
+
|
| 62 |
+
Dataset Selection and Curation. We collect posts from two popular social media platforms: Twitter and Reddit. We chose these platforms as a data source because of their rising popularity among depressive users to publicly express their thoughts, moods, emotions, and feelings. This, coupled with the greater degree of anonymity, facilitates self-disclosure and allows users to be more truthful and open in sharing sensitive issues and personal life without fear of being embarrassed or judged. Thus these user-generated self-narratives provide low-cost, large-scale, non-intrusive data to understand depressive behavior patterns and outcomes outside the controlled clinical environment, both in real-time and longitudinally.
|
| 63 |
+
|
| 64 |
+
To ensure that we capture a broader spectrum of depressive behaviors, we use a domain-specific depression lexicon (Yazdavar et al., 2017). The lexicon contains depression-related terms from 8 symptom categories following the PHQ-9 $^3$ questionnaire. We use the depression lexicon to collect tweets from Twitter public profiles that mention at least one of the words from the lexicon in their profile description. In a similar manner, we collect Reddit posts; however, we restrict ourselves to the following subreddits: "Mental Health", "depression", "suicide watch", "depression memes", "eating disorder", and "sleeping disorder".
|
| 65 |
+
|
| 66 |
+
Objective. Given a meme (containing an image and embedded text) and the 8 fine-grained PHQ-9 depression symptom categories, the goal is to identify all depression symptoms that are expressed in the meme.
|
| 67 |
+
|
| 68 |
+
# 3.2 Task Construction
|
| 69 |
+
|
| 70 |
+
Filtering Strategy. Since the focus of this study is to map the content in memes to the corresponding PHQ-9 symptom categories, we filtered out the posts that do not have a meme. Further, we applied a series of filtering steps to remove any irrelevant memes: (i) the meme should contain both an image and embedded text (the text which is embedded in the meme); (ii) the meme text must be written in English; (iii) the embedded text in
Figure 2: Sample memes with associated PHQ-9 symptoms (LOI: Lack of Interest, FD: Feeling Down, ED: Eating Disorder, SD: Sleeping Disorder, LOE: Lack of Energy, LSE: Low Self Esteem, CP: Concentration Problem, SH: Self Harm); panels: (a) LOI, (b) FD, (c) ED and FD, (d) SD, (e) LOE, (f) LSE, (g) CP, (h) SH and ED.
|
| 97 |
+
the meme should be readable; (iv) the meme image should not be blurry and should have a high resolution. Further, we filtered out those memes for which the $\mathrm{OCR}^4$ could not obtain the text. Following these filtering criteria, we obtain 11,000 posts.
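A minimal sketch of how filtering steps (i)-(iv) could be automated; `min_side` is an assumed resolution threshold, and `pytesseract` stands in for whichever OCR engine was actually used:

```python
from PIL import Image
import pytesseract  # requires the Tesseract binary to be installed

def keep_meme(path: str, min_side: int = 300) -> bool:
    """Discard memes that are too small (a blurry/low-resolution proxy) or
    whose embedded text cannot be recovered by OCR."""
    img = Image.open(path)
    if min(img.size) < min_side:
        return False
    text = pytesseract.image_to_string(img).strip()
    return len(text) > 0
```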
|
| 98 |
+
|
| 99 |
+
Expert Annotation. We devised an annotation guideline based on the clinically adopted PHQ-9 depression questionnaire, which is a tool to assess the severity of depression. A team of 4 annotators (experts in psychology and medical informatics) independently annotated the collected memes. Each annotator was provided annotation guidelines and an interface to map the content in memes to the closest PHQ-9 symptom. Specifically, for a given meme, the annotators were instructed to label the depression symptom categories: Lack of Interest, Feeling Down, Sleeping Disorder, Lack of Energy, Eating Disorder, Low Self-Esteem, Concentration Problem, and Self Harm, that are the closest match to the meme based on the textual or visual information available in the meme. Note that symptoms can be one or multiple per meme, which renders the task as multi-label classification. If the meme does not contain any of these symptoms, annotators were instructed to label the meme in the "Other" class, which was not considered in our final dataset. For this task, the inter-annotator agreement (Krippendorff's alpha coefficient (Krippendorff, 2004)) is 81.55, which signifies a strong agreement amongst annotators. We provide examples for each symptom category corresponding to memes in Figure 2 and definition in Appendix-A.
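For reference, Krippendorff's alpha can be computed with the `krippendorff` package; a sketch for a single-label view of the task, with toy ratings (rows are annotators, columns are memes, symptom labels coded as integers, and `np.nan` marking skipped items):

```python
import numpy as np
import krippendorff  # pip install krippendorff

ratings = np.array([
    [0, 1, 3, 2, np.nan],
    [0, 1, 3, 2, 4],
    [0, 2, 3, 2, 4],
    [0, 1, 3, np.nan, 4],
])
print(krippendorff.alpha(reliability_data=ratings,
                         level_of_measurement="nominal"))
```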
|
| 100 |
+
|
| 101 |
+
# 3.3 Benchmark Dataset
|
| 102 |
+
|
| 103 |
+
Our final dataset includes 4,664 depressive memes; the distribution of PHQ-9 symptoms corresponding to these memes, as well as the train, test, and validation split, is shown in Table 1. Based on the obtained PHQ-9 class distribution, we can notice that a few PHQ-9 symptom categories are prominent in our human-annotated set, such as 'FD', 'ED', and 'SH'. In contrast, the 'LOI', 'SD', 'LOE', and 'CP' symptoms rarely appear in our human-annotated dataset.
|
| 104 |
+
|
| 105 |
+
To enrich and balance a few PHQ-9 symptom categories, we developed the training set with a portion of automatic curation. In our automatic curation of training samples, we followed two strategies to expand the human-annotated training set. In the first strategy, we conducted a keyword-based search using "eating disorder memes", "feeling down memes", "sleep disorder memes", "lack of energy memes", "low self esteem memes", "concentration problem memes", and "self-harm" on Google Images and selected only the top image search results. The second strategy considers selecting the top image results from Pinterest with the queries: "insomnia memes", "lack of interest memes", and "sleep disorder memes". To remove noise, we maintained strict filtering on the resolution of the meme and the readability of the meme's text. We also de-duplicated memes whose sources were the same. Following this process, we obtained an additional 5,173 samples, which we used to enrich the training set. Also, note that both the test and validation sets include only manually annotated samples.
|
| 106 |
+
|
| 107 |
+
<table><tr><td>SYMPTOMS</td><td>LOI</td><td>FD</td><td>ED</td><td>SD</td><td>LOE</td><td>LSE</td><td>CP</td><td>SH</td><td>TOTAL</td></tr><tr><td>TRAIN</td><td>471</td><td>2085</td><td>1939</td><td>1562</td><td>122</td><td>855</td><td>595</td><td>1516</td><td>8814</td></tr><tr><td>AUTOMATIC</td><td>471</td><td>1070</td><td>454</td><td>1561</td><td>90</td><td>501</td><td>595</td><td>431</td><td>5173</td></tr><tr><td>HUMAN</td><td>-</td><td>1015</td><td>1485</td><td>1</td><td>32</td><td>354</td><td>-</td><td>1085</td><td>3641</td></tr><tr><td>TEST</td><td>97</td><td>294</td><td>98</td><td>99</td><td>95</td><td>128</td><td>100</td><td>106</td><td>662</td></tr><tr><td>VALIDATION</td><td>45</td><td>195</td><td>49</td><td>45</td><td>54</td><td>85</td><td>42</td><td>61</td><td>361</td></tr></table>
|
| 108 |
+
|
| 109 |
+
Table 1: Data distribution in the train, validation, and test sets for PHQ-9 symptoms. Both the test and validation sets are human-annotated.
|
| 110 |
+
|
| 111 |
+
# 4 RESTORE Dataset Analysis
|
| 112 |
+
|
| 113 |
+
Visual Analysis. We conducted a visual analysis of the memes to study how depression symptoms relate to color features. We performed color analysis by computing the pixel-level average w.r.t. HSV (hue, saturation, and value), in which lower hue scores imply more redness and higher hue scores suggest more blue. Saturation describes an image's vibrancy. Value relates to the brightness of an image, with lower scores indicating a darker image. We observe that most of the memes, irrespective of symptom categories, are less colorful and have lower saturation values, suggesting negative emotions. These cases were prominent in "low self esteem", "lack of interest", and "self harm", where users often share memes that are less vivid, darker (higher grayscale), and have a high hue. In contrast, the memes related to "eating disorder" are brighter and more colorful, mainly because of the presence of food in the memes.
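A minimal sketch of the pixel-level HSV averaging described above (the function name is ours):

```python
import numpy as np
from PIL import Image

def mean_hsv(path: str):
    """Pixel-level average hue, saturation, and value of a meme image."""
    hsv = np.asarray(Image.open(path).convert("HSV"), dtype=float)
    # low value = darker image, low saturation = less vibrant
    return hsv[..., 0].mean(), hsv[..., 1].mean(), hsv[..., 2].mean()
```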
|
| 114 |
+
|
| 115 |
+
Qualitative Language Analysis. To understand the psycho-linguistic patterns associated with each PHQ-9 symptom category, we employed LIWC (Tausczik and Pennebaker, 2010) to measure various linguistic factors such as analytical reasoning, clout, authenticity, emotional tone, informal language markers, and pronouns. Our analysis reveals that "low self esteem" has the lowest analytic reasoning among all the depression symptoms, depicting a more intuitive and personal language. Surprisingly, "concentration problem" has the highest analytic reasoning, suggesting formal and logical thinking patterns. The clout feature, which measures individual confidence and clarity in speaking or writing, was found to be highest in the "feeling down" and lowest in the "eating disorder" category. A similar trend was observed with the authenticity feature, which reflects presenting oneself to others in an original way. Further, we notice that individuals expressing "self harm" behavior, "feeling down", and "low self esteem" symptoms use more first-person pronouns.
|
| 116 |
+
|
| 117 |
+
# 4.1 Benchmark Methods
|
| 118 |
+
|
| 119 |
+
We benchmark the RESTORE dataset on the following methods:
|
| 120 |
+
|
| 121 |
+
Monomodal (Language) Methods. We experiment with four pre-trained language models: BERT (Devlin et al., 2019), ROBERTA (Liu et al., 2019), XLNET (Yang et al., 2019) and MENTALBERT (Ji et al., 2021), fine-tuned on the RESTORE training set. For each model, we obtained the hidden state representations and utilized a feedforward network with the sigmoid activation function to predict the multi-label depression categories. Additionally, we also fine-tuned the BERT model by adding the LIWC features to integrate psycholinguistic information into BERT explicitly. We project the LIWC features using a feedforward network and concatenate these projected features with the BERT [CLS] token representation. The concatenated features are used to predict fine-grained depression symptoms. We call this network the BERT+LIWC model.
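A minimal sketch of the BERT+LIWC variant; the 93 LIWC features and the 128-dimensional projection are assumptions, and training would minimize binary cross-entropy over the 8 sigmoid outputs:

```python
import torch
import torch.nn as nn
from transformers import BertModel

class BertLiwc(nn.Module):
    def __init__(self, n_liwc: int = 93, n_labels: int = 8):
        super().__init__()
        self.bert = BertModel.from_pretrained("bert-base-uncased")
        self.liwc_proj = nn.Sequential(nn.Linear(n_liwc, 128), nn.ReLU())
        self.classifier = nn.Linear(self.bert.config.hidden_size + 128, n_labels)

    def forward(self, input_ids, attention_mask, liwc_feats):
        # [CLS] token representation of the meme's embedded text
        cls = self.bert(input_ids, attention_mask=attention_mask).last_hidden_state[:, 0]
        z = torch.cat([cls, self.liwc_proj(liwc_feats)], dim=-1)
        return torch.sigmoid(self.classifier(z))  # one probability per PHQ-9 symptom
```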
|
| 122 |
+
|
| 123 |
+
Monomodal (Vision) Methods. To evaluate the effectiveness of visual information, we experiment with seven popular pre-trained vision models: DENSENET (Iandola et al., 2014), RESNET-152 (He et al., 2016), RESNEXT (Xie et al., 2017), CONVNEXT (Liu et al., 2022), REGNET (Schneider et al., 2017), EFFICIENTNET (Tan and Le, 2019), and VIT (Dosovitskiy et al., 2020). We fine-tuned these models on the RESTORE training set similar to the monomodal (language) models.
|
| 124 |
+
|
| 125 |
+
Multimodal Methods. We experiment with three state-of-the-art pre-trained multimodal models: VISUALBERT (Li et al., 2019), MMBT (Kiela et al., 2019), and CLIP (Radford et al., 2021), fine-tuned on the RESTORE training set. Additionally, we also experiment with the following models:
|
| 126 |
+
|
| 127 |
+
- Late Fusion: This model computes the mean of the prediction scores obtained from the RESNET-152 and BERT models.
|
| 128 |
+
- Early Fusion: This approach concatenates the features obtained from RESNET-152 and BERT, which are passed to a feed-forward network to make predictions.
|
| 129 |
+
- BERT+HSV: Here, we fine-tuned the BERT model by adding mean, max, and min values of HSV features of the image. Similar to BERT+LIWC, we concatenate HSV projected features with BERT [CLS] token representation to make predictions.
|
| 130 |
+
|
| 131 |
+
# 5 Proposed Approach
|
| 132 |
+
|
| 133 |
+
Existing multimodal approaches focus on generating text-image feature representations by detecting the objects in the image and learning an alignment between textual and visual tokens. However, a meme can convey multiple perspectives, and detecting the object alone may not be sufficient to generate a semantically-rich text-image representation. Therefore, to capture the image's multiple views that could be beneficial in effectively distinguishing the depression symptoms, we introduce orthogonal feature generation in a multimodal setting. We begin by first encoding the meme image $\mathcal{I}$ and its embedded text $\mathcal{T}$ with the pre-trained RESNET-152 model and BERT model, respectively. We selected these models because of their simplicity and comparable performance to other language-vision models. To capture multiple perspectives of the image, we perform the 2-dimensional adaptive average pooling (adaptive-avg-pool) of output size $S_0\times S_1$ on RESNET-152 model output $F$ that results in image representation $h_{\mathcal{I}}\in \mathcal{R}^{K\times S_0\times S_1}$ of $K$ feature map. With this approach, we obtained feature representations $h_{\mathcal{I}}^{1}\in \mathcal{R}^{K}$ and $h_{\mathcal{I}}^{2}\in \mathcal{R}^{K}$ by setting $S_0 = 2$ and $S_{1} = 1$ (based on the validation performance).
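A minimal sketch of the two-view image encoding with $S_0 = 2$ and $S_1 = 1$ (variable names are ours):

```python
import torch
import torch.nn as nn
from torchvision.models import resnet152

# drop ResNet-152's own pooling and classifier, keep the K=2048 feature maps
backbone = nn.Sequential(*list(resnet152(weights="DEFAULT").children())[:-2])
pool = nn.AdaptiveAvgPool2d((2, 1))  # output size S0 x S1 = 2 x 1

x = torch.randn(1, 3, 224, 224)      # a dummy meme image
F_map = backbone(x)                  # (1, 2048, 7, 7)
h = pool(F_map).squeeze(-1)          # (1, 2048, 2)
h1, h2 = h[..., 0], h[..., 1]        # two K-dimensional views of the image
```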
Orthogonal Feature Generation: We introduce orthogonal feature generation, where the features are regularized with orthogonality constraints. With this constraint, we generate new features that capture another perspective and that are non-redundant and de-correlated with the existing features. The resulting orthogonal features help the model fully utilize its capacity, improving feature expressiveness. Formally, given the textual feature $h_{\mathcal{T}}$, which corresponds to the BERT [CLS] token representation, and the image feature $h_{\mathcal{I}}$, we aim to generate the feature $h_{\perp}$ orthogonal to $h \in \{h_{\mathcal{I}}^1, h_{\mathcal{I}}^2, h_{\mathcal{T}}\}$ given another feature modality $\hat{h} \in \{h_{\mathcal{I}}^1, h_{\mathcal{I}}^2, h_{\mathcal{T}}\} - \{h\}$. To this end, we first project the feature vector $h$ into a common vector space as $\bar{h}$; thereafter, we compute the vector component $C$ and the orthogonal projection as follows:
$$
C = \frac{\bar{h}^{T}\hat{h}}{\bar{h}^{T}\bar{h}}\,\bar{h} \quad \text{and} \quad h_{\perp} = \hat{h} - C \tag{1}
$$
In this process, we obtain the feature $h_{\perp}$ orthogonal to $\bar{h}$, which (by vector arithmetic) is also guaranteed to be non-redundant with $\hat{h}$.
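
Equation (1) amounts to a batched Gram-Schmidt step, sketched below; the epsilon guard is our addition for numerical safety, and we assume both features live in a common vector space of the same dimension.

```python
# Orthogonal feature generation per Eq. (1) (a sketch).
import torch

def orthogonal_feature(h_bar, h_hat, eps=1e-8):
    # Projection coefficient <h_bar, h_hat> / <h_bar, h_bar>, per sample.
    coef = (h_bar * h_hat).sum(-1, keepdim=True) / (
        (h_bar * h_bar).sum(-1, keepdim=True) + eps)
    c = coef * h_bar      # vector component C of h_hat along h_bar
    return h_hat - c      # h_perp, orthogonal to h_bar
```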
Multimodal Fusion: To fuse both modalities, we devise a multimodal fusion strategy based on conditional adaptive gating. Specifically, we first compute the bimodal scalars $g^{1}$ and $g^{2}$ with the gating mechanism (Rahman et al., 2020), considering the textual representation as one modality and one of the image features as the other. These scalars quantify the relevant information in each image feature conditioned on the textual feature. In the next step, we compute the multimodal representation from the image representations and the previously computed bimodal scalars with respect to the textual feature. Formally,
$$
h_{u} = g^{1}\mathbf{W}_{f}^{1}h_{\mathcal{I}}^{1} + g^{2}\mathbf{W}_{f}^{2}h_{\mathcal{I}}^{2} \tag{2}
$$
where $\mathbf{W}_f^1$ and $\mathbf{W}_f^2$ are the weight matrices for the two image representations. With this strategy, we obtain the multimodal feature $f = h_{\mathcal{T}} + h_u$.
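
A sketch of this fusion step is given below. The exact gate parameterization of Rahman et al. (2020) is not reproduced here; the sigmoid-over-concatenation gates are an illustrative stand-in, and the feature dimensions are assumptions.

```python
# Conditional adaptive gating and Eq. (2) fusion (a sketch).
import torch
import torch.nn as nn

class GatedFusion(nn.Module):
    def __init__(self, text_dim=768, img_dim=2048):
        super().__init__()
        self.gate1 = nn.Linear(text_dim + img_dim, 1)
        self.gate2 = nn.Linear(text_dim + img_dim, 1)
        self.W_f1 = nn.Linear(img_dim, text_dim, bias=False)  # W_f^1
        self.W_f2 = nn.Linear(img_dim, text_dim, bias=False)  # W_f^2

    def forward(self, h_text, h_img1, h_img2):
        # Bimodal scalars g1, g2: relevance of each image view given the text.
        g1 = torch.sigmoid(self.gate1(torch.cat([h_text, h_img1], dim=-1)))
        g2 = torch.sigmoid(self.gate2(torch.cat([h_text, h_img2], dim=-1)))
        h_u = g1 * self.W_f1(h_img1) + g2 * self.W_f2(h_img2)  # Eq. (2)
        return h_text + h_u                                    # f = h_T + h_u
```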
Depressive Symptoms Identification: Here, we first apply the LayerNorm (Ba et al., 2016) operation to the multimodal feature $f$ and the orthogonal feature $h_{\perp}$. The resulting features are concatenated with the textual feature $h_{\mathcal{T}}$ to form the final representation $z$. Finally, we apply the sigmoid operation on $z$ to predict the depression symptom categories.
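
Putting the pieces together, the prediction head can be sketched as below, assuming all features have been brought to a common dimension; the final linear layer and the sizes are our assumptions, not the authors' reported architecture.

```python
# Symptom-identification head: LayerNorm, concatenation, sigmoid (a sketch).
import torch
import torch.nn as nn

dim, n_classes = 768, 8
norm_f, norm_perp = nn.LayerNorm(dim), nn.LayerNorm(dim)
head = nn.Linear(3 * dim, n_classes)

def predict(f, h_perp, h_text):
    # Normalize the multimodal and orthogonal features, append the text feature.
    z = torch.cat([norm_f(f), norm_perp(h_perp), h_text], dim=-1)
    return torch.sigmoid(head(z))   # multi-label PHQ-9 probabilities
```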
# 6 Implementation Details
We utilized the pre-trained weights of BERT-base$^{5}$, RoBERTa-large$^{6}$, MentalBERT$^{7}$, and XLNet-base$^{8}$ from HuggingFace (Wolf et al., 2020). For the pre-trained vision models, we followed the torchvision API$^{9}$ to obtain the pre-trained weights. In particular, we use the resnet152, resnext101_32x8d, densenet161, efficientnet_b4, regnet_y_800mf, vit_l_32, and convnext_large pre-trained weights, fine-tuned on the PHQ-9 depression symptom identification task. We use the HuggingFace implementation$^{10}$ of VisualBERT to fine-tune the model on the PHQ-9 depression symptom identification task.
For MMBT$^{11}$ and CLIP$^{12}$, we also follow the HuggingFace implementations and fine-tune the models on the PHQ-9 depression symptom identification task. For the visual analysis of the RESTORE dataset, we use the open-cv Python library$^{13}$. We fine-tuned each model on the RESTORE training dataset for 10 epochs. The maximum text length is set to 256 tokens. We normalized the images with pixel mean and standard deviation values before feeding them to the monomodal and multimodal networks. We evaluate each model on the RESTORE validation dataset and use the best model (maximum micro F1-score) to evaluate performance on the RESTORE test dataset. To update the monomodal (vision, language) and multimodal model parameters, we used the AdamW (Loshchilov and Hutter, 2018) optimizer with a learning rate of $4e-5$, and we set the batch size to 64 to train all the benchmark models. We train the proposed network with a batch size of 16 and the AdamW optimizer (learning rate of $2e-5$) for 10 epochs; a minimal training-loop sketch is given after the license list below. The dimension $K$ of the feature map obtained from RESNET-152 is 2048. For the LIWC and HSV experiments, we set the size of the hidden unit to 20. We performed all the experiments on a single NVIDIA Tesla V100x GPU with 32 GB of memory. The average runtime to train our framework is 11.55 minutes per epoch, and the proposed model has $\sim 170$ million parameters. All the libraries used in the experiments are licensed as follows:
- HuggingFace (3.5.0): Apache-2.0 License
- NLTK (3.6.3): Apache-2.0 License
- spacy (3.4.4): MIT License
- LIWC (22): Academic License
- open-cv (4.5.4): Apache-2.0 License
- PyTorch (1.10.1): modified BSD License
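
The training configuration reported above can be summarized in a short loop; the optimizer, learning rate, batch size, and epoch count follow the reported values for the proposed network, while the model, dataset, and batch layout are placeholders of our own.

```python
# Training-loop sketch with the reported hyperparameters (assumed wiring).
import torch
from torch.utils.data import DataLoader

def train(model, train_set, device="cuda"):
    loader = DataLoader(train_set, batch_size=16, shuffle=True)
    opt = torch.optim.AdamW(model.parameters(), lr=2e-5)
    bce = torch.nn.BCELoss()  # sigmoid outputs vs. multi-hot PHQ-9 labels
    model.to(device).train()
    for epoch in range(10):
        for *inputs, labels in loader:
            opt.zero_grad()
            probs = model(*[t.to(device) for t in inputs])
            loss = bce(probs, labels.to(device).float())
            loss.backward()
            opt.step()
```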
# 7 Results and Observations
Main Results. Table 2 summarizes the results of the monomodal and multimodal approaches. The obtained results (first two blocks
<table>
<tr><td>Models</td><td>Precision</td><td>Recall</td><td>F1-score</td></tr>
<tr><td>BERT</td><td>0.677<sub>0.01</sub></td><td>0.588<sub>0.01</sub></td><td>0.63<sub>0.01</sub></td></tr>
<tr><td>XLNET</td><td>0.647<sub>0.03</sub></td><td>0.577<sub>0.06</sub></td><td>0.609<sub>0.05</sub></td></tr>
<tr><td>MLP+LIWC</td><td>0.224<sub>0.04</sub></td><td>0.527<sub>0.07</sub></td><td>0.311<sub>0.02</sub></td></tr>
<tr><td>BERT+LIWC</td><td>0.684<sub>0.05</sub></td><td>0.557<sub>0.03</sub></td><td>0.613<sub>0.01</sub></td></tr>
<tr><td>MENTALBERT</td><td>0.695<sub>0.02</sub></td><td>0.579<sub>0.02</sub></td><td>0.631<sub>0.01</sub></td></tr>
<tr><td>ROBERTA</td><td>0.675<sub>0.07</sub></td><td>0.558<sub>0.01</sub></td><td>0.611<sub>0.03</sub></td></tr>
<tr><td>DENSENET-161</td><td>0.385<sub>0.0</sub></td><td>0.448<sub>0.01</sub></td><td>0.414<sub>0.01</sub></td></tr>
<tr><td>RESNET-152</td><td>0.368<sub>0.03</sub></td><td>0.425<sub>0.04</sub></td><td>0.394<sub>0.03</sub></td></tr>
<tr><td>RESNEXT-101</td><td>0.375<sub>0.01</sub></td><td>0.436<sub>0.01</sub></td><td>0.403<sub>0.01</sub></td></tr>
<tr><td>CONVNEXT</td><td>0.35<sub>0.08</sub></td><td>0.462<sub>0.01</sub></td><td>0.395<sub>0.04</sub></td></tr>
<tr><td>MLP+HSV</td><td>0.205<sub>0.02</sub></td><td>0.416<sub>0.05</sub></td><td>0.274<sub>0.02</sub></td></tr>
<tr><td>REGNET</td><td>0.377<sub>0.01</sub></td><td>0.427<sub>0.0</sub></td><td>0.4<sub>0.01</sub></td></tr>
<tr><td>EFFICIENTNET</td><td>0.315<sub>0.05</sub></td><td>0.449<sub>0.01</sub></td><td>0.369<sub>0.03</sub></td></tr>
<tr><td>ViT</td><td>0.36<sub>0.02</sub></td><td>0.404<sub>0.03</sub></td><td>0.38<sub>0.02</sub></td></tr>
<tr><td>LATE FUSION</td><td>0.637<sub>0.08</sub></td><td>0.58<sub>0.07</sub></td><td>0.601<sub>0.01</sub></td></tr>
<tr><td>CONCAT BERT</td><td>0.654<sub>0.05</sub></td><td>0.594<sub>0.04</sub></td><td>0.62<sub>0.01</sub></td></tr>
<tr><td>BERT+HSV</td><td>0.688<sub>0.05</sub></td><td>0.565<sub>0.04</sub></td><td>0.618<sub>0.01</sub></td></tr>
<tr><td>VISUALBERT</td><td>0.68<sub>0.03</sub></td><td>0.569<sub>0.03</sub></td><td>0.627<sub>0.01</sub></td></tr>
<tr><td>MMBT</td><td>0.66<sub>0.03</sub></td><td>0.58<sub>0.03</sub></td><td>0.616<sub>0.01</sub></td></tr>
<tr><td>CLIP</td><td>0.567<sub>0.13</sub></td><td>0.534<sub>0.08</sub></td><td>0.537<sub>0.01</sub></td></tr>
<tr><td>PROPOSED</td><td>0.693<sub>0.01</sub></td><td>0.607<sub>0.01</sub></td><td>0.651<sub>0.01</sub></td></tr>
</table>
Table 2: Performance of the monomodal-language (first block), monomodal-vision (second block), multimodal (third block), and proposed methods on the RESTORE test dataset. MLP refers to a multi-layer perceptron network. The reported results are the mean of three runs with different random seeds; the subscript denotes the corresponding standard deviation.
of the table) show that pre-trained language models are better at capturing depression symptoms than pre-trained vision models. We hypothesize that the existing vision models are pre-trained on generic IMAGENET (Deng et al., 2009) classes. Thus, these models lack the deeper semantic understanding of images required to effectively encode the memes' visual information and precisely distinguish the depression symptom categories. While our findings reveal that the monomodal (language) models perform better than the monomodal (vision) models, we found that multimodal models with sophisticated fusion mechanisms, such as VISUALBERT and MMBT, obtain significant improvements over BERT on multiple symptom categories. This signifies that visual content is helpful in accurately classifying depression symptoms if a better mechanism is used to fuse the visual information with the language features. Further, we observed that among all the competitive methods, our approach obtained the best performance in terms of the F1-score (cf. Table 3). For two classes (SH and LOI), MENTALBERT outperformed all the other models. We speculate that this
<table>
<tr><td>MODELS</td><td>LOI</td><td>FD</td><td>ED</td><td>SD</td><td>LOE</td><td>LSE</td><td>CP</td><td>SH</td><td>AVG</td></tr>
<tr><td>BERT (Devlin et al., 2019)</td><td>0.369<sub>0.0</sub></td><td>0.732<sub>0.01</sub></td><td>0.812<sub>0.03</sub></td><td>0.742<sub>0.02</sub></td><td>0.047<sub>0.04</sub></td><td>0.41<sub>0.07</sub></td><td>0.788<sub>0.01</sub></td><td>0.556<sub>0.01</sub></td><td>0.63<sub>0.01</sub></td></tr>
<tr><td>XLNET (Yang et al., 2019)</td><td>0.329<sub>0.04</sub></td><td>0.718<sub>0.03</sub></td><td>0.777<sub>0.07</sub></td><td>0.726<sub>0.04</sub></td><td>0.084<sub>0.06</sub></td><td>0.395<sub>0.11</sub></td><td>0.754<sub>0.04</sub></td><td>0.534<sub>0.07</sub></td><td>0.609<sub>0.05</sub></td></tr>
<tr><td>MLP+LIWC</td><td>0.159<sub>0.14</sub></td><td>0.558<sub>0.07</sub></td><td>0.161<sub>0.04</sub></td><td>0.151<sub>0.12</sub></td><td>0.121<sub>0.11</sub></td><td>0.265<sub>0.05</sub></td><td>0.253<sub>0.04</sub></td><td>0.249<sub>0.05</sub></td><td>0.311<sub>0.02</sub></td></tr>
<tr><td>BERT+LIWC</td><td>0.366<sub>0.05</sub></td><td>0.71<sub>0.01</sub></td><td>0.823<sub>0.03</sub></td><td>0.749<sub>0.04</sub></td><td>0.055<sub>0.1</sub></td><td>0.383<sub>0.04</sub></td><td>0.726<sub>0.06</sub></td><td>0.557<sub>0.05</sub></td><td>0.613<sub>0.01</sub></td></tr>
<tr><td>MENTALBERT (Ji et al., 2021)</td><td>0.405<sub>0.01</sub></td><td>0.722<sub>0.02</sub></td><td>0.821<sub>0.02</sub></td><td>0.739<sub>0.03</sub></td><td>0.117<sub>0.02</sub></td><td>0.405<sub>0.07</sub></td><td>0.759<sub>0.01</sub></td><td>0.603<sub>0.03</sub></td><td>0.631<sub>0.01</sub></td></tr>
<tr><td>ROBERTA (Liu et al., 2019)</td><td>0.348<sub>0.05</sub></td><td>0.71<sub>0.02</sub></td><td>0.811<sub>0.04</sub></td><td>0.785<sub>0.06</sub></td><td>0.112<sub>0.01</sub></td><td>0.344<sub>0.05</sub></td><td>0.732<sub>0.03</sub></td><td>0.535<sub>0.06</sub></td><td>0.611<sub>0.03</sub></td></tr>
<tr><td>DENSENET-161 (Iandola et al., 2014)</td><td>0.143<sub>0.05</sub></td><td>0.611<sub>0.01</sub></td><td>0.386<sub>0.01</sub></td><td>0.414<sub>0.03</sub></td><td>0.0<sub>0.0</sub></td><td>0.184<sub>0.12</sub></td><td>0.467<sub>0.03</sub></td><td>0.295<sub>0.02</sub></td><td>0.414<sub>0.01</sub></td></tr>
<tr><td>RESNET-152 (He et al., 2016)</td><td>0.163<sub>0.06</sub></td><td>0.57<sub>0.04</sub></td><td>0.398<sub>0.04</sub></td><td>0.425<sub>0.04</sub></td><td>0.0<sub>0.0</sub></td><td>0.155<sub>0.11</sub></td><td>0.43<sub>0.06</sub></td><td>0.327<sub>0.03</sub></td><td>0.394<sub>0.03</sub></td></tr>
<tr><td>RESNEXT-101 (Xie et al., 2017)</td><td>0.052<sub>0.04</sub></td><td>0.608<sub>0.03</sub></td><td>0.403<sub>0.01</sub></td><td>0.355<sub>0.05</sub></td><td>0.0<sub>0.0</sub></td><td>0.131<sub>0.04</sub></td><td>0.406<sub>0.06</sub></td><td>0.304<sub>0.02</sub></td><td>0.403<sub>0.01</sub></td></tr>
<tr><td>CONVNEXT (Liu et al., 2022)</td><td>0.129<sub>0.16</sub></td><td>0.615<sub>0.01</sub></td><td>0.35<sub>0.07</sub></td><td>0.467<sub>0.03</sub></td><td>0.0<sub>0.0</sub></td><td>0.089<sub>0.15</sub></td><td>0.306<sub>0.28</sub></td><td>0.305<sub>0.04</sub></td><td>0.395<sub>0.04</sub></td></tr>
<tr><td>MLP+HSV</td><td>0.208<sub>0.09</sub></td><td>0.462<sub>0.09</sub></td><td>0.179<sub>0.16</sub></td><td>0.181<sub>0.03</sub></td><td>0.179<sub>0.12</sub></td><td>0.113<sub>0.1</sub></td><td>0.215<sub>0.07</sub></td><td>0.14<sub>0.14</sub></td><td>0.274<sub>0.02</sub></td></tr>
<tr><td>REGNET (Schneider et al., 2017)</td><td>0.064<sub>0.04</sub></td><td>0.596<sub>0.01</sub></td><td>0.386<sub>0.02</sub></td><td>0.424<sub>0.03</sub></td><td>0.0<sub>0.0</sub></td><td>0.094<sub>0.06</sub></td><td>0.449<sub>0.04</sub></td><td>0.31<sub>0.02</sub></td><td>0.4<sub>0.01</sub></td></tr>
<tr><td>EFFICIENTNET (Tan and Le, 2019)</td><td>0.019<sub>0.03</sub></td><td>0.624<sub>0.0</sub></td><td>0.308<sub>0.07</sub></td><td>0.338<sub>0.07</sub></td><td>0.0<sub>0.0</sub></td><td>0.005<sub>0.01</sub></td><td>0.119<sub>0.21</sub></td><td>0.278<sub>0.02</sub></td><td>0.369<sub>0.03</sub></td></tr>
<tr><td>ViT (Dosovitskiy et al., 2020)</td><td>0.107<sub>0.14</sub></td><td>0.601<sub>0.02</sub></td><td>0.368<sub>0.06</sub></td><td>0.263<sub>0.06</sub></td><td>0.0<sub>0.0</sub></td><td>0.087<sub>0.15</sub></td><td>0.21<sub>0.21</sub></td><td>0.22<sub>0.13</sub></td><td>0.38<sub>0.02</sub></td></tr>
<tr><td>LATE FUSION</td><td>0.355<sub>0.01</sub></td><td>0.704<sub>0.01</sub></td><td>0.72<sub>0.06</sub></td><td>0.72<sub>0.0</sub></td><td>0.02<sub>0.04</sub></td><td>0.308<sub>0.16</sub></td><td>0.79<sub>0.02</sub></td><td>0.543<sub>0.02</sub></td><td>0.601<sub>0.01</sub></td></tr>
<tr><td>CONCAT BERT</td><td>0.367<sub>0.0</sub></td><td>0.727<sub>0.02</sub></td><td>0.837<sub>0.01</sub></td><td>0.718<sub>0.04</sub></td><td>0.027<sub>0.05</sub></td><td>0.401<sub>0.05</sub></td><td>0.761<sub>0.02</sub></td><td>0.554<sub>0.03</sub></td><td>0.62<sub>0.01</sub></td></tr>
<tr><td>BERT+HSV</td><td>0.356<sub>0.02</sub></td><td>0.712<sub>0.03</sub></td><td>0.819<sub>0.01</sub></td><td>0.72<sub>0.05</sub></td><td>0.082<sub>0.07</sub></td><td>0.392<sub>0.05</sub></td><td>0.756<sub>0.01</sub></td><td>0.588<sub>0.04</sub></td><td>0.618<sub>0.01</sub></td></tr>
<tr><td>VISUALBERT (Li et al., 2019)</td><td>0.373<sub>0.01</sub></td><td>0.729<sub>0.01</sub></td><td>0.811<sub>0.01</sub></td><td>0.747<sub>0.03</sub></td><td>0.086<sub>0.03</sub></td><td>0.401<sub>0.05</sub></td><td>0.775<sub>0.02</sub></td><td>0.539<sub>0.02</sub></td><td>0.627<sub>0.01</sub></td></tr>
<tr><td>MMBT (Kiela et al., 2019)</td><td>0.374<sub>0.04</sub></td><td>0.716<sub>0.01</sub></td><td>0.842<sub>0.02</sub></td><td>0.747<sub>0.05</sub></td><td>0.033<sub>0.06</sub></td><td>0.411<sub>0.06</sub></td><td>0.697<sub>0.08</sub></td><td>0.555<sub>0.01</sub></td><td>0.616<sub>0.01</sub></td></tr>
<tr><td>CLIP (Radford et al., 2021)</td><td>0.247<sub>0.21</sub></td><td>0.668<sub>0.02</sub></td><td>0.696<sub>0.12</sub></td><td>0.617<sub>0.09</sub></td><td>0.013<sub>0.02</sub></td><td>0.22<sub>0.18</sub></td><td>0.675<sub>0.11</sub></td><td>0.457<sub>0.08</sub></td><td>0.537<sub>0.01</sub></td></tr>
<tr><td>PROPOSED</td><td>0.381<sub>0.01</sub></td><td>0.739<sub>0.01</sub></td><td>0.824<sub>0.01</sub></td><td>0.769<sub>0.02</sub></td><td>0.08<sub>0.03</sub></td><td>0.447<sub>0.06</sub></td><td>0.79<sub>0.01</sub></td><td>0.589<sub>0.02</sub></td><td>0.651<sub>0.01</sub></td></tr>
</table>
Table 3: Class-wise performance of monomodal (language and vision), multimodal and proposed model on RESTORE test dataset.
<table>
<tr><td>Method</td><td>Precision</td><td>Recall</td><td>F1-score</td></tr>
<tr><td>Proposed Method</td><td>0.693<sub>0.01</sub></td><td>0.607<sub>0.01</sub></td><td>0.651<sub>0.01</sub></td></tr>
<tr><td>(-) Multimodal Fusion</td><td>0.671<sub>0.01</sub></td><td>0.594<sub>0.01</sub></td><td>0.639<sub>0.01</sub></td></tr>
<tr><td>(-) Orthogonal Feature</td><td>0.686<sub>0.01</sub></td><td>0.568<sub>0.01</sub></td><td>0.625<sub>0.01</sub></td></tr>
<tr><td>$h_{\perp}$ to $h_{\mathcal{T}}$ given $h_{\mathcal{I}}^{1}$</td><td>0.673<sub>0.01</sub></td><td>0.598<sub>0.01</sub></td><td>0.637<sub>0.01</sub></td></tr>
<tr><td>$h_{\perp}$ to $h_{\mathcal{T}}$ given $h_{\mathcal{I}}^{2}$</td><td>0.661<sub>0.01</sub></td><td>0.602<sub>0.01</sub></td><td>0.628<sub>0.01</sub></td></tr>
<tr><td>$h_{\perp}$ to $h_{\mathcal{I}}^{1}$ given $h_{\mathcal{T}}$</td><td>0.693<sub>0.01</sub></td><td>0.607<sub>0.01</sub></td><td>0.651<sub>0.01</sub></td></tr>
<tr><td>$h_{\perp}$ to $h_{\mathcal{I}}^{1}$ given $h_{\mathcal{I}}^{2}$</td><td>0.658<sub>0.01</sub></td><td>0.608<sub>0.01</sub></td><td>0.632<sub>0.01</sub></td></tr>
<tr><td>$h_{\perp}$ to $h_{\mathcal{I}}^{2}$ given $h_{\mathcal{T}}$</td><td>0.671<sub>0.01</sub></td><td>0.601<sub>0.01</sub></td><td>0.641<sub>0.01</sub></td></tr>
<tr><td>$h_{\perp}$ to $h_{\mathcal{I}}^{2}$ given $h_{\mathcal{I}}^{1}$</td><td>0.667<sub>0.01</sub></td><td>0.594<sub>0.01</sub></td><td>0.639<sub>0.01</sub></td></tr>
</table>
Table 4: Ablation results for the proposed approach.
is because a major portion of the corpus used to pre-train MENTALBERT was centered on suicide and stress. For the $LOE$ class, the basic MLP+HSV model performs best because memes of this category have higher grayscale and lower brightness values, which are effectively captured by the HSV features. Though some of these approaches perform well in a particular depression category, they could not translate their performance across all the categories. In contrast, our proposed model shows competitive performance across most categories, which signifies the superiority of our proposed approach.
Ablation Study. To analyze the role of each component of the proposed method, we performed an ablation study and report the results in Table 4 (top). We observe performance drops of 1.2 and 2.6 points in the F1-score upon removing the multimodal fusion and orthogonal components, respectively. This significant performance drop confirms the importance of each component in predicting the symptom categories. We also analyze the role (Table 4, bottom) of imposing the orthogonality constraint on the visual and textual features and find that the feature orthogonal to $h_{\mathcal{I}}^{1}$ given $h_{\mathcal{T}}$ performs better than the other variants.
# 7.1 Analysis and Observations
We conducted an in-depth human analysis of the models' predictions and derived the following observations:
(a) Language. We noticed that the correctly classified memes have clear depressive words. For example, consider Fig 3 (a): here the LM correctly predicted it as 'self-harm' because of the presence of the word 'dead' in the text. Such cases were relatively more frequent for the classes 'eating disorder' and 'sleeping disorder'.
(b) Vision. The vision models were also able to make correct predictions when a certain object in the meme correlated with a particular symptom class. For example, in Fig 3 (b), due to the presence of the 'cake', most of the models correctly predicted it as 'eating disorder'.
(c) Implied Meaning. We observed that most of the models fail to infer the implicit sense of memes. Fig 3 (c) shows an example of this error category made by all the models. Here, to correctly infer the depressive symptom 'lack of interest', it is crucial to consider both the text and the image, which share complementary information. However, the multimodal models fail to judiciously fuse this complementary information, leading to misclassification. The majority of the vision models predicted it as 'eating disorder', since the person is sitting on a dining chair and the models relate dining with eating.

Figure 3: Human analysis of the predictions obtained from the monomodal and multimodal approaches. Panels: (a) Language, (b) Vision, (c) Implied Meaning, (d) Artistic Texts, (e)-(f) Figurative Speech (one panel's meme reads: FRIEND: "Why don't you open up to people?" ME: ...), (g)-(h) Generic Images.

(d) Figurative Speech. The usage of figurative speech is highly predominant in memes, mainly to compete with other memes and gain the attention and engagement of followers. Our analysis reveals that both the monomodal and multimodal models were not capable of dealing with figurative memes. For example, in Fig 3 (e), the word 'loop' is used in a metaphoric sense, and neither the vision models nor the LMs understand this sense of 'loop' or relate the 'rope' to 'self-harm'.
(e) Artistic Texts. Another way of making a meme more appealing is to apply a variety of styling options to the text. This poses a unique challenge for the OCR system in correctly extracting all the text. For example, in Fig 3 (d), the OCR extracted the word 'changing' instead of 'hanging', leading to misclassification.
(f) Generic Images. We observed that a few images which share the same aesthetic features do not provide any symptom-specific visual cues. For example, in Fig 3 (g) and (h), if we consider the image alone, we can only infer that the person is feeling sad. In such cases, the linguistic features are crucial for identifying the correct depression symptom class.
# 8 Conclusion
This work presents the first study towards identifying fine-grained depression symptoms from memes. We created a high-quality dataset, RESTORE, consisting of 9,837 depressive memes annotated with PHQ-9 symptom categories, and benchmarked the dataset on 20 monomodal and multimodal models. Further, we introduced a novel method to incorporate multiple perspectives of the meme, which obtained the best F1-score over the other approaches. Finally, our thorough human analysis of the model predictions highlights the models' limitations in dealing with memes, which we will address in future work.
# 9 Limitations
This paper aims to make advancements toward automatically identifying fine-grained depressive symptoms from memes shared on social media. Although we used only those memes shared by users who self-declared themselves as depressive, we did not conduct any further clinical assessment to judge whether a user was depressive, nor did we clinically evaluate their depression severity. Therefore, deploying this system without expert advice could compromise patient safety and lead to undesirable outcomes. We further acknowledge that determining depression symptoms based on the visual and textual cues present in a meme can be subjective, and the created gold-standard dataset may therefore contain explicit and demographic biases. In this study, we focused on training the models using only social media data, leaving their performance on other medical data sources unexamined. Finally, our study is not intended to provide any diagnosis; instead, we envision the methods we provide being used as aids by healthcare professionals.
# 10 Ethical Consideration
Given that the created dataset is derived from social media and focuses on a sensitive mental health topic, we address various ethical concerns regarding user privacy and confidentiality, as inspired by Benton et al. (2017a), to access and analyze the data. We adhere to the data usage policies provided by Twitter and Reddit to crawl the public profiles of their users. To maintain user privacy, we anonymized the user profiles prior to annotation, and we did not keep any metadata that would disclose a user's identity. Further, we did not make any effort to interact with, deanonymize, or connect users on their other social media handles. The ethics review board approved the study under Human Subjects Research Exemption 4 because it is limited to publicly available social media posts. We believe that the created data will be highly beneficial to the community, and to avoid any misuse (Hovy and Spruit, 2016), we will share the data only with researchers who will not deanonymize any of the users and will follow all the ethical considerations established in this study.
# References
Minghui An, Jingjing Wang, Shoushan Li, and Guodong Zhou. 2020. Multimodal topic-enriched auxiliary learning for depression detection. In Proceedings of the 28th International Conference on Computational Linguistics, pages 1078-1089.

Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. 2016. Layer normalization. arXiv preprint arXiv:1607.06450.

Adrian Benton, Glen Coppersmith, and Mark Dredze. 2017a. Ethical research protocols for social media health research. In Proceedings of the First ACL Workshop on Ethics in Natural Language Processing, pages 94-102.

Adrian Benton, Margaret Mitchell, and Dirk Hovy. 2017b. Multitask learning for mental health conditions with limited social media data. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers, pages 152-162.

David E Bloom, Elizabeth Cafiero, Eva Jané-Llopis, Shafika Abrahams-Gessel, Lakshmi Reddy Bloom, Sana Fathima, Andrea B Feigl, Tom Gaziano, Ali Hamandi, Mona Mowafi, et al. 2012. The global economic burden of noncommunicable diseases. Technical report, Program on the Global Demography of Aging.

Mayo Clinic. 2022. Depression (major depressive disorder). Accessed: 2022-05-10.

Glen Coppersmith, Mark Dredze, Craig Harman, Kristy Hollingshead, and Margaret Mitchell. 2015. CLPsych 2015 shared task: Depression and PTSD on Twitter. In Proceedings of the 2nd Workshop on Computational Linguistics and Clinical Psychology: From Linguistic Signal to Clinical Reality, pages 31-39.

Glen Coppersmith, Kim Ngo, Ryan Leary, and Anthony Wood. 2016. Exploratory analysis of social media prior to a suicide attempt. In Proceedings of the Third Workshop on Computational Linguistics and Clinical Psychology, pages 106-117.

Munmun De Choudhury, Scott Counts, Eric J Horvitz, and Aaron Hoff. 2014. Characterizing and predicting postpartum depression from shared Facebook data. In Proceedings of the 17th ACM Conference on Computer Supported Cooperative Work & Social Computing, pages 626-638.

Munmun De Choudhury, Michael Gamon, Scott Counts, and Eric Horvitz. 2013. Predicting depression via social media. ICWSM, 13:1-10.

Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. 2009. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255. IEEE.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186.

Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations.

Jonathan Fine. 2006. Language in psychiatry: A handbook of clinical practice. Equinox London.

Alexandra Fleischmann, Elise Paul, Devora Kestel, Bochen Cao, Jessica Ho, and Wahyu Retno Mahanani. 2021. Suicide worldwide in 2019.

Jeffrey M Girard, Jeffrey F Cohn, Mohammad H Mahoor, S Mohammad Mavadati, Zakia Hammal, and Dean P Rosenwald. 2014. Nonverbal social withdrawal in depression: Evidence from manual and automatic analyses. Image and Vision Computing, 32(10):641-647.

Tao Gui, Liang Zhu, Qi Zhang, Minlong Peng, Xu Zhou, Keyu Ding, and Zhigang Chen. 2019. Cooperative multimodal approach to depression detection in Twitter. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 110-117.

Craig Harman and Mark H Dredze. 2014. Measuring post traumatic stress disorder in Twitter. In ICWSM.

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770-778.

The Lancet Global Health. 2020. Mental health matters. The Lancet Global Health, 8(11):e1352.

Dirk Hovy and Shannon L Spruit. 2016. The social impact of natural language processing. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 591-598.

Xiaolei Huang, Xin Li, Tianli Liu, David Chiu, Tingshao Zhu, and Lei Zhang. 2015. Topic model for identifying suicidal ideation in Chinese microblog. In Proceedings of the 29th Pacific Asia Conference on Language, Information and Computation, pages 553-562.

Forrest Iandola, Matt Moskewicz, Sergey Karayev, Ross Girshick, Trevor Darrell, and Kurt Keutzer. 2014. DenseNet: Implementing efficient convnet descriptor pyramids. arXiv preprint arXiv:1404.1869.

Shaoxiong Ji, Tianlin Zhang, Luna Ansari, Jie Fu, Prayag Tiwari, and Erik Cambria. 2021. MentalBERT: Publicly available pretrained language models for mental healthcare. arXiv preprint arXiv:2110.15621.

Christian Karmen, Robert C Hsiung, and Thomas Wetter. 2015. Screening internet forum participants for depression symptoms by assembling and enhancing multiple NLP methods. Computer Methods and Programs in Biomedicine, 120(1):27-36.

Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Ethan Perez, and Davide Testuggine. 2019. Supervised multimodal bitransformers for classifying images and text. arXiv preprint arXiv:1909.02950.

Klaus Krippendorff. 2004. Measuring the reliability of qualitative text analysis data. Quality and Quantity, 38:787-800.

Kurt Kroenke and Robert L Spitzer. 2002. The PHQ-9: A new depression diagnostic and severity measure.

Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. 2019. VisualBERT: A simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557.

Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692.

Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. 2022. A ConvNet for the 2020s. arXiv preprint arXiv:2201.03545.

Ilya Loshchilov and Frank Hutter. 2018. Decoupled weight decay regularization. In International Conference on Learning Representations.

Krishanu Maity, Prince Jha, Sriparna Saha, and Pushpak Bhattacharyya. 2022. A multitask framework for sentiment, emotion and sarcasm aware cyberbullying detection from multi-modal code-mixed memes.

Lydia Manikonda and Munmun De Choudhury. 2017. Modeling and understanding visual attributes of mental health disclosures in social media. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems, pages 170-181. ACM.

Ahmed Husseini Orabi, Prasadith Buddhitha, Mahmoud Husseini Orabi, and Diana Inkpen. 2018. Deep learning for depression detection of Twitter users. In Proceedings of the Fifth Workshop on Computational Linguistics and Clinical Psychology: From Keyboard to Clinic, pages 88-97.

Shraman Pramanick, Md Shad Akhtar, and Tanmoy Chakraborty. 2021a. Exercise? I thought you said 'extra fries': Leveraging sentence demarcations and multi-hop attention for meme affect analysis. In ICWSM, pages 513-524.

Shraman Pramanick, Shivam Sharma, Dimitar Dimitrov, Md Shad Akhtar, Preslav Nakov, and Tanmoy Chakraborty. 2021b. MOMENTA: A multimodal framework for detecting harmful memes and their targets. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4439-4455.

Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR.

Wasifur Rahman, Md Kamrul Hasan, Sangwu Lee, Amir Zadeh, Chengfeng Mao, Louis-Philippe Morency, and Ehsan Hoque. 2020. Integrating multimodal information in large pretrained transformers. In Proceedings of the conference. Association for Computational Linguistics. Meeting, volume 2020, page 2359. NIH Public Access.

Nairan Ramirez-Esparza, Cindy Chung, Ewa Kacewicz, and James Pennebaker. 2008. The psychology of word use in depression forums in English and in Spanish: Testing two text analytic approaches. In Proceedings of the International AAAI Conference on Web and Social Media, volume 2, pages 102-108.

Andrew G Reece and Christopher M Danforth. 2017. Instagram photos reveal predictive markers of depression. EPJ Data Science, 6(1):15.

Philip Resnik, William Armstrong, Leonardo Claudino, Thang Nguyen, Viet-An Nguyen, and Jordan Boyd-Graber. 2015. Beyond LDA: Exploring supervised topic modeling for depression-related language in Twitter. In Proceedings of the 2nd Workshop on Computational Linguistics and Clinical Psychology: From Linguistic Signal to Clinical Reality, pages 99-107.

Fabien Ringeval, Björn Schuller, Michel Valstar, Roddy Cowie, Heysem Kaya, Maximilian Schmitt, Shahin Amiriparian, Nicholas Cummins, Denis Lalanne, Adrien Michaud, et al. 2018. AVEC 2018 workshop and challenge: Bipolar disorder and cross-cultural affect recognition. In Proceedings of the 2018 Audio/Visual Emotion Challenge and Workshop, pages 3-13.

Stefan Scherer, Giota Stratou, Marwa Mahmoud, Jill Boberg, Jonathan Gratch, Albert Rizzo, and Louis-Philippe Morency. 2013. Automatic behavior descriptors for psychological disorder analysis. In 2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), pages 1-8. IEEE.

Nick Schneider, Florian Piewak, Christoph Stiller, and Uwe Franke. 2017. RegNet: Multimodal sensor registration using deep neural networks. In 2017 IEEE Intelligent Vehicles Symposium (IV), pages 1803-1810. IEEE.

Chhavi Sharma, Deepesh Bhageria, William Scott, Srinivas Pykl, Amitava Das, Tanmoy Chakraborty, Viswanath Pulabaigari, and Björn Gambäck. 2020. SemEval-2020 task 8: Memotion analysis - the visuo-lingual metaphor! In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 759-773.

Jo Anne Sirey, Martha L Bruce, George S Alexopoulos, Deborah A Perlick, Steven J Friedman, and Barnett S Meyers. 2001. Stigma as a barrier to recovery: Perceived stigma and patient-rated severity of illness as predictors of antidepressant drug adherence. Psychiatric Services, 52(12):1615-1620.

Mingxing Tan and Quoc Le. 2019. EfficientNet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, pages 6105-6114. PMLR.

Yla R Tausczik and James W Pennebaker. 2010. The psychological meaning of words: LIWC and computerized text analysis methods. Journal of Language and Social Psychology, 29(1):24-54.

Sho Tsugawa, Yusuke Kikuchi, Fumio Kishino, Kosuke Nakajima, Yuichi Itoh, and Hiroyuki Ohsaki. 2015. Recognizing depression from Twitter activity. In Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems, pages 3187-3196. ACM.

Michel Valstar, Björn Schuller, Kirsty Smith, Timur Almaev, Florian Eyben, Jarek Krajewski, Roddy Cowie, and Maja Pantic. 2014. AVEC 2014: 3D dimensional affect and depression recognition challenge. In Proceedings of the 4th International Workshop on Audio/Visual Emotion Challenge, pages 3-10.

Michel Valstar, Björn Schuller, Kirsty Smith, Florian Eyben, Bihan Jiang, Sanjay Bilakhia, Sebastian Schnieder, Roddy Cowie, and Maja Pantic. 2013. AVEC 2013: The continuous audio/visual emotion and depression recognition challenge. In Proceedings of the 3rd ACM International Workshop on Audio/Visual Emotion Challenge, pages 3-10.

WHO. 2022. World Mental Health Day: an opportunity to kick-start a massive scale-up in investment in mental health. Accessed: 2022-05-10.

Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45.

Saining Xie, Ross Girshick, Piotr Dollár, Zhuowen Tu, and Kaiming He. 2017. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1492-1500.

Shweta Yadav, Jainish Chauhan, Joy Prakash Sain, Krishnaprasad Thirunarayan, Amit Sheth, and Jeremiah Schumm. 2020. Identifying depressive symptoms from tweets: Figurative language enabled multitask learning framework. In Proceedings of the 28th International Conference on Computational Linguistics, pages 696-709.

Shweta Yadav, Asif Ekbal, Sriparna Saha, Pushpak Bhattacharyya, and Amit Sheth. 2018. Multi-task learning framework for mining crowd intelligence towards clinical treatment. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 271-277.

Shweta Yadav, Usha Lokala, Raminta Daniulaityte, Krishnaprasad Thirunarayan, Francois Lamy, and Amit Sheth. 2021. "When they say weed causes depression, but it's your fav antidepressant": Knowledge-aware attention framework for relationship extraction. PLoS ONE, 16(3):e0248299.

Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. Advances in Neural Information Processing Systems, 32.

Andrew Yates, Arman Cohan, and Nazli Goharian. 2017. Depression and self-harm risk assessment in online forums. arXiv preprint arXiv:1709.01848.

Amir Hossein Yazdavar, Hussein S Al-Olimat, Monireh Ebrahimi, Goonmeet Bajaj, Tanvi Banerjee, Krishnaprasad Thirunarayan, Jyotishman Pathak, and Amit Sheth. 2017. Semi-supervised approach to monitoring clinical depressive symptoms in social media. In Proceedings of the 2017 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining 2017, pages 1191-1198. ACM.

Shi Yin, Cong Liang, Heyan Ding, and Shangfei Wang. 2019. A multi-modal hierarchical recurrent neural network for depression detection. In Proceedings of the 9th International Audio/Visual Emotion Challenge and Workshop, pages 65-71.

Yipeng Zhang, Hanjia Lyu, Yubao Liu, Xiyang Zhang, Yu Wang, and Jiebo Luo. 2020. Monitoring depression trend on Twitter during the COVID-19 pandemic. arXiv preprint arXiv:2007.00228.

Yi Zhou, Zhenhao Chen, and Huiyuan Yang. 2021. Multimodal learning for hateful memes detection. In 2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW), pages 1-6. IEEE.

Yu Zhu, Yuanyuan Shang, Zhuhong Shao, and Guodong Guo. 2017. Automated depression diagnosis based on deep networks to encode facial appearance and dynamics. IEEE Transactions on Affective Computing, 9(4):578-584.

# A PHQ-9 Depression Symptom Categories
The following are the definitions of the depression symptom categories, as provided by the Mayo Clinic (Clinic, 2022):
1. Loss of Interest: A decline in interest or pleasure in the majority or all normal activities, such as sexual activity, hobbies, or sports.
2. Feeling Down: Feelings of sadness, tearfulness, emptiness, or hopelessness.
3. Sleeping Disorder: Sleep disturbances, including insomnia, sleeping too much, or trouble falling or staying asleep.
4. Lack of Energy: Tiredness and lack of energy, so even small tasks take extra effort.
5. Eating Disorder: Reduced appetite and weight loss, or increased cravings for food and weight gain.
6. Low Self-Esteem: Feelings of worthlessness or guilt, fixating on past failures or self-blame.
7. Concentration Problem: Trouble thinking, concentrating, making decisions, and remembering things.
8. Self-Harm: Frequent or recurrent thoughts of death, suicidal thoughts, suicide attempts, or suicide.
# B RESTORE Dataset Analysis
# B.1 PHQ-9 Symptom Co-occurrence.
Given that a single meme can have multiple depressive symptoms, we analyzed which symptoms occur together in a similar context through a co-occurrence heatmap, depicted in Figure 4. As can be observed, most of the samples had a single symptom. Only a few symptoms, such as "feeling down", are more likely to occur with other symptoms, frequently with "lack of self-esteem", "self-harm", and "lack of energy". This is because these symptoms share common overlapping expressions with the more generic "feeling down" symptom. We also noticed a few cases where users expressing self-harm behaviors suffer from low self-esteem issues; a similar trend was observed for eating disorder. Surprisingly, we observed a few uncommon co-occurrences, for instance, "concentration problem" and "self-harm".
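
The co-occurrence counts behind such a heatmap can be computed directly from the multi-hot labels; the following sketch assumes an (N, 8) binary label matrix over the eight PHQ-9 categories, which is our assumed encoding of the annotations.

```python
# Symptom co-occurrence counts, as visualized in Figure 4 (a sketch).
import numpy as np

labels = np.random.randint(0, 2, size=(100, 8))  # placeholder multi-hot labels
cooc = labels.T @ labels  # cooc[i, j]: memes annotated with both symptoms i and j
# The diagonal holds per-symptom frequencies; off-diagonal entries reveal
# pairs such as "feeling down" with "lack of self-esteem".
```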
Figure 4: PHQ-9 Symptom Co-occurrence.
We also provide the distribution of memes with faces detected using the Face++ API in Fig. 5. The analysis reveals that, among all the depression symptom categories, eating disorder memes contain the highest proportion of faces ($60\%$) and sleeping disorder memes the lowest ($28\%$).
Figure 5: Distribution of the identified faces on respective depression symptom categories in the RESTORE dataset.
A For every submission:

A1. Did you describe the limitations of your work? 9

A2. Did you discuss any potential risks of your work? 9

A3. Do the abstract and introduction summarize the paper's main claims?

A4. Have you used AI writing assistants when working on this paper? Left blank.

B Did you use or create scientific artifacts? 3,4,5

B1. Did you cite the creators of artifacts you used? 3,4,5

B2. Did you discuss the license or terms for use and / or distribution of any artifacts? 6

B3. Did you discuss if your use of existing artifact(s) was consistent with their intended use, provided that it was specified? For the artifacts you create, do you specify intended use and whether that is compatible with the original access conditions (in particular, derivatives of data accessed for research purposes should not be used outside of research contexts)? 6

B4. Did you discuss the steps taken to check whether the data that was collected / used contains any information that names or uniquely identifies individual people or offensive content, and the steps taken to protect / anonymize it? 10

B5. Did you provide documentation of the artifacts, e.g., coverage of domains, languages, and linguistic phenomena, demographic groups represented, etc.? 3,4

B6. Did you report relevant statistics like the number of examples, details of train / test / dev splits, etc. for the data that you used / created? Even for commonly-used benchmark datasets, include the number of examples in train / validation / test splits, as these provide necessary context for a reader to understand experimental results. For example, small differences in accuracy on large test sets may be significant, while on small test sets they may not be. 3

C Did you run computational experiments? 4,5,6,7

C1. Did you report the number of parameters in the models used, the total computational budget (e.g., GPU hours), and computing infrastructure used? 6

C2. Did you discuss the experimental setup, including hyperparameter search and best-found hyperparameter values? 6

C3. Did you report descriptive statistics about your results (e.g., error bars around results, summary statistics from sets of experiments), and is it transparent whether you are reporting the max, mean, etc. or just a single run?

C4. If you used existing packages (e.g., for preprocessing, for normalization, or for evaluation), did you report the implementation, model, and parameter settings used (e.g., NLTK, Spacy, ROUGE, etc.)? 6

D Did you use human annotators (e.g., crowdworkers) or research with human participants? 3

D1. Did you report the full text of instructions given to participants, including e.g., screenshots, disclaimers of any risks to participants or annotators, etc.? 3, Appendix A

D2. Did you report information about how you recruited (e.g., crowdsourcing platform, students) and paid participants, and discuss if such payment is adequate given the participants' demographic (e.g., country of residence)? Not applicable. Annotators are authors of the paper.

D3. Did you discuss whether and how consent was obtained from people whose data you're using/curating? For example, if you collected data via crowdsourcing, did your instructions to crowdworkers explain how the data would be used? 3,10

D4. Was the data collection protocol approved (or determined exempt) by an ethics review board? 10

D5. Did you report the basic demographic and geographic characteristics of the annotator population that is the source of the data? Annotators are co-authors of this paper.

The Responsible NLP Checklist used at ACL 2023 is adopted from NAACL 2022, with the addition of a question on AI writing assistance.

2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f2f9a3b2acd51d28bf38d83588a0ca3efbe3aa78dbdfcc1fe6e6b772397c095
+size 595798

2023/Towards Identifying Fine-Grained Depression Symptoms from Memes/layout.json
ADDED
The diff for this file is too large to render. See raw diff.

2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/e0a7a239-b27a-4248-b7cc-b08e9f53d50c_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.

2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/e0a7a239-b27a-4248-b7cc-b08e9f53d50c_model.json
ADDED
The diff for this file is too large to render. See raw diff.

2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/e0a7a239-b27a-4248-b7cc-b08e9f53d50c_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74bcd11b693c8d33a35510b47f58a1b4d2c13095aa6c54e2024dfabe7944db2e
+size 1272924

2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/full.md
ADDED
The diff for this file is too large to render. See raw diff.

2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0dc1543fa592964f62239ea60179d126f55dfb65dd97c87780769aa11b1fd0f8
+size 1498622

2023/Towards Leaving No Indic Language Behind_ Building Monolingual Corpora, Benchmark and Models for Indic Languages/layout.json
ADDED
The diff for this file is too large to render. See raw diff.

2023/Towards Open-World Product Attribute Mining_ A Lightly-Supervised Approach/fe9cab5a-1dad-4066-bceb-65ab3c7270b6_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.