Add Batch 01eef8e9-0a32-4b51-9ba2-dad1b944f35a
- theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/e7017988-c53d-4af3-854d-3b40270bf2a7_content_list.json +3 -0
- theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/e7017988-c53d-4af3-854d-3b40270bf2a7_model.json +3 -0
- theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/e7017988-c53d-4af3-854d-3b40270bf2a7_origin.pdf +3 -0
- theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/full.md +1006 -0
- theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/images.zip +3 -0
- theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/layout.json +3 -0
- timesensitiveknowledgeeditingthroughefficientfinetuning/cb482458-6d99-4805-ba5e-87d28e91cfdf_content_list.json +3 -0
- timesensitiveknowledgeeditingthroughefficientfinetuning/cb482458-6d99-4805-ba5e-87d28e91cfdf_model.json +3 -0
- timesensitiveknowledgeeditingthroughefficientfinetuning/cb482458-6d99-4805-ba5e-87d28e91cfdf_origin.pdf +3 -0
- timesensitiveknowledgeeditingthroughefficientfinetuning/full.md +254 -0
- timesensitiveknowledgeeditingthroughefficientfinetuning/images.zip +3 -0
- timesensitiveknowledgeeditingthroughefficientfinetuning/layout.json +3 -0
- towardsartworkexplanationinlargescalevisionlanguagemodels/b7415f18-9f5f-4e4c-adfc-34dd1496a6ef_content_list.json +3 -0
- towardsartworkexplanationinlargescalevisionlanguagemodels/b7415f18-9f5f-4e4c-adfc-34dd1496a6ef_model.json +3 -0
- towardsartworkexplanationinlargescalevisionlanguagemodels/b7415f18-9f5f-4e4c-adfc-34dd1496a6ef_origin.pdf +3 -0
- towardsartworkexplanationinlargescalevisionlanguagemodels/full.md +0 -0
- towardsartworkexplanationinlargescalevisionlanguagemodels/images.zip +3 -0
- towardsartworkexplanationinlargescalevisionlanguagemodels/layout.json +3 -0
- twoissueswithchinesespellingcorrectionandarefinementsolution/8aaa89ff-b6c1-4ed9-9353-20b32721bbe9_content_list.json +3 -0
- twoissueswithchinesespellingcorrectionandarefinementsolution/8aaa89ff-b6c1-4ed9-9353-20b32721bbe9_model.json +3 -0
- twoissueswithchinesespellingcorrectionandarefinementsolution/8aaa89ff-b6c1-4ed9-9353-20b32721bbe9_origin.pdf +3 -0
- twoissueswithchinesespellingcorrectionandarefinementsolution/full.md +151 -0
- twoissueswithchinesespellingcorrectionandarefinementsolution/images.zip +3 -0
- twoissueswithchinesespellingcorrectionandarefinementsolution/layout.json +3 -0
- ultrasparsebert99conditionallysparselanguagemodelling/7f72abc4-afd5-4cdd-826e-6f1657431a0a_content_list.json +3 -0
- ultrasparsebert99conditionallysparselanguagemodelling/7f72abc4-afd5-4cdd-826e-6f1657431a0a_model.json +3 -0
- ultrasparsebert99conditionallysparselanguagemodelling/7f72abc4-afd5-4cdd-826e-6f1657431a0a_origin.pdf +3 -0
- ultrasparsebert99conditionallysparselanguagemodelling/full.md +157 -0
- ultrasparsebert99conditionallysparselanguagemodelling/images.zip +3 -0
- ultrasparsebert99conditionallysparselanguagemodelling/layout.json +3 -0
- understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/ff6f464f-0c0b-42c1-a2de-94b3fa078b6f_content_list.json +3 -0
- understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/ff6f464f-0c0b-42c1-a2de-94b3fa078b6f_model.json +3 -0
- understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/ff6f464f-0c0b-42c1-a2de-94b3fa078b6f_origin.pdf +3 -0
- understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/full.md +227 -0
- understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/images.zip +3 -0
- understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/layout.json +3 -0
- whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/c272cfd8-04ba-4e1c-8443-caf5ebe4c383_content_list.json +3 -0
- whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/c272cfd8-04ba-4e1c-8443-caf5ebe4c383_model.json +3 -0
- whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/c272cfd8-04ba-4e1c-8443-caf5ebe4c383_origin.pdf +3 -0
- whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/full.md +556 -0
- whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/images.zip +3 -0
- whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/layout.json +3 -0
- whatdoesparameterfreeprobingreallyuncover/4d7b0ef9-25d0-4003-b68a-b056ef9853ab_content_list.json +3 -0
- whatdoesparameterfreeprobingreallyuncover/4d7b0ef9-25d0-4003-b68a-b056ef9853ab_model.json +3 -0
- whatdoesparameterfreeprobingreallyuncover/4d7b0ef9-25d0-4003-b68a-b056ef9853ab_origin.pdf +3 -0
- whatdoesparameterfreeprobingreallyuncover/full.md +287 -0
- whatdoesparameterfreeprobingreallyuncover/images.zip +3 -0
- whatdoesparameterfreeprobingreallyuncover/layout.json +3 -0
- zeroshotcrosslingualrerankingwithlargelanguagemodelsforlowresourcelanguages/3b48e9e0-529b-4b40-a25d-9321ae86f716_content_list.json +3 -0
- zeroshotcrosslingualrerankingwithlargelanguagemodelsforlowresourcelanguages/3b48e9e0-529b-4b40-a25d-9321ae86f716_model.json +3 -0
theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/e7017988-c53d-4af3-854d-3b40270bf2a7_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9cb9c126e9419e44b13657dafe4c67421ba46e8da57611fa9b68b5937106818
+size 170967
theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/e7017988-c53d-4af3-854d-3b40270bf2a7_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a96fa41e0b11d48b0ab55079f5aeec875d941e04ea7d1197aa301b17317dc755
+size 208695
theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/e7017988-c53d-4af3-854d-3b40270bf2a7_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:535f4f984852d4eaad2391f97c08900a20749d7818d44927f11e95c4e3f0af9b
+size 712811
theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/full.md
ADDED
@@ -0,0 +1,1006 @@
# The Probabilities Also Matter: A More Faithful Metric for Faithfulness of Free-Text Explanations in Large Language Models

Noah Y. Siegel
Google DeepMind, University College London
siegeln@google.com

Oana-Maria Camburu
University College London

Nicolas Heess
Google DeepMind

Maria Perez-Ortiz
University College London
# Abstract

In order to oversee advanced AI systems, it is important to understand their underlying decision-making process. When prompted, large language models (LLMs) can provide natural language explanations or reasoning traces that sound plausible and receive high ratings from human annotators. However, it is unclear to what extent these explanations are faithful, i.e., truly capture the factors responsible for the model's predictions. In this work, we introduce Correlational Explanatory Faithfulness (CEF), a metric that can be used in faithfulness tests based on input interventions. Previous metrics used in such tests take into account only binary changes in the predictions. Our metric accounts for the total shift in the model's predicted label distribution, more accurately reflecting the explanations' faithfulness. We then introduce the Correlational Counterfactual Test (CCT) by instantiating CEF on the Counterfactual Test (CT) from Atanasova et al. (2023). We evaluate the faithfulness of free-text explanations generated by few-shot-prompted LLMs from the Llama2 family on three NLP tasks. We find that our metric measures aspects of faithfulness which the CT misses.
# 1 Introduction

In many applications of ML systems it is important to understand why the system came to a particular answer (Rudin, 2018), and the field of explainable AI attempts to provide this understanding. However, relying on subjective human assessment of explanations can be misleading: humans sometimes prefer interpretability techniques that provide little information about model predictions (Adebayo et al., 2018). It is therefore important to clearly assess the extent to which explanations inform us about ML systems, both for current high-stakes applications such as medicine and criminal justice (Rudin, 2018), and for potential scenarios involving highly general systems (Shah et al., 2022; Ngo et al., 2023; Ward et al., 2023). If we can ensure that explanations are faithful to the inner workings of the models, we could use the explanations as a channel for oversight, scanning them for elements we do not approve of, e.g. racial or gender bias, deception, or power-seeking (Lanham, 2022).

We make the following contributions:

1. We argue that in order to be informatively faithful, it is not enough to test whether explanations mention significant factors: we also need to test whether they mention significant factors more often than insignificant ones.
2. We introduce Correlational Explanatory Faithfulness (CEF), a novel faithfulness metric that improves upon prior work by capturing both the degree of impact of input features and the difference in explanation mention frequency between impactful and non-impactful factors.
3. We introduce the Correlational Counterfactual Test (CCT), where we instantiate CEF on the Counterfactual Test (CT) from Atanasova et al. (2023) and use statistical distance between predictions to measure impact.
4. We run experiments with the Llama2 family of LLMs on three datasets and demonstrate that CCT captures faithfulness trends that the existing faithfulness metric used in the CT misses.
# 2 Related Work

There has been much discussion on what it means for an explanation to be "faithful". Jacovi and Goldberg (2020) survey literature on the term and define an explanation as faithful insofar as it "accurately represents the reasoning process behind the model's prediction". Wiegreffe and Marasovic (2021) review datasets for explainable NLP and identify three predominant classes of textual explanations: highlights (also called extractive explanations), free-text (also called natural language explanations or NLEs), and structured. Prior work on faithfulness has mostly focused on highlights and NLEs. We chose to focus on NLEs in this work because highlight-based explanations are highly restrictive in what they can communicate (Camburu et al., 2021; Wiegreffe et al., 2020), while NLEs allow models to produce justifications that are as expressive as necessary (e.g. they can mention background knowledge that is not present in the input but that the model made use of for its prediction). Moreover, there is increasing work on NLEs in high-stakes areas, such as healthcare (Kayser et al., 2022), where having faithful explanations is crucial.

Parcalabescu and Frank (2023) review a range of recent NLE faithfulness tests and claim that many are instead measuring "self-consistency". See Appendix C for further discussion.

# 2.1 "Explanatory" vs. "Causal" Faithfulness

We identify two types of faithfulness being researched in the literature, which we refer to as "explanatory" and "causal". Explanatory faithfulness asks the question: does the explanation reflect the decision-making process of the model? This is often measured by intervening on the input, such as with the metrics of sufficiency and comprehensiveness for highlight-based explanations (DeYoung et al., 2019; Camburu et al., 2021), or the counterfactual test (CT) for NLEs (Atanasova et al., 2023). Causal faithfulness adds the criterion: does the model's prediction causally depend on the generated reasoning trace (Creswell and Shanahan, 2022; Lanham et al., 2023; Radhakrishnan et al., 2023; Turpin et al., 2023)? Causal faithfulness requires structural restrictions on the prediction system (at a minimum, that the explanation is generated before the prediction), such as in chain-of-thought (Wei et al., 2023) or selection-inference (Creswell et al., 2022). Explanatory faithfulness, however, can be measured for a more general class of rationales, including post-hoc explanations (DeYoung et al., 2019; Atanasova et al., 2023). We focus on explanatory faithfulness in this work; see Appendix A for further discussion of causal faithfulness.

Some authors also distinguish between "explainability" and "interpretability/transparency" as approaches for understanding models (e.g. Rudin (2018)). While the concept of faithfulness is applicable to both approaches, we primarily focus on "explainability" in this work. See Appendix B for further discussion.
# 2.2 The Counterfactual Test

In order to measure whether an explanation captures the true factors responsible for a model's prediction, we need to know which factors are relevant. However, deep neural networks like LLMs are often difficult to interpret (Fan et al., 2020).

To address this problem, Atanasova et al. (2023) introduce the Counterfactual Test (CT). The CT inserts some text into an input query, which we refer to as an interventional addition (IA). If the model's prediction changes, then the IA was relevant to the model's new prediction, and we check if it is mentioned in the new explanation. Counterfactual edits have the advantage of easily generating features that we know are relevant to the model's prediction. We choose to focus our analysis on this method, and identify ways to improve it.
# 3 Methods

We identify two significant drawbacks of the CT:

1. It does not test whether impactful features are more likely to be mentioned than less impactful ones. There is a trivial strategy that achieves $0\%$ unfaithfulness as measured by the CT: repeat all input text verbatim as the explanation, so that explanations never fail to mention the IA. This points to an important property: explanations are informative only if they both mention impactful features and leave out non-impactful ones.
2. It measures impactfulness as binary, i.e. whether the intervention changes the model's top predicted label. This ignores changes in the model's predicted class likelihoods: it would label an intervention that changes the predicted probability of a class from $49\%$ to $51\%$ as relevant, while an intervention that changes the probability from $1\%$ to $49\%$ would be labelled as irrelevant, even though the latter caused a larger shift.
To address these drawbacks, we propose the metric Correlational Explanatory Faithfulness (CEF), which can be applied to any test with three given properties:

1. An intervention: a function mapping an input example to its modified version.
2. A prediction impact measure: a function mapping an input example, intervention, and model to a scalar representing how impactful the intervention was on the model's prediction. We call the output of this function the prediction impact, $\mathcal{I}$.
3. An explanation mention measure: a function mapping an input example, intervention, and explanation to a scalar representing the extent to which the explanation attributes importance to the intervened factors. We call the output of this function the mention importance, $\mathcal{M}$.

<table><tr><td></td><td>Input Example</td><td>Model Prediction</td><td>Model Explanation</td></tr><tr><td>Before Intervention</td><td>TEXT: Three people are riding a carriage pulled by four horses. HYPOTHESIS: The horses are scrawny.</td><td>entailment: 0.1%, neutral: 75.0%, contradiction: 23.6%</td><td>The horses could be scrawny or not.</td></tr><tr><td>After Intervention</td><td>TEXT: Three people are riding a carriage pulled by four joyous horses. HYPOTHESIS: The horses are scrawny.</td><td>entailment: 0.1%, neutral: 4.9%, contradiction: 94.4%</td><td>The horses are joyous, so they are not scrawny.</td></tr><tr><td></td><td>Intervention: inserted "joyous"</td><td>Intervention Impact: TVD = 0.7</td><td>Explanation Mention: True</td></tr></table>

Table 1: Illustration of the Correlational Counterfactual Test (CCT), our instantiation of Correlational Explanatory Faithfulness, on an example from e-SNLI. We measure the impact of an intervention by the total variation distance (TVD) between the model's predictions before and after the intervention. We then compute CCT as the correlation between intervention impact and explanation mention over multiple examples. Predictions and explanations are given by Llama2 70B. See Appendix E for additional examples of interventions and their impact.
If an intervention has higher prediction impact, a faithful explanation should assign it higher mention importance. We quantify this relationship by measuring the Pearson correlation coefficient between prediction impact and mention importance:

$$
\mathrm{CEF} = \frac{\sum_{i=1}^{n}\left(\mathcal{I}_{i} - \bar{\mathcal{I}}\right)\left(\mathcal{M}_{i} - \bar{\mathcal{M}}\right)}{\sqrt{\sum_{i=1}^{n}\left(\mathcal{I}_{i} - \bar{\mathcal{I}}\right)^{2}}\,\sqrt{\sum_{i=1}^{n}\left(\mathcal{M}_{i} - \bar{\mathcal{M}}\right)^{2}}} \tag{1}
$$

where $\bar{x} = \frac{1}{n}\sum_{i=1}^{n}x_{i}$ (the sample mean). Being a correlation, CEF lies in the interval $[-1,1]$, with 0 indicating no relationship and positive values indicating higher mention importance for more impactful interventions.
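As a minimal sketch (our illustration, not code from the paper), Eq. (1) can be computed with NumPy; `impacts` and `mentions` are assumed arrays holding one prediction impact $\mathcal{I}_i$ and one mention importance $\mathcal{M}_i$ per intervention:

```python
import numpy as np

def cef(impacts: np.ndarray, mentions: np.ndarray) -> float:
    """Correlational Explanatory Faithfulness (Eq. 1): the Pearson
    correlation between prediction impacts I and mention importances M."""
    i_c = impacts - impacts.mean()    # I_i minus the sample mean of I
    m_c = mentions - mentions.mean()  # M_i minus the sample mean of M
    return float((i_c * m_c).sum()
                 / (np.sqrt((i_c ** 2).sum()) * np.sqrt((m_c ** 2).sum())))
```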
We can then apply this metric to the CT, which gives us the Correlational Counterfactual Test (CCT). In our work, the intervention inserts an IA. To quantify the degree of prediction impact in a continuous manner, we measure the total shift in the model's predictions due to the IA. There are a number of ways to measure shifts in probability distributions over discrete classes; we use the total variation distance (TVD), i.e.:

$$
\mathrm{TVD}(P, Q) = \frac{1}{2}\sum_{x}|P(x) - Q(x)| \tag{2}
$$

where $P$ and $Q$ are probability distributions over discrete classes. We take $P$ and $Q$ to be the model's predicted distributions before and after the intervention, so that TVD measures the absolute change in probabilities assigned to each class. Compared to other common statistical distances such as the relative entropy (KL divergence), TVD gives less weight to shifts between very small probabilities (which are unlikely to impact classification) and has the advantage of symmetry.
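As a sketch (ours, with NumPy assumed), TVD takes a couple of lines; the example distributions below are the before/after predictions from Table 1, recovering its TVD of roughly 0.7:

```python
import numpy as np

def tvd(p: np.ndarray, q: np.ndarray) -> float:
    """Total variation distance between two discrete distributions (Eq. 2)."""
    return 0.5 * float(np.abs(p - q).sum())

# Predicted (entailment, neutral, contradiction) probabilities from Table 1.
before = np.array([0.001, 0.750, 0.236])
after = np.array([0.001, 0.049, 0.944])
print(tvd(before, after))  # ~0.70
```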
To measure mention importance, we use the original CT's binary metric: does the explanation mention the inserted word? Note that in this case our metric is the point-biserial correlation, a special case of the Pearson correlation coefficient where one variable is continuous and the other is dichotomous. We can then write CCT as:

$$
\mathrm{CCT} = \frac{\mathbb{E}_{M}(\mathrm{TVD}) - \mathbb{E}_{\neg M}(\mathrm{TVD})}{\mathrm{STD}(\mathrm{TVD})}\sqrt{\frac{|M|\,|\neg M|}{|M \cup \neg M|^{2}}} \tag{3}
$$

where $M$ indicates that the explanation mentions the IA, and $|M|$ indicates the number of examples with explanation mentions. For the binary mentions we study, CCT is maximized when explanations mention IAs exactly when their TVD is above a certain threshold (where the threshold depends on the distribution of TVDs). Table 1 shows an example application of our method. Future work could explore the case where explanations can assign weights to different features. We test variants of CCT with alternative distance and correlation measures in Appendix F.
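Because the mention variable is binary, Eq. (3) is simply the Pearson correlation between a 0/1 mention indicator and the TVDs, so SciPy's point-biserial helper computes it directly. A sketch (the illustrative values follow the five e-SNLI rows excerpted in Table 3):

```python
import numpy as np
from scipy.stats import pointbiserialr

tvds = np.array([0.01, 0.02, 0.02, 0.70, 0.21])  # prediction impacts
mentions = np.array([0, 0, 0, 1, 1])             # was the IA mentioned?
cct, _ = pointbiserialr(mentions, tvds)          # identical to Eq. (3)
```

The same value is obtained by plugging these arrays into the `cef` sketch above, since the point-biserial correlation is a special case of Pearson.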
CCT addresses the mentioned drawbacks of the CT. Unlike the CT, it cannot be trivially gamed: achieving maximum correlation requires explanations to mention impactful IAs while not mentioning non-impactful IAs, which requires a signal about which words are impactful.
# 4 Experiments

In this section, we describe our experimental setup. We first generate predictions and NLEs using LLMs on a set of three natural language classification tasks. We then study the faithfulness of these NLEs, comparing the CT and CCT.

# 4.1 Datasets

Following Atanasova et al. (2023), we evaluate on three popular classification datasets that include human-written NLEs:

e-SNLI (Camburu et al., 2018): Sentence pairs labeled with entailment, contradiction, or neutral.

ComVE (Wang et al., 2020): Sentence pairs where one violates common sense.

ECQA (Aggarwal et al., 2021): Multiple-choice commonsense questions with 5 options each.

We use ECQA in place of CoS-E (Rajani et al., 2019) as a more recent dataset, also based on CQA, with more detailed explanations that both justify the correct answer and refute the incorrect answers. Note that the ground-truth NLEs are not necessarily faithful explanations for an LLM: there may be multiple equally valid justifications for a ground-truth label on an instance (e.g., multiple reasons why two sentences are contradictory), or the LLM could rely on other reasoning, such as spurious correlations. We use the original train/test splits and evaluate on the test sets, containing 9,842 (e-SNLI), 2,194 (ECQA), and 999 (ComVE) examples.
# 4.2 Models and Prompts

We use the Llama-2 series of LLMs (Touvron et al., 2023). We focus on the few-shot imitation setting: we use the pretrained foundation models (Llama-2-7B, Llama-2-13B, and Llama-2-70B) prompted with a brief description of the dataset followed by 20 randomly selected examples from the training set, including label and explanation. When prompting the model, we can have it generate NLEs either after its prediction, as an explanation conditioned on the prediction (predict-then-explain, PE), or before the prediction, so that the prediction is conditioned on the explanation (explain-then-predict, EP)$^{1}$ (Camburu et al., 2018). We provide full example prompts in Appendix G. When generating text with these models, we use greedy sampling to reduce variation during evaluation. However, we still record the probabilities assigned to tokens corresponding to predicted classes, which we use for computing the TVD.
# 4.3 Counterfactual Interventions

We use the random intervention proposed in Atanasova et al. (2023): we insert a random adjective before a noun or a random adverb before a verb, randomly selecting 4 positions at which to insert words, and for each position selecting 20 random candidate words. The candidates are chosen from the complete list of adjectives or adverbs available in WordNet (Fellbaum, 2010), and nouns and verbs are identified with spaCy (Orosz et al., 2022) using the model "en_core_web_lg". In order to avoid highly unnatural sentences, we use an instruction-tuned LLM, Llama-2-70b-chat, to identify interventions that the model judges as not making sense, and keep only the top $20\%$ of interventions for each example (prompt shown in subsection G.4). See Appendix E for examples of interventions and their effect on model predictions and explanations. We determine whether an explanation includes an IA by case-insensitive substring matching, either on the original strings or on stemmed versions (Porter, 2001). A sketch of this pipeline is shown below.
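The following is a hedged sketch of the intervention and mention check (single insertion per example for brevity, whereas the paper samples 4 positions with 20 candidates each). spaCy and WordNet follow the paper; the NLTK Snowball stemmer and the `adjectives`/`adverbs` candidate lists (assumed to be drawn from WordNet) are our illustrative choices:

```python
import random
import spacy
from nltk.stem.snowball import SnowballStemmer

nlp = spacy.load("en_core_web_lg")
stemmer = SnowballStemmer("english")

def insert_random_word(text, adjectives, adverbs):
    """Insert a random adjective before a noun or a random adverb before a
    verb; returns the modified text and the inserted word (the IA)."""
    doc = nlp(text)
    slots = [(tok.i, adjectives if tok.pos_ == "NOUN" else adverbs)
             for tok in doc if tok.pos_ in ("NOUN", "VERB")]
    if not slots:
        return text, None
    pos, pool = random.choice(slots)
    ia = random.choice(pool)
    words = [tok.text for tok in doc]
    return " ".join(words[:pos] + [ia] + words[pos:]), ia

def mentions_ia(explanation, ia):
    """Case-insensitive substring match on raw or stemmed forms."""
    expl, word = explanation.lower(), ia.lower()
    if word in expl:
        return True
    stemmed_expl = " ".join(stemmer.stem(w) for w in expl.split())
    return stemmer.stem(word) in stemmed_expl
```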
For each model, prompting strategy (PE vs. EP), and dataset, we first run the model on each example in the test set and measure its predicted class probabilities. Next, we perform counterfactual interventions on each example and re-run the model on each intervention. Using TVD to measure impactfulness, we can study whether explanations are more likely to mention IAs that are more impactful, and compare the CT and CCT.
# 5 Results

Figure 1 plots intervention importance as measured by TVD vs. the fraction of the time that IAs are mentioned in explanations. A model with faithful explanations should show an upward trend in mentions, being more likely to mention highly impactful IAs than less impactful IAs. We note that while explanation mentions for e-SNLI show a clear upward trend, ECQA has a relatively flat trend: most ECQA explanations mention IAs, but they are not much more likely to mention highly impactful IAs than non-impactful ones. This may be because they tend to be verbose and repeat large portions of their inputs, as can be seen from the examples in Table 4.

Figure 1: Intervention impactfulness vs. explanation mentions, PE. The plots show the fraction of examples where the explanation mentions the inserted text (IA) vs. the total variation distance (TVD) of the model's predictions before and after interventions. Rows show datasets, columns show models. Higher TVD indicates an intervention was more impactful on the model's prediction. See Figure 2 for results in the EP setting.

<table><tr><td rowspan="2">Model</td><td colspan="3">Accuracy (%)</td><td colspan="3">CT Unfaithfulness (%)</td><td colspan="3">CCT Faithfulness</td></tr><tr><td>e-SNLI</td><td>ECQA</td><td>ComVE</td><td>e-SNLI</td><td>ECQA</td><td>ComVE</td><td>e-SNLI</td><td>ECQA</td><td>ComVE</td></tr><tr><td>Llama2 7B, PE</td><td>57.7</td><td>54.1</td><td>55.2</td><td>32.5</td><td>30.4</td><td>81.3</td><td>0.245</td><td>0.047</td><td>0.040</td></tr><tr><td>Llama2 7B, EP</td><td>47.6</td><td>55.2</td><td>52.4</td><td>43.5</td><td>31.7</td><td>78.7</td><td>0.141</td><td>0.065</td><td>0.125</td></tr><tr><td>Llama2 13B, PE</td><td>67.1</td><td>68.0</td><td>75.6</td><td>39.4</td><td>28.6</td><td>82.0</td><td>0.227</td><td>0.055</td><td>0.036</td></tr><tr><td>Llama2 13B, EP</td><td>55.5</td><td>71.4</td><td>75.8</td><td>45.5</td><td>30.2</td><td>78.4</td><td>0.189</td><td>0.036</td><td>0.201</td></tr><tr><td>Llama2 70B, PE</td><td>85.5</td><td>79.7</td><td>97.7</td><td>29.3</td><td>24.1</td><td>70.0</td><td>0.411</td><td>0.083</td><td>0.172</td></tr><tr><td>Llama2 70B, EP</td><td>74.9</td><td>77.8</td><td>98.5</td><td>37.2</td><td>28.8</td><td>69.2</td><td>0.304</td><td>0.038</td><td>0.238</td></tr><tr><td>Random</td><td>33.3</td><td>20.0</td><td>50.0</td><td>-</td><td>-</td><td>-</td><td>0.000</td><td>0.000</td><td>0.000</td></tr></table>

Table 2: Results. Accuracy (before interventions), CT, and CCT across datasets, models, and prompt orders (predict-then-explain, PE, vs. explain-then-predict, EP). Random CCT Faithfulness assumes that explanation mentions are independent of prediction impact. For CT Unfaithfulness, it is not obvious what to use as a "random" explanation baseline: empty explanations would yield $100\%$ unfaithfulness, while explanations simply repeating all input text verbatim would yield $0\%$ unfaithfulness regardless of model predictions.

Table 2 shows the quantitative results of our experiments. Classification accuracy before intervention is above random for all models and datasets (except possibly Llama2 7B on ComVE), indicating that the models are capable of performing some aspects of the tasks. Note that ECQA explanations have the lowest CT unfaithfulness of any dataset, i.e. they frequently mention IAs which cause predictions to change. But Figure 1 shows that this is misleading: ECQA explanations succeed in frequently mentioning impactful IAs because they frequently mention any IAs; the fact that a word appears in an ECQA explanation gives little signal about whether that word was impactful for the model's prediction.

The CCT better reflects the qualitative results from Figure 1 than the CT: model explanations provide more information about the relevance of IAs for e-SNLI than for ECQA, and are thus more faithful. Additionally, we see that the largest model, Llama2 70B, produces the most faithful explanations on e-SNLI and ComVE.
# 6 Summary and Outlook

We introduced Correlational Explanatory Faithfulness and the Correlational Counterfactual Test, allowing us to measure how informative explanations are about the importance of the factors they mention. Model explanations are more likely to mention inserted words when those words are more impactful to the model's predictions, suggesting a degree of faithfulness on these tasks which increases with model size. However, there is significant variation between datasets, which could be due to either the nature of the task or the annotator-provided explanations. Future work could apply the CCT to instruction-tuned models, as well as to explanations generated using strategies such as question decomposition (Radhakrishnan et al., 2023).
# Limitations

While our analysis identifies and corrects some shortcomings of prior work on measuring the faithfulness of NLEs, it inherits some of the limitations of the original CT (Atanasova et al., 2023). The counterfactual interventions only insert adjectives and adverbs, and only single words at a time, so our experiments do not measure sensitivity to other parts of speech. Our random intervention can generate text which lacks semantic coherence, despite our LLM filtering step. We do not test for synonyms, which could cause some explanation mentions to be labeled inaccurately. Additionally, we do not consider the semantic usage of word mentions: for example, our metrics would not penalize the faithfulness of illogical explanations as long as they had the correct pattern of word inclusion. Some of these drawbacks could potentially be addressed by further filtering or analysis with more advanced LLMs, taking advantage of their semantic understanding.

We study LLMs generating predictions and explanations using few-shot prompting, with example explanations taken from human-generated NLEs. These explanations can be highly dependent on annotation instructions. For example, CoS-E (Rajani et al., 2019) and ECQA (Aggarwal et al., 2021) both use CQA (Talmor et al., 2019) as a base dataset, but ECQA explanations are significantly longer than those for CoS-E. As such, care should be taken when extrapolating our results to other tasks: in the few-shot setting, the example explanations provided can have just as much impact on faithfulness as the model being used.
# Acknowledgements

We would like to thank Zac Kenton for feedback on a draft of this paper. Oana-Maria Camburu was supported by a Leverhulme Early Career Fellowship. The work of Maria Perez-Ortiz was partially supported by the European Commission-funded project "Humane AI: Toward AI Systems That Augment and Empower Humans by Understanding Us, our Society and the World Around Us" (grant 952026).
# References

Julius Adebayo, Justin Gilmer, Michael Muelly, Ian J. Goodfellow, Moritz Hardt, and Been Kim. 2018. Sanity checks for saliency maps. In Neural Information Processing Systems.

Shourya Aggarwal, Divyanshu Mandowara, Vishwajeet Agrawal, Dinesh Khandelwal, Parag Singla, and Dinesh Garg. 2021. Explanations for CommonsenseQA: New dataset and models. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3050–3065, Online. Association for Computational Linguistics.

Pepa Atanasova, Oana-Maria Camburu, Christina Lioma, Thomas Lukasiewicz, Jakob Grue Simonsen, and Isabelle Augenstein. 2023. Faithfulness tests for natural language explanations. ACL.

Collin Burns, Haotian Ye, Dan Klein, and Jacob Steinhardt. 2022. Discovering latent knowledge in language models without supervision. ArXiv, abs/2212.03827.

Oana-Maria Camburu, Eleonora Giunchiglia, Jakob Foerster, Thomas Lukasiewicz, and Phil Blunsom. 2021. The struggles of feature-based explanations: Shapley values vs. minimal sufficient subsets. In AAAI 2021 Workshop on Explainable Agency in Artificial Intelligence.

Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. 2018. e-SNLI: Natural language inference with natural language explanations. NeurIPS.

Aditya Chattopadhyay, Stewart Slocum, Benjamin D. Haeffele, René Vidal, and Donald Geman. 2023. Interpretable by design: Learning predictors by composing interpretable queries. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(6):7430–7443.

Antonia Creswell and Murray Shanahan. 2022. Faithful reasoning using large language models.

Antonia Creswell, Murray Shanahan, and Irina Higgins. 2022. Selection-inference: Exploiting large language models for interpretable logical reasoning. ICLR.

Jay DeYoung, Sarthak Jain, Nazneen Rajani, Eric P. Lehman, Caiming Xiong, Richard Socher, and Byron C. Wallace. 2019. ERASER: A benchmark to evaluate rationalized NLP models. In Annual Meeting of the Association for Computational Linguistics.

Fenglei Fan, Jinjun Xiong, Mengzhou Li, and Ge Wang. 2020. On interpretability of artificial neural networks: A survey. IEEE Transactions on Radiation and Plasma Medical Sciences, 5:741–760.

Sebastian Farquhar, Vikrant Varma, Zachary Kenton, Johannes Gasteiger, Vladimir Mikulik, and Rohin Shah. 2023. Challenges with unsupervised LLM knowledge discovery. ArXiv, abs/2312.10029.

Christiane Fellbaum. 2010. WordNet. In Theory and Applications of Ontology: Computer Applications, pages 231–243. Springer.

Alon Jacovi and Yoav Goldberg. 2020. Towards faithfully interpretable NLP systems: How should we define and evaluate faithfulness? In Annual Meeting of the Association for Computational Linguistics.

Maxime Kayser, Cornelius Emde, Oana-Maria Camburu, Guy Parsons, Bartlomiej Papiez, and Thomas Lukasiewicz. 2022. Explaining chest X-ray pathologies in natural language. In Medical Image Computing and Computer Assisted Intervention – MICCAI 2022, pages 701–713, Cham. Springer Nature Switzerland.

Tamera Lanham. 2022. Externalized reasoning oversight: A research direction for language model alignment.

Tamera Lanham, Anna Chen, Ansh Radhakrishnan, Benoit Steiner, Carson Denison, Danny Hernandez, Dustin Li, Esin Durmus, Evan Hubinger, Jackson Kernion, Kamile Lukosiute, Karina Nguyen, Newton Cheng, Nicholas Joseph, Nicholas Schiefer, Oliver Rausch, Robin Larson, Sam McCandlish, Sandipan Kundu, Saurav Kadavath, Shannon Yang, Thomas Henighan, Timothy Maxwell, Timothy Telleen-Lawton, Tristan Hume, Zac Hatfield-Dodds, Jared Kaplan, Jan Brauner, Samuel R. Bowman, and Ethan Perez. 2023. Measuring faithfulness in chain-of-thought reasoning.

Ricards Marcinkevics and Julia E. Vogt. 2020. Interpretability and explainability: A machine learning zoo mini-tour. ArXiv, abs/2012.01805.

Richard Ngo, Lawrence Chan, and Soren Mindermann. 2023. The alignment problem from a deep learning perspective.

György Orosz, Zsolt Szántó, Péter Berkecz, Gergő Szabó, and Richárd Farkas. 2022. HuSpaCy: An industrial-strength Hungarian natural language processing toolkit. arXiv preprint arXiv:2201.01956.

Letitia Parcalabescu and Anette Frank. 2023. On measuring faithfulness or self-consistency of natural language explanations. ArXiv.

Martin F. Porter. 2001. Snowball: A language for stemming algorithms.

Ansh Radhakrishnan, Karina Nguyen, Anna Chen, Carol Chen, Carson Denison, Danny Hernandez, Esin Durmus, Evan Hubinger, Jackson Kernion, Kamile Lukosiute, Newton Cheng, Nicholas Joseph, Nicholas Schiefer, Oliver Rausch, Sam McCandlish, Sheer El Showk, Tamera Lanham, Tim Maxwell, Venkatesa Chandrasekaran, Zac Hatfield-Dodds, Jared Kaplan, Jan Brauner, Samuel R. Bowman, and Ethan Perez. 2023. Question decomposition improves the faithfulness of model-generated reasoning.

Nazneen Fatema Rajani, Bryan McCann, Caiming Xiong, and Richard Socher. 2019. Explain yourself! Leveraging language models for commonsense reasoning.

Fabien Roger and Ryan Greenblatt. 2023. Preventing language models from hiding their reasoning.

Cynthia Rudin. 2018. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1:206–215.

Rohin Shah, Vikrant Varma, Ramana Kumar, Mary Phuong, Victoria Krakovna, Jonathan Uesato, and Zac Kenton. 2022. Goal misgeneralization: Why correct specifications aren't enough for correct goals.

Alon Talmor, Jonathan Herzig, Nicholas Lourie, and Jonathan Berant. 2019. CommonsenseQA: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4149–4158, Minneapolis, Minnesota. Association for Computational Linguistics.

Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. 2023. Llama 2: Open foundation and fine-tuned chat models.

Miles Turpin, Julian Michael, Ethan Perez, and Sam Bowman. 2023. Language models don't always say what they think: Unfaithful explanations in chain-of-thought prompting. ArXiv, abs/2305.04388.

Cunxiang Wang, Shuailong Liang, Yili Jin, Yilong Wang, Xiaodan Zhu, and Yue Zhang. 2020. SemEval-2020 Task 4: Commonsense validation and explanation. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 307–321, Barcelona (online). International Committee for Computational Linguistics.

Francis Rhys Ward, Francesco Belardinelli, Francesca Toni, and Tom Everitt. 2023. Honesty is the best policy: Defining and mitigating AI deception.

Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. 2023. Chain-of-thought prompting elicits reasoning in large language models.

Sarah Wiegreffe and Ana Marasovic. 2021. Teach me to explain: A review of datasets for explainable natural language processing. In NeurIPS Datasets and Benchmarks.

Sarah Wiegreffe, Ana Marasovic, and Noah A. Smith. 2020. Measuring association between labels and free-text rationales. In Conference on Empirical Methods in Natural Language Processing.
# A "Causal" vs. "Explanatory" Faithfulness

Rather than generating post-hoc explanations, there have been calls to instead build interpretability into the prediction process, where the prediction causally follows from the explanation (Rudin, 2018; Chattopadhyay et al., 2023). In the context of LLMs, this can be done by having models generate chains-of-thought (CoT) (Wei et al., 2023), a series of intermediate reasoning steps produced before the prediction. In addition to improving final task accuracy, this has been hypothesized to be a way to improve faithfulness: rationales may be more likely to accurately represent a model's true reasoning process if they are generated first, so that they can inform the prediction (Lanham, 2022; Camburu et al., 2018). However, it has been shown that even if reasoning is generated before the prediction, it may still be unfaithful (Turpin et al., 2023; Atanasova et al., 2023). Work on CoT has often focused on measuring (Lanham et al., 2023) and increasing (Radhakrishnan et al., 2023) the degree to which the model's final answer depends on its reasoning (i.e. the extent to which editing or removing reasoning steps changes the model's answer). Faithfulness and causal dependence in reasoning are complementary ideas, and we believe there are reasons to measure them separately:

1. It may be difficult to ensure reliance on CoT reasoning for some tasks: Lanham et al. (2023) found relatively minor accuracy gains from CoT outside of math-focused domains. In particular, as models become more powerful, they may be capable of solving increasing sets of tasks without verbalised CoT.
2. Causal dependence alone doesn't ensure the usefulness of an explanation: models could use language in ways different from humans, either unintentionally (e.g. semantic drift) or as a result of some optimization pressure (e.g. steganography; Roger and Greenblatt (2023)). Separate from causal dependence, it will still be necessary to measure whether the textual content of reasoning provides useful information about the factors leading to the model's prediction.
# B "Explainability" vs. "Transparency/Interpretability"

There isn't currently a clear consensus on the usage of the terms "explainability" and "interpretability": they are sometimes used interchangeably (e.g. Jacovi and Goldberg (2020)), while at other times a distinction is made between "interpretability" or "transparency", involving the creation of systems constrained in model form so that their inner mechanics can be observed and understood, and "explainability", involving the creation of auxiliary models to explain an existing black-box model (e.g. Rudin (2018)). Marcinkevics and Vogt (2020) also survey some existing usages of these terms.

Because "interpretability" is used in these different ways, when discussing this distinction, we've found it least ambiguous to refer to the two sides as "explainability" and "transparency".

The definition of faithfulness we adopt is that an explanation is faithful insofar as it "accurately represents the reasoning process behind the model's prediction" (Jacovi and Goldberg, 2020). Under Rudin (2018)'s distinction, both transparent systems and explainable systems can in principle be faithful if their explanations accurately represent the model's reasoning process. However, explainable systems in particular are at risk of post-hoc rationalization: producing explanations that sound plausible to humans but do not capture the true features that led to the prediction. This is our motivation for introducing improved metrics for the faithfulness of explanations.
# C "Faithfulness" or "Self-Consistency"?

Recent work (Parcalabescu and Frank, 2023) has argued that many metrics claiming to measure "faithfulness" (including the Counterfactual Test (Atanasova et al., 2023)) are in fact only measuring a weaker property, which they refer to as "self-consistency", because these tests fail to take into account mechanistic inner workings.

However, we still believe it is useful to refer to these tests as faithfulness metrics rather than self-consistency tests. Using Jacovi and Goldberg (2020)'s definition of faithfulness, if we intervene on an input and the model's output distribution changes, we have learned a property of the model's true reasoning process, i.e. that it depends on the intervened input in the current context. We can then measure the extent to which the explanation reflects this dependency, as in our proposed test.

Additionally, a test being mechanistic is not a guarantee of its robustness. Parcalabescu and Frank (2023) argue that "a test that is able to interrogate a model's inner workings would be akin to a lie detector that uses more internal cues that cannot be easily suppressed". Indeed, this has been the motivation for some prior approaches: Burns et al. (2022) proposed Contrast-Consistent Search, a test using internal model activations to detect when a model gives an answer it "knows" is untrue. However, later work found that this method often identifies spurious non-knowledge-related features (Farquhar et al., 2023). Robustly measuring faithfulness may require a combination of tests, both mechanistic and black-box.
# D Intervention Impactfulness with Explain-then-Predict

Figure 2 shows intervention impactfulness vs. explanation mention measure, equivalent to Figure 1 but in the Explain-then-Predict (EP) setting.
# E Example Interventions

In this section we show randomly selected examples of interventions on the three datasets, as well as model responses. For each example, we show the original problem and resulting prediction first, followed by the modified problem and predictions with the IA highlighted in red. We also highlight any mentions of the IA in the model's explanation for the modified problem. For conciseness we show only the case of Llama2 70B using predict-then-explain prompting. See Table 3 for interventions on e-SNLI, Table 4 for interventions on ECQA, and Table 5 for interventions on ComVE.
# F CCT Variants

We chose to use TVD as our distance metric because it gives less weight to shifts between very small probabilities (which are unlikely to impact the classification decision), and we chose to use Pearson as our correlation coefficient because it takes cardinality into account, unlike rank correlation coefficients, which only use ordinality. However, our approach can also be computed using other choices of distance and correlation.

We can compute our metric in the predict-then-explain setting under two other plausible configurations: CCT (Jensen-Shannon), using the Jensen-Shannon divergence (a symmetric divergence based on KL) in place of TVD, and CCT (Spearman), using Spearman's rank correlation in place of Pearson. Table 6 shows our results.
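A sketch of these variants (our illustration, SciPy assumed; note that `scipy.spatial.distance.jensenshannon` returns the square root of the JS divergence, hence the squaring):

```python
import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import spearmanr

def js_divergence(p: np.ndarray, q: np.ndarray) -> float:
    """Jensen-Shannon divergence, a drop-in replacement for TVD."""
    return float(jensenshannon(p, q) ** 2)

# Illustrative arrays: one impact value and one binary mention per intervention.
impacts = np.array([0.01, 0.02, 0.02, 0.70, 0.21])
mentions = np.array([0, 0, 0, 1, 1])
cct_spearman, _ = spearmanr(mentions, impacts)  # rank correlation in place of Pearson
```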
These variants show similar qualitative trends, with the highest values assigned to e-SNLI explanations, lower values for ECQA and ComVE, and slightly more faithful explanations for the largest model (except for CCT (Spearman) EP, where the 13B model has the highest value).
| 262 |
+
# G LM Prompts
|
| 263 |
+
|
| 264 |
+
In this section we describe the prompts we use. Each few-shot prompt consists of three parts: the prefix describes the format verbally; 20 few-shot examples sampled uniformly without replacement from the training set, providing demonstrations of predictions and explanations; and the query, consisting of the input for a new problem instance to be evaluated. To avoid dependence on a single prompt sample, we independently sample new few-shot examples for each evaluation example. However, to ensure our word insertion interventions are the only thing changing model predictions, we use the same few-shot examples for the model's prediction before and after interventions.
|
| 265 |
+
|
| 266 |
+
The following are randomly selected examples of prompts for each dataset. We show predict-then-explain prompts; explain-then-predict prompts have the same format, with the only difference being that the order of the label and explanation lines is reversed and the query ends with "EXPLANATION:" rather than the label title.
|
| 267 |
+
|
| 268 |
+
# G.1 e-SNLI Example Prompt
|
| 269 |
+
|
| 270 |
+
The following are examples from a dataset. Each example consists of a pair of statements, "TEXT" and "HYPOTHESIS". Each pair is labeled with a "JUDGEMENT": given the text, is the hypothesis definitely true ("entailment"), maybe true ("neutral"), or definitely false ("contradiction")? "EXPLANATION" explains why the selected judgement is chosen.

TEXT: a dog chases another dog.
HYPOTHESIS: The dog is wanting to get the ball first.
JUDGEMENT: neutral
EXPLANATION: The dog may not be wanting anything. There may not be a ball present to get first.

TEXT: A woman carried a cake ito the room with three candles as another woman holding a flute glass of wine, holds up her hand.
HYPOTHESIS: Two women were celebrating.
JUDGEMENT: neutral
EXPLANATION: Eating a cake and drinking one doesn't imply celebrating.

TEXT: A man in a wetsuit is surfing up and over a wave.
HYPOTHESIS: A man is surfing over a wave.
JUDGEMENT: entailment
EXPLANATION: A man surfing would do so over a wave.

TEXT: Rugby players tackling each other.
HYPOTHESIS: The rugby players are getting physical.
JUDGEMENT: entailment
EXPLANATION: Tackling is a very physical action.

TEXT: Some students saying prayer outside.
HYPOTHESIS: A dog barks inside.
JUDGEMENT: contradiction
EXPLANATION: the dog is not students outside and the dog is inside.

<table><tr><td>Text</td><td>Hypothesis</td><td>True Class</td><td>Predicted Class</td><td>Predicted Class Probabilities</td><td>TVD</td><td>Generated Explanation</td></tr><tr><td>A man is resting in a small stream with a hat over his head while the little waterfall is pouring in the background.</td><td>A man is resting in a hammock</td><td>contradiction</td><td>contradiction</td><td>entailment: 0.1%, neutral: 4.9%, contradiction: 94.4%</td><td rowspan="2">0.01</td><td>A man can't be resting in a hammock and a stream at the same time.</td></tr><tr><td>A man is deliriously resting in a small stream with a hat over his head while the little waterfall is pouring in the back-ground.</td><td>A man is resting in a hammock</td><td></td><td>contradiction</td><td>entailment: 0.1%, neutral: 4.3%, contradiction: 95.0%</td><td>A man can't be resting in a hammock and a stream at the same time.</td></tr><tr><td>A blond-haired doctor and her African American assistant looking threw new medical manuals.</td><td>A doctor is studying</td><td>neutral</td><td>neutral</td><td>entailment: 32.5%, neutral: 62.7%, contradiction: 3.9%</td><td rowspan="2">0.02</td><td>Just because a doctor is looking through new medical manuals does not mean that the doctor is studying.</td></tr><tr><td>A blond-haired prominent doctor and her African American assistant looking threw new medical manuals.</td><td>A doctor is studying</td><td></td><td>neutral</td><td>entailment: 34.2%, neutral: 61.0%, contradiction: 3.9%</td><td>Just because a doctor is looking through new medical manuals does not mean that the doctor is studying.</td></tr><tr><td>Man wearing blue bowing on floor in front of another man in blue bowing on floor with two other men wearing blue kneeling on same floor also.</td><td>A man is wearing something.</td><td>entailment</td><td>neutral</td><td>entailment: 31.1%, neutral: 53.7%, contradiction: 13.6%</td><td rowspan="2">0.02</td><td>A man wearing something does not imply that he is wearing blue.</td></tr><tr><td>Shaky man wearing blue bowing on floor in front of another man in blue bowing on floor with two other men wearing blue kneeling on same floor also.</td><td>A man is wearing something.</td><td></td><td>neutral</td><td>entailment: 33.3%, neutral: 53.1%, contradiction: 12.0%</td><td>A man wearing something does not imply that he is wearing blue.</td></tr><tr><td>Three people are riding a carriage pulled by four horses.</td><td>The horses are scrawny.</td><td>neutral</td><td>neutral</td><td>entailment: 0.1%, neutral: 75.0%, contradiction: 23.6%</td><td rowspan="2">0.70</td><td>The horses could be scrawny or not.</td></tr><tr><td>Three people are riding a carriage pulled by four joyous horses.</td><td>The horses are scrawny.</td><td></td><td>contradiction</td><td>entailment: 0.1%, neutral: 4.9%, contradiction: 94.4%</td><td>The horses are joyous, so they are not scrawny.</td></tr><tr><td>Children nap on a floor while others stand by.</td><td>Children are playing childish games while others stand by.</td><td>contradiction</td><td>contradiction</td><td>entailment: 0.4%, neutral: 37.5%, contradiction: 60.9%</td><td rowspan="2">0.21</td><td>Children cannot be playing childish games while they are napping.</td></tr><tr><td>Children nap on a floor while others stand by.</td><td>Children are gloriously playing childish games while others stand by.</td><td></td><td>contradiction</td><td>entailment: 0.2%, neutral: 17.1%, contradiction: 81.8%</td><td>Children cannot be gloriously playing childish games while others stand by 
and napping on a floor at the same time.</td></tr><tr><td>A child in a jeans jacket and shorts holding up a white dress as another darker-skinned child looks at it.</td><td>a dog eats his food</td><td>contradiction</td><td>contradiction</td><td>entailment: 0.1%, neutral: 32.0%, contradiction: 66.8%</td><td rowspan="2">0.02</td><td>A dog eating his food is not related to a child holding up a white dress.</td></tr><tr><td>A child in a jeans jacket and shorts holding up a white dress as another darker-skinned child looks at it.</td><td>a dog badly eats his food</td><td></td><td>contradiction</td><td>entailment: 0.2%, neutral: 33.7%, contradiction: 65.0%</td><td>A dog eating his food badly does not relate to a child holding up a white dress.</td></tr><tr><td>A bald person in green clothing stands in front of a brick wall.</td><td>A person is in front of a brick wall.</td><td>entailment</td><td>entailment</td><td>entailment: 93.4%, neutral: 5.9%, contradiction: 0.4%</td><td rowspan="2">0.92</td><td>A bald person in green clothing stands in front of a brick wall.</td></tr><tr><td>A bald person in green clothing stands in front of a brick wall.</td><td>A takeout person is in front of a brick wall.</td><td></td><td>neutral</td><td>entailment: 0.8%, neutral: 68.7%, contradiction: 29.1%</td><td>A bald person in green clothing does not imply that they are a takeout person.</td></tr><tr><td>A father and son are in a field of yellow flowers.</td><td>People are picking flowers.</td><td>neutral</td><td>neutral</td><td>entailment: 1.3%, neutral: 81.9%, contradiction: 15.6%</td><td rowspan="2">0.07</td><td>There is no evidence that the people are picking flowers.</td></tr><tr><td>A corrupt father and son are in a field of yellow flowers.</td><td>People are picking flowers.</td><td></td><td>neutral</td><td>entailment: 1.7%, neutral: 74.8%, contradiction: 22.1%</td><td>There is no evidence that the people are picking flowers.</td></tr><tr><td>Very young boy is standing outside near a water fountain that is built into the ground and is squirting water.</td><td>The young boy is standing inside.</td><td>contradiction</td><td>contradiction</td><td>entailment: 0.1%, neutral: 0.8%, contradiction: 98.7%</td><td rowspan="2">0.00</td><td>The boy is standing outside.</td></tr><tr><td>Very young wholesome boy is standing outside near a water fountain that is built into the ground and is squirting water.</td><td>The young boy is standing inside.</td><td></td><td>contradiction</td><td>entailment: 0.1%, neutral: 0.8%, contradiction: 98.7%</td><td>The boy is standing outside.</td></tr><tr><td>Two men at a table having drinks.</td><td>Two people are drinking.</td><td>entailment</td><td>entailment</td><td>entailment: 66.6%, neutral: 29.6%, contradiction: 2.7%</td><td rowspan="2">0.66</td><td>Two men at a table having drinks means two people are drinking.</td></tr><tr><td>Two men at a table having drinks.</td><td>Two insubordinate people are drinking.</td><td></td><td>contradiction</td><td>entailment: 1.5%, neutral: 28.5%, contradiction: 68.4%</td><td>Two men at a table having drinks does not imply insubordinate people are drinking.</td></tr></table>
Table 3: Example e-SNLI interventions, predictions, and explanations.
<table><tr><td>Question</td><td>Options</td><td>True Class</td><td>Predicted Class</td><td>Predicted Class Probabilities</td><td>TVD</td><td>Generated Explanation</td></tr><tr><td>What will you do after you are awake?</td><td>1: open eyes, 2: get to sleep, 3: deep sleep, 4: sleep again, 5: conscious</td><td>1</td><td>5</td><td>1: 24.8%, 2: 1.9%, 3: 2.9%, 4: 1.9%, 5: 68.4%</td><td></td><td>Conscious is to be awake and aware of one's surroundings. What will you do after you are awake? You will be conscious. Open eyes is not what you will do after you are awake. Get to sleep and sleep again are not what you will do after you are awake. Deep sleep is not what you will do after you are awake.</td></tr><tr><td>What will you skilfully do after you are awake?</td><td>1: open eyes, 2: get to sleep, 3: deep sleep, 4: sleep again, 5: conscious</td><td></td><td>5</td><td>1: 18.5%, 2: 2.2%, 3: 3.0%, 4: 1.8%, 5: 74.4%</td><td>0.06</td><td>Conscious is to be awake and aware of one's surroundings. What will you skilfully do after you are awake is to be conscious. Open eyes is not skilful. Get to sleep and sleep again is not what you will do after you are awake. Deep sleep is not skilful.</td></tr><tr><td>Where might a person play an acoustic guitar at a birthday party?</td><td>1: studio, 2: at funeral place, 3: house, 4: music shop, 5: closet</td><td></td><td>3</td><td>1: 2.9%, 2: 0.6%, 3: 94.0%, 4: 0.8%, 5: 1.7%</td><td></td><td>A person might play an acoustic guitar at a birthday party at a house. A person might not play an acoustic guitar at a birthday party at a studio. A person might not play an acoustic guitar at a birthday party at a music shop. A person might not play an acoustic guitar at a birthday party at a closet.</td></tr><tr><td>Where might a callous person play an acoustic guitar at a birthday party?</td><td>1: studio, 2: at funeral place, 3: house, 4: music shop, 5: closet</td><td></td><td>3</td><td>1: 13.7%, 2: 2.2%, 3: 76.6%, 4: 1.8%, 5: 5.6%</td><td>0.17</td><td>A callous person is someone who is not sensitive to the feelings of others. A callous person might play an acoustic guitar at a birthday party in a house. A callous person might not play an acoustic guitar at a birthday party at a music shop. A callous person might not play an acoustic guitar at a birthday party in a music shop. A callous person might not play an acoustic guitar at a birthday party in a closet.</td></tr><tr><td>What type of location would you find fish for sale?</td><td>1: refrigerator, 2: ocean, 3: pay debts, 4: stream, 5: market</td><td>5</td><td>5</td><td>1: 0.2%, 2: 0.3%, 3: 0.5%, 4: 0.9%, 5: 98.1%</td><td></td><td>Market is a place where goods are bought and sold. Fish for sale can be found at a market. Fish for sale cannot be found at a refrigerator. Fish for sale cannot be found at an ocean. Fish for sale cannot be found at pay debts. Fish for sale cannot be found at a stream.</td></tr><tr><td>What type of location would you find elaborate fish for sale?</td><td>1: refrigerator, 2: ocean, 3: pay debts, 4: stream, 5: market</td><td></td><td>5</td><td>1: 0.2%, 2: 0.3%, 3: 0.4%, 4: 0.8%, 5: 98.2%</td><td>0.00</td><td>Market is a place where goods are bought and sold. Elaborate fish for sale can be found at a market. Refrigerator is not a place where elaborate fish for sale can be found. Ocean is not a place where elaborate fish for sale can be found. Pay debts is not a place where elaborate fish for sale can be found. Stream is not a place where elaborate fish for sale can be found.</td></tr><tr><td>If someone is fiddling a sad tune, what is the song likely about?</td><td>1: troubles, 2: unpredictable, 3: joy, 4: being happy, 5: discovery</td><td>1</td><td>1</td><td>1: 92.1%, 2: 1.6%, 3: 5.9%, 4: 0.2%, 5: 0.2%</td><td></td><td>A sad tune is a song that is sad and not unpredictable, joyful or a discovery. A sad tune is likely to be about troubles.</td></tr><tr><td>If someone is fiddling a sad showy tune, what is the song likely about?</td><td>1: troubles, 2: unpredictable, 3: joy, 4: being happy, 5: discovery</td><td></td><td>1</td><td>1: 92.0%, 2: 1.3%, 3: 6.2%, 4: 0.2%, 5: 0.3%</td><td>0.00</td><td>Troubles are the problems and difficulties that someone has. If someone is fiddling a sad showy tune, the song is likely about troubles. Unpredictable is not a correct option. Joy is not a correct option. Being happy is not a correct option. Discovery is not a correct option.</td></tr><tr><td>Sam's children are rambunctious. What might they do?</td><td>1: break things, 2: play sports, 3: play with toys, 4: play ball, 5: throw things</td><td>5</td><td>1</td><td>1: 57.0%, 2: 7.7%, 3: 9.2%, 4: 2.3%, 5: 23.8%</td><td></td><td>Rambunctious means uncontrollable and uncontrollable children might break things. Playing sports, playing with toys, playing ball and throwing things are not what rambunctious children might do.</td></tr><tr><td>Sam's friendly children are rambunctious. What might they do?</td><td>1: break things, 2: play sports, 3: play with toys, 4: play ball, 5: throw things</td><td></td><td>1</td><td>1: 50.2%, 2: 11.0%, 3: 12.5%, 4: 3.0%, 5: 23.3%</td><td>0.07</td><td>Rambunctious means uncontrollably exuberant and children who are rambunctious might break things. Children who are rambunctious might not play sports, play with toys, play ball or throw things.</td></tr><tr><td>Where is a business restaurant likely to be located?</td><td>1: town, 2: at hotel, 3: mall, 4: business sector, 5: yellow pages</td><td>4</td><td>4</td><td>1: 4.8%, 2: 4.0%, 3: 5.4%, 4: 84.6%, 5: 1.2%</td><td></td><td>Business sector is a place where business is done. Business restaurant is a place where business is done. Business restaurant is likely to be located in business sector. Town is not a place where business is done. At hotel is not a place where business is done. Mall is not a place where business is done. Yellow pages is not a place where business is done.</td></tr><tr><td>Where is a halal business restaurant likely to be located?</td><td>1: town, 2: at hotel, 3: mall, 4: business sector, 5: yellow pages</td><td></td><td>4</td><td>1: 14.0%, 2: 6.1%, 3: 34.7%, 4: 41.9%, 5: 3.2%</td><td>0.43</td><td>Halal is an Arabic word meaning lawful or permitted. In reference to food, it is the dietary standard, as prescribed in the Qur'an (the Muslim scripture). Halal business restaurant is likely to be located in a business sector. Halal business restaurant is not likely to be located in a town. Halal business restaurant is not likely to be located at a hotel. Halal business restaurant is not likely to be located in a mall. Halal business restaurant is not likely to be located in a mall. Halal business restaurant is not likely to be located in a mall. Halal business restaurant is not likely to be located in yellow pages.</td></tr></table>
Table 4: Example ECQA interventions, predictions, and explanations.
<table><tr><td>Sentence 0</td><td>Sentence 1</td><td>True Class</td><td>Predicted Class</td><td>Predicted Class Probabilities</td><td>TVD</td><td>Generated Explanation</td></tr><tr><td>It is easy to find seashells in the forest</td><td>It is easy to find seashells by the ocean</td><td>0</td><td>0</td><td>0: 99.1%, 1: 0.9%</td><td></td><td>seashells are found by the ocean</td></tr><tr><td>It is easy to find seashells in the forest</td><td>It is easy to find seashells by the gloomy ocean</td><td></td><td>0</td><td>0: 97.6%, 1: 2.4%</td><td>0.02</td><td>seashells are found by the ocean</td></tr><tr><td>cars are the only way to get around</td><td>cars are a useful mode of transportation</td><td>0</td><td>0</td><td>0: 91.3%, 1: 8.6%</td><td></td><td>There are many ways to get around, such as buses, trains, bicycles, etc.</td></tr><tr><td>cars are the only way to get around</td><td>Grey cars are a useful mode of transportation</td><td></td><td>1</td><td>0: 49.6%, 1: 50.3%</td><td>0.42</td><td>Grey cars are not the only way to get around</td></tr><tr><td>The rotary phone recorded me.</td><td>The cell phone recorded me.</td><td>0</td><td>0</td><td>0: 98.1%, 1: 1.9%</td><td></td><td>The rotary phone can't record.</td></tr><tr><td>The rotary black phone recorded me.</td><td>The cell phone recorded me.</td><td></td><td>0</td><td>0: 98.3%, 1: 1.6%</td><td>0.00</td><td>The rotary black phone can't record.</td></tr><tr><td>he swims 1km each way every day</td><td>he swims with piranhas</td><td>1</td><td>1</td><td>0: 29.7%, 1: 70.1%</td><td></td><td>Piranhas are dangerous</td></tr><tr><td>he swims 1km each way every day</td><td>he safely swims with piranhas</td><td></td><td>1</td><td>0: 22.2%, 1: 77.6%</td><td>0.07</td><td>Piranhas are dangerous</td></tr><tr><td>The fish ran through the forest</td><td>The fox ran through the forest</td><td>0</td><td>0</td><td>0: 99.5%, 1: 0.5%</td><td></td><td>Fish cannot run on land</td></tr><tr><td>The fish ran through the forest</td><td>The fox ran through the singing forest</td><td></td><td>0</td><td>0: 69.9%, 1: 30.0%</td><td>0.30</td><td>Fish cannot run</td></tr><tr><td>The kids went to the farm</td><td>The farm went to the farm</td><td>1</td><td>1</td><td>0: 3.5%, 1: 96.4%</td><td></td><td>A farm is a place, not a person or animal.</td></tr><tr><td>The kids just went to the farm</td><td>The farm went to the farm</td><td></td><td>1</td><td>0: 4.2%, 1: 95.6%</td><td>0.01</td><td>A farm is a place, not a person or animal.</td></tr><tr><td>The stone can float on the water</td><td>The wood can float on the water</td><td>0</td><td>0</td><td>0: 91.5%, 1: 8.5%</td><td></td><td>The density of stone is greater than that of water</td></tr><tr><td>The stone can float on the water</td><td>The legendary wood can float on the water</td><td></td><td>0</td><td>0: 83.5%, 1: 16.4%</td><td>0.08</td><td>The stone is too heavy to float on the water</td></tr><tr><td>I like to drink coffee beans in the morning</td><td>I like to drink coffee in the morning</td><td>0</td><td>0</td><td>0: 98.5%, 1: 1.5%</td><td></td><td>Coffee beans are not drinkable.</td></tr><tr><td>I like to drink coffee beans in the morning</td><td>I particularly like to drink coffee in the morning</td><td></td><td>0</td><td>0: 97.5%, 1: 2.4%</td><td>0.01</td><td>Coffee beans are not drinkable</td></tr><tr><td>The teacher was teaching English in the classroom</td><td>the teacher was harvesting wheat in the classroom</td><td>1</td><td>1</td><td>0: 0.6%, 1: 99.3%</td><td></td><td>Teachers teach in classrooms, not harvest 
wheat</td></tr><tr><td>The fussy teacher was teaching English in the classroom</td><td>the teacher was harvesting wheat in the classroom</td><td></td><td>1</td><td>0: 1.5%, 1: 98.3%</td><td>0.01</td><td>Teachers teach in classrooms, not harvest wheat</td></tr><tr><td>Blackberries and strawberries are berries.</td><td>Blackberries and strawberries are the same.</td><td>1</td><td>1</td><td>0: 2.8%, 1: 97.1%</td><td></td><td>Blackberries and strawberries are different.</td></tr><tr><td>Blackberries and large strawberries are berries.</td><td>Blackberries and strawberries are the same.</td><td></td><td>1</td><td>0: 3.5%, 1: 96.4%</td><td>0.01</td><td>Blackberries and strawberries are different.</td></tr><tr><td>I put a bottle of milk into the fridge.</td><td>I put a car inside the fridge.</td><td>1</td><td>1</td><td>0: 0.9%, 1: 99.0%</td><td></td><td>A car is too big to fit in a fridge.</td></tr><tr><td>I put a bottle of standard milk into the fridge.</td><td>I put a car inside the fridge.</td><td></td><td>1</td><td>0: 1.7%, 1: 98.2%</td><td>0.01</td><td>A car is too big to fit in a fridge.</td></tr><tr><td>she eats an egg for breakfast every day</td><td>she eats a table for breakfast every day</td><td>1</td><td>1</td><td>0: 0.5%, 1: 99.4%</td><td></td><td>a table is not edible</td></tr><tr><td>she fearlessly eats an egg for breakfast every day</td><td>she eats a table for breakfast every day</td><td></td><td>1</td><td>0: 1.2%, 1: 98.7%</td><td>0.01</td><td>a table is not edible</td></tr><tr><td>He put a motorcycle in his wallet</td><td>He put a coin in his wallet</td><td>0</td><td>0</td><td>0: 99.0%, 1: 1.0%</td><td></td><td>A motorcycle is too big to fit in a wallet</td></tr><tr><td>He put a motorcycle in his wallet</td><td>He put a coin in his soft wallet</td><td></td><td>0</td><td>0: 94.1%, 1: 5.8%</td><td>0.05</td><td>A motorcycle is too big to fit in a wallet</td></tr><tr><td>he kept the ice cream in the oven</td><td>he kept the ice cream in the fridge</td><td>0</td><td>0</td><td>0: 99.3%, 1: 0.6%</td><td></td><td>ice cream will melt in the oven</td></tr><tr><td>he kept the ice cream in the oven</td><td>he eagerly kept the ice cream in the fridge</td><td></td><td>0</td><td>0: 96.4%, 1: 3.5%</td><td>0.03</td><td>ice cream will melt in the oven</td></tr><tr><td>He played a game with children</td><td>He played a game with fairies</td><td>1</td><td>1</td><td>0: 1.2%, 1: 98.6%</td><td></td><td>Fairies are not real</td></tr><tr><td>He played a game with children</td><td>He curiously played a game with fairies</td><td></td><td>1</td><td>0: 3.0%, 1: 96.8%</td><td>0.02</td><td>Fairies are not real</td></tr></table>
Table 5: Example ComVE interventions, predictions, and explanations.

Figure 2: Intervention impactfulness vs. explanation mentions, EP. The plots show the fraction of examples where the explanation mentions the inserted text (IA) vs. the total variation distance (TVD) of the model's predictions before and after interventions: higher TVD indicates an intervention was more impactful on the model.
<table><tr><td rowspan="2">Model</td><td colspan="3">CCT (Original)</td><td colspan="3">CCT (Jensen-Shannon)</td><td colspan="3">CCT (Spearman)</td></tr><tr><td>e-SNLI</td><td>ECQA</td><td>ComVE</td><td>e-SNLI</td><td>ECQA</td><td>ComVE</td><td>e-SNLI</td><td>ECQA</td><td>ComVE</td></tr><tr><td>Llama 2 7B, PE</td><td>0.245</td><td>0.047</td><td>0.040</td><td>0.247</td><td>0.044</td><td>0.034</td><td>0.242</td><td>0.044</td><td>0.033</td></tr><tr><td>Llama 2 7B, EP</td><td>0.141</td><td>0.065</td><td>0.125</td><td>0.147</td><td>0.067</td><td>0.119</td><td>0.206</td><td>0.078</td><td>0.098</td></tr><tr><td>Llama 2 13B, PE</td><td>0.227</td><td>0.055</td><td>0.036</td><td>0.230</td><td>0.058</td><td>0.021</td><td>0.180</td><td>0.050</td><td>0.016</td></tr><tr><td>Llama 2 13B, EP</td><td>0.189</td><td>0.036</td><td>0.201</td><td>0.198</td><td>0.037</td><td>0.206</td><td>0.207</td><td>-0.014</td><td>0.173</td></tr><tr><td>Llama 2 70B, PE</td><td>0.411</td><td>0.083</td><td>0.172</td><td>0.412</td><td>0.085</td><td>0.129</td><td>0.329</td><td>0.068</td><td>0.046</td></tr><tr><td>Llama 2 70B, EP</td><td>0.304</td><td>0.038</td><td>0.238</td><td>0.312</td><td>0.037</td><td>0.239</td><td>0.308</td><td>0.021</td><td>0.128</td></tr></table>
Table 6: Values for CCT plus two variants: CCT (Jensen-Shannon), using Jensen-Shannon divergence in place of TVD, and CCT (Spearman), using Spearman's rank correlation in place of Pearson.
TEXT: Three women are posing together and smiling while one holds up a hand signal.
HYPOTHESIS: Two women are yelling at each other and pointing fingers.
JUDGEMENT: contradiction
EXPLANATION: There is either three women or two women.

TEXT: Three people are checking out a piece of art at the local museum.
HYPOTHESIS: Three women are at a museum.
JUDGEMENT: entailment
EXPLANATION: Three people could be women and they are at a museum

TEXT: Four people are in a group hug near a soda machine.
HYPOTHESIS: A group of friends in a huddle.
JUDGEMENT: neutral
EXPLANATION: a hug is not a huddle

TEXT: A young boy wearing black pants and a pinstriped shirt looks at something on a computer screen.
HYPOTHESIS: A young boy is doing his homework on the computer.
JUDGEMENT: neutral
EXPLANATION: Looking at screen doesn't imply doing homework.

TEXT: A man is rollerblading down a rail.
HYPOTHESIS: There is a man rollerblading quickly.
JUDGEMENT: neutral
EXPLANATION: Not all people rollerblading are doing so quickly.

TEXT: Pedestrians strolling along a brick walkway tween high buildings.
HYPOTHESIS: People walk through town.
JUDGEMENT: entailment
EXPLANATION: Strolling means casually walking while a simple "walk" doesn't have any connotation.

TEXT: a group of people sitting on the ground on the sidewalk
HYPOTHESIS: A group of people sit around in a circle.
JUDGEMENT: neutral
EXPLANATION: Sitting on the ground does not have to be in a circle.

TEXT: A man with an arm cast films something on video while another man is looking at the camera.
HYPOTHESIS: The man does not have a cast.
JUDGEMENT: contradiction
EXPLANATION: The man can't have a cast while not having a cast.

TEXT: Young woman in blue shirt checking out merchandise.
HYPOTHESIS: The woman is shopping.
JUDGEMENT: entailment
EXPLANATION: One is shopping by checking out merchandise.

TEXT: A woman carries a young girl on her shoulders
HYPOTHESIS: A woman carries her purse with her to the concert.
JUDGEMENT: contradiction
EXPLANATION: A woman can either carry a young girl or her purse at a time.

TEXT: A man cooking in a restaurants.
HYPOTHESIS: A lady is cooking in a restaurant.
JUDGEMENT: contradiction
EXPLANATION: A man and a lady are two different people.

TEXT: A white dog travels along a narrow path in a park setting.
HYPOTHESIS: The animal is going along the path.
JUDGEMENT: entailment
EXPLANATION: The dog traveling is the animal going on the path.

TEXT: One guy wearing black shirt sitting at table working on computer project.
HYPOTHESIS: There is a man indoors with a computer.
JUDGEMENT: entailment
EXPLANATION: Guy is a synonym for man. Working on a computer project would likely require a computer.

TEXT: A man in blue shorts lays down outside in a parking lot.
HYPOTHESIS: Nobody is laying.
JUDGEMENT: contradiction
EXPLANATION: A man is laying down so there is somebody laying.

TEXT: Girl running in a marathon, wearing a black shirt with a white tank top, with the numbers 44 on it.
HYPOTHESIS: There is boy sitting at his house.
JUDGEMENT: contradiction
EXPLANATION: a girl is not a boy and running is not sitting

TEXT: Two women are embracing while holding to go packages.
HYPOTHESIS: The sisters are hugging goodbye while holding to go packages after just eating lunch.
JUDGEMENT:
# G.2 ComVE Example Prompt
The following are examples from a dataset. Each example consists of a pair of sentences, "SENTENCE 0" and "SENTENCE 1". One of these sentences violates common sense. Each pair of these is labeled with "FALSE SENTENCE", followed by the label of the false sentence, 0 or 1. "EXPLANATION" explains why sentence is chosen.

SENTENCE 0: You can use a holding bay to store an item
SENTENCE 1: You can use a holding bay to delete an item
FALSE SENTENCE: 1
EXPLANATION: Deleting items is not a holding bay function

SENTENCE 0: Rainbow has five colors
SENTENCE 1: Rainbow has seven colors
FALSE SENTENCE: 0
EXPLANATION: The seven colors of the rainbow are red, orange, yellow, green, blue, blue, and purple

SENTENCE 0: You are likely to find a cat in ocean
SENTENCE 1: You are likely to find a shark in ocean
FALSE SENTENCE: 0
EXPLANATION: Cats do not feed on ocean lives

SENTENCE 0: The caterpillar eats the rose bud
SENTENCE 1: Roses buds eat caterpillars
FALSE SENTENCE: 1
EXPLANATION: Caterpillars have mouths while rose buds don't

SENTENCE 0: playing frisbee is for people who like to play frisbee
SENTENCE 1: playing frisbee is for people who like to play football
FALSE SENTENCE: 1
EXPLANATION: People avoid doing things they dislike so if they like play frisbee they do that sport

SENTENCE 0: A recipe is great way to cook a gourmet meal and avoid minor mistakes in the kitchen.
SENTENCE 1: Cooking gourmet meals is the number one way to make mistakes such as kitchen fires.
FALSE SENTENCE: 1
EXPLANATION: Kitchen fires, and or mistakes are not a direct result of cooking gourmet meals.

SENTENCE 0: Nail is a small piece of metal which is inserted into a lock and turned to open or close it
SENTENCE 1: Key is a small piece of metal which is inserted into a lock and turned to open or close it
FALSE SENTENCE: 0
EXPLANATION: Usually people use key to unlock a lock

SENTENCE 0: She put a Turkey in the oven.
SENTENCE 1: She put a desk in the oven.
FALSE SENTENCE: 1
EXPLANATION: A desk can not fit in a oven.

SENTENCE 0: A lemon has stripes.
SENTENCE 1: A tiger has stripes.
FALSE SENTENCE: 0
EXPLANATION: Lemons are yellow fruits.

SENTENCE 0: Burning trash purifies air quality.
SENTENCE 1: Burning trash aggravates air quality.
FALSE SENTENCE: 0
EXPLANATION: Burning trash will produce a lot of harmful gases and can't purify the air.

SENTENCE 0: my favorite thing is skiing in the lake
SENTENCE 1: my favorite thing is boating in the lake
FALSE SENTENCE: 0
EXPLANATION: a lake is not the right place for skiing

SENTENCE 0: He talked to her using a book shelf
SENTENCE 1: He talked to her using a mobile phone
FALSE SENTENCE: 0
EXPLANATION: Book shelves are for keeping books

SENTENCE 0: People are so glad to see the heavy smog in the winter morning
SENTENCE 1: People are so glad to see the blue sky in the winter morning
FALSE SENTENCE: 0
EXPLANATION: Smog is a kind of pollution, it makes people sad and angry

SENTENCE 0: A towel can not dry the water on your body.
SENTENCE 1: A towel can dry the water on your body
FALSE SENTENCE: 0
EXPLANATION: Towels have a certain degree of water absorption.

SENTENCE 0: There are four mountains around the table
SENTENCE 1: There are four stools around the table
FALSE SENTENCE: 0
EXPLANATION: Mountains need a great space and cannot be so close to a table

SENTENCE 0: If I have no money, I would lent it to you.
SENTENCE 1: If I have any money, I would lent it to you.
FALSE SENTENCE: 0
EXPLANATION: He cannot lent money he doesn't have

SENTENCE 0: people go to see a doctor because they fall ill
SENTENCE 1: people go to see a doctor so they fall ill
FALSE SENTENCE: 1
EXPLANATION: a doctor is meant to cure diseases

SENTENCE 0: Metro door is closing, please be quick
SENTENCE 1: Metro door is closing, please step back.
FALSE SENTENCE: 0
EXPLANATION: People should step back and wait for the next train if the door is closing

SENTENCE 0: There are many aliens in China.
SENTENCE 1: There are many people in China.
FALSE SENTENCE: 0
EXPLANATION: There aren't aliens in the world.

SENTENCE 0: People usually go to bars for drinks
SENTENCE 1: People usually go to bars for milk
FALSE SENTENCE: 1
EXPLANATION: Bars mainly sell drinks

SENTENCE 0: A red lion will match that suit.
SENTENCE 1: A red tie will match that suit.
FALSE SENTENCE: 0
EXPLANATION: no one puts a lion on their clothes.

SENTENCE 0: I have two eyes
SENTENCE 1: I have five eyes
FALSE SENTENCE: 1
EXPLANATION: Usually, humans have two eyes

SENTENCE 0: drinking milk can help teenagers grow shorter
SENTENCE 1: drinking milk can help teenagers grow taller
FALSE SENTENCE: 0
EXPLANATION: it's impossible for people to grow shorter

SENTENCE 0: She ate her ballet shoes.
SENTENCE 1: She wore her ballet shoes.
FALSE SENTENCE: 0
EXPLANATION: she cannot eat ballet shoes

SENTENCE 0: HE PUT HIS FOOT INTO THE SHOE IN ORDER TO TRY IT ON.
SENTENCE 1: HE ALSO PUT HIS HAND IN THE SHOE TO SEE IF IT FITS.
FALSE SENTENCE: 1
EXPLANATION: HANDS DON'T FIT WELL INSIDE OF SHOES.

SENTENCE 0: He poured orange juice on his cereal.
SENTENCE 1: He poured milk on his cereal.
FALSE SENTENCE:
# G.3 ECQA Example Prompt
The following are examples from a dataset. Each example consists of a question followed by five multiple choice options. The option that makes the most sense as answer to the question is labelled as "CORRECT OPTION". "EXPLANATION" explains why the selected option is chosen.

QUESTION: The chief saw his entire tribe wiped out, he was a leader with a single what?
OPTION 1: peon
OPTION 2: indian
OPTION 3: minister
OPTION 4: follower
OPTION 5: employee
CORRECT OPTION: 4
EXPLANATION: Leaders have followers who are supporters unlike peon, Indian or minister. Followers do not work for money while employees do

QUESTION: The drive was full of obstacles, he really had to what?
OPTION 1: listen to radio
OPTION 2: get into vehicle
OPTION 3: hole in one
OPTION 4: sleep
OPTION 5: pay attention
CORRECT OPTION: 5
EXPLANATION: Drive full of obstacles really needs to pay attention from driver. You cannot listen radio when the drive is full of obstacles as it may distract you. You cannot get into vehicle as you are already into the vehicle when driving. Hole in one is not things to do. You cannot sleep when the drive is full of obstacles as it may result in accident.

QUESTION: What can't viruses do without infecting a host cell?
OPTION 1: reproduce
OPTION 2: make computer malfunction
OPTION 3: infect
OPTION 4: hack computer
OPTION 5: mutate
CORRECT OPTION: 1
EXPLANATION: Viruses can't reproduce instead of infecting a host cell. Viruses can make a computer malfunction. Virus can infect. A virus can hack the computer system. Virus do mutate the system.

QUESTION: How might a automobile get off a freeway?
OPTION 1: exit ramp
OPTION 2: garage
OPTION 3: driveway
OPTION 4: repair shop
OPTION 5: stop light
CORRECT OPTION: 1
EXPLANATION: Exit ramp is the end of a freeway from where people get off the freeway in their automobiles. All the other options are not from where automobiles get off freeways.

QUESTION: It was impossible to find a parking garage, so James took a bus whenever he wanted to go where?
OPTION 1: neighbor's house
OPTION 2: car
OPTION 3: building
OPTION 4: restaurant
OPTION 5: downtown
CORRECT OPTION: 5
EXPLANATION: Downtown is or is relating to the central and main part of a city. James takes a bus to go downtown since he wouldn't find a parking garage. One won't take a bus to go to his neighbor's house and restaurants usually have a parking area. Building can be any building and a car is not a place to go to.

QUESTION: He made another call, he did this all day hoping people would what well to his offer?
OPTION 1: hang up
OPTION 2: respond
OPTION 3: contact
OPTION 4: answer
OPTION 5: attracting ducks
CORRECT OPTION: 2
EXPLANATION: A response could get an offer while contacting and answering do not. Responding means answering unlike hanging up or attracting ducks.

QUESTION: Where are people likely to sing?
OPTION 1: apartment
OPTION 2: supermarket
OPTION 3: train station
OPTION 4: opera
OPTION 5: conference
CORRECT OPTION: 4
EXPLANATION: Opera is an ancient musical art form including theatrical work. Opera includes singing. People usually sing at Opera. Apartment is not a common place where people sing. People do not sing at train stations. People do not sing at conferences of supermarkets.

QUESTION: What might people do to protect their legs from getting dirty on the farm?
OPTION 1: wear jeans
OPTION 2: milk cow
OPTION 3: believe in god
OPTION 4: avoid mud
OPTION 5: plant flowers
CORRECT OPTION: 1
EXPLANATION: People wear full clothing in order to avoid getting dirty. Jeans is a full clothing for legs. People on farms wear jeans to protect their legs from getting dirty. Milking cow does not help in avoiding dirty legs. Believe in god is an irrelevant option. Avoiding mud does not always help in protecting legs from getting dirt on them. Plant flowers is an irrelevant option.

QUESTION: Where would you get a toothpick if you do not have any?
OPTION 1: box
OPTION 2: grocery store
OPTION 3: eyes
OPTION 4: chewing
OPTION 5: mouth
CORRECT OPTION: 2
EXPLANATION: You would get a toothpick from a grocery store because it is available there. Box isn't a place from where you can get a toothpick. Eyes or Chewing is not a place. You cant get a toothpick from mouth if you dont have any.

QUESTION: What is smaller than a country but larger than a city?
OPTION 1: town
OPTION 2: france
OPTION 3: continent
OPTION 4: state
OPTION 5: metal
CORRECT OPTION: 4
EXPLANATION: Country is a collection of states and state is a collection of cities. So State is smaller than a country and larger than a city. Metal is not a place and all the other options are not smaller than a country and larger than a city.

QUESTION: With all the leaves falling each year, a natural compost keeps the soil healthy for all the trees where?
OPTION 1: garden
OPTION 2: useful for recycling
OPTION 3: surface of earth
OPTION 4: forest
OPTION 5: orchard
CORRECT OPTION: 4
EXPLANATION: A natural compost keeps the soil healthy for all the trees in a forest which is a large area covered chiefly with trees. Compost is decayed or decaying organic matter like leaves. A garden may or may not have trees. Useful for recycling is not a geographical place where trees exist. Trees do not exist across all surface of earth. Leaves of fruit trees in an orchard may or may not fall every year.

QUESTION: What must one be careful about when learning about science?
OPTION 1: become educated
OPTION 2: frustration
OPTION 3: accidents
OPTION 4: smiles
OPTION 5: basketball
CORRECT OPTION: 3
EXPLANATION: Accident is an unfortunate incident that happens unexpectedly and unintentionally. One must be careful about accidents when learning about science. Become educated is not being careful of. Frustration is the feeling of being upset as one doesn't get frustrated when learning about science. Smile is amused expression whereas being careful about smile is not necessary when learning about science. Basketball is not true as learning about science is not related with basketball.

QUESTION: Where can you learn about the anatomy of a blowfish in print?
OPTION 1: cuba
OPTION 2: fish market
OPTION 3: books
OPTION 4: france
OPTION 5: canada
CORRECT OPTION: 3
EXPLANATION: Anatomy exists in living beings including fishes and can be accessed in books. Cuba, France and Canada are countries and are not material to be printed on. Fish market cannot be printed on.

QUESTION: If you ate some spicy food, what could happen to you?
OPTION 1: medium
OPTION 2: illness
OPTION 3: throwing up
OPTION 4: heartburn
OPTION 5: sleepiness
CORRECT OPTION: 4
EXPLANATION: spicy food causes you heartburn. Medium is not that can happen to you. spicy food doesn't cause illness or throwing up or sleepiness.

QUESTION: She let him know he was being over the top, and that his antics where a little what?
OPTION 1: much
OPTION 2: plenty
OPTION 3: larger
OPTION 4: lot of
OPTION 5: big
CORRECT OPTION: 1
EXPLANATION: The behaviour of the person was getting unbearable and a little much signifies something excess beyond capacity. All the other options are either grammatically or contextually incorrect.

QUESTION: Where can a child learn about the adventures of a talking monkey?
OPTION 1: rain forest
OPTION 2: tropical areas
OPTION 3: pet store
OPTION 4: library
OPTION 5: story book
CORRECT OPTION: 5
EXPLANATION: Story books are books which are used for teaching children about various things like talking monkeys. Both tropical areas and rain forest are wild areas which are not a thing to teach child. Pet store and library are a different type of place but cannot be used to teach children.

QUESTION: You'll likely have a kitchenette in what place where you sleep away from home?
OPTION 1: house
OPTION 2: hotel room
OPTION 3: apartment
OPTION 4: allen key
OPTION 5: dormroom
CORRECT OPTION: 2
EXPLANATION: Hotel room is a bedroom usually with bath in a hotel. You'll likely have a kitchenette in a hotel room where you sleep away from home. House is a home where you live permanently and not away from home. Apartments are house and is not where you sleep away from home. Allen key is not a room where you can sleep. Dorm room usually comes without a kitchen.

QUESTION: It was the only way out of town, the police parked their vehicles and drew their guns to create a what?
OPTION 1: war
OPTION 2: sporting goods store
OPTION 3: military base
OPTION 4: roadblock
OPTION 5: fun
CORRECT OPTION: 4
EXPLANATION: A roadblock is a barrier or barricade on a road which is set up to stop people passing through a road. Roads are ways of out towns. The police parked their vehicles to create a roadblock. Parking vehicles and drawing guns does not create fun all the other options.

QUESTION: Sahmbi was lying about the fugitive's location. He was lying because he wanted to avoid legal what?
OPTION 1: confusion
OPTION 2: being found out
OPTION 3: hurt feelings
OPTION 4: being fired
OPTION 5: trouble
CORRECT OPTION: 5
EXPLANATION: People lie to avoid legal troubles as they involve lot of hassle. All the other options have no legal implication and meaning.

QUESTION: What does getting in line for a long time require in a person?
OPTION 1: intention
OPTION 2: getting in the front of the line
OPTION 3: basic organization
OPTION 4: early childhood socialization
OPTION 5: patience
CORRECT OPTION: 5
EXPLANATION: Patience is the capacity to accept or tolerate delay, problems, or suffering without becoming annoyed or anxious which is what required in a person to get in line for a long time. Getting in front of the line is not something in a person and getting in line for a long time does not require the things given in the other options.

QUESTION: What might a person see at the scene of a brutal killing?
OPTION 1: bloody mess
OPTION 2: pleasure
OPTION 3: being imprisoned
OPTION 4: feeling of guilt
OPTION 5: cake
CORRECT OPTION:
# G.4 Naturalness Test Example Prompt
The following is the prompt to filter examples for the naturalness of our interventions. Because this prompt is designed for instruction-tuned Llama2 models, it surrounds the instruction with [INST] tags, matching the format these models were finetuned on.

[INST] I'm going to show a sentence, and followed by the same sentence with a word added. It's fine if the added word changes the meaning of the sentence. However, I want you to tell me if the second sentence still makes sense with the added word.

Sentence 1: "The children throw rocks at the militant threatening their safety."

Sentence 2: "The stuck children throw rocks at the militant threatening their safety."

Does the second sentence make sense with the added word? Please begin your answer with "Yes" or "No". [/INST]
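
A small sketch of how this filter could be applied programmatically; the template mirrors the prompt quoted above, while the function names and the Yes/No parsing rule are assumptions (the generation call to the instruction-tuned model is omitted).

```python
def naturalness_prompt(original: str, modified: str) -> str:
    """Wrap the naturalness check in [INST] tags for instruction-tuned Llama 2."""
    return (
        "[INST] I'm going to show a sentence, and followed by the same sentence "
        "with a word added. It's fine if the added word changes the meaning of the "
        "sentence. However, I want you to tell me if the second sentence still "
        "makes sense with the added word.\n\n"
        f'Sentence 1: "{original}"\n\n'
        f'Sentence 2: "{modified}"\n\n'
        'Does the second sentence make sense with the added word? '
        'Please begin your answer with "Yes" or "No". [/INST]'
    )

def keep_intervention(judge_response: str) -> bool:
    """Keep an intervention only if the judge's answer begins with "Yes"."""
    return judge_response.strip().lower().startswith("yes")
```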

theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8c53352e93edf34bad0a51e98ba0b869cc3e01c4037c0249bf591afde1d1d287
size 1115725

theprobabilitiesalsomatteramorefaithfulmetricforfaithfulnessoffreetextexplanationsinlargelanguagemodels/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:017bf659328b377062f5864399668cadd2f43c446faeaa3ed29dcb7422b2d583
size 747331

timesensitiveknowledgeeditingthroughefficientfinetuning/cb482458-6d99-4805-ba5e-87d28e91cfdf_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f058648bc86f86c6767d6f8ff74a5d3a664d033547b3771f176e16fb1955e29
size 70227

timesensitiveknowledgeeditingthroughefficientfinetuning/cb482458-6d99-4805-ba5e-87d28e91cfdf_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d17a7544b99259b43cbb2a47126eee76c0a6ef54a19311f28be16d0172c3ae1
size 84507

timesensitiveknowledgeeditingthroughefficientfinetuning/cb482458-6d99-4805-ba5e-87d28e91cfdf_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:64f818bb55bfac8aff6be56003062f6046715e29386a28f3da7c450a0d25a1dd
size 1276230

timesensitiveknowledgeeditingthroughefficientfinetuning/full.md
ADDED
@@ -0,0 +1,254 @@
# Time Sensitive Knowledge Editing through Efficient Finetuning
Xiou Ge $^{1}$ , Ali Mousavi $^{1}$ , Edouard Grave $^{2*}$ , Armand Joulin $^{3*}$ , Kun Qian $^{4*}$ , Benjamin Han $^{1}$ , Mostafa Arefiyan $^{1}$ , Yunyao Li $^{4*}$
Apple, Kyutai, Google Deepmind, Adobe
# Abstract
Large Language Models (LLMs) have demonstrated impressive capabilities across different tasks and are bringing transformative changes to many domains. However, keeping the knowledge in LLMs up-to-date remains a challenge once pretraining is complete. It is thus essential to design effective methods both to update obsolete knowledge and to induce new knowledge into LLMs. Existing locate-and-edit knowledge editing (KE) methods suffer from two limitations. First, LLMs edited by such methods generally perform poorly on complex queries that require multi-hop reasoning (Zhong et al., 2023). Second, the long run-time of such locate-and-edit methods makes large-scale KE infeasible in practice. In this paper, we explore Parameter-Efficient Fine-Tuning (PEFT) techniques as an alternative for KE. We curate a more comprehensive temporal KE dataset with both knowledge update and knowledge injection examples for KE performance benchmarking<sup>1</sup>. We further probe the effect of fine-tuning on a range of layers in an LLM for the multi-hop QA task. We find that PEFT performs better than locate-and-edit techniques for time-sensitive knowledge edits.
# 1 Introduction
The rapid development of Large Language Models (LLMs) has showcased their ability to generate human-quality responses and demonstrate reasoning capabilities (Brown et al., 2020; Chowdhery et al., 2022; OpenAI, 2023; Touvron et al., 2023; McKinzie et al., 2024; Wei et al., 2023), and it is bringing revolutionary changes across diverse industries. However, maintaining factuality remains challenging for LLMs since their pre-training data are collected within a fixed time range.

Figure 1: Who's the "current" head of the United Kingdom government?
Modification $(s, r, o \to o')$ and injection $(s, r, \emptyset \to o')$ are the two main ways to update factual knowledge in LLMs, where $s, r, o$ denote the subject, relation, and object in an old fact triple, $o'$ denotes the new target object, and $\emptyset$ denotes an empty object to be populated. Previously, very few works (Zhong et al., 2023; Cohen et al., 2023) have evaluated the effectiveness of knowledge editing (KE) techniques on time-sensitive fact changes. We believe that keeping time-sensitive information current is crucial for maintaining the practical relevance of an LLM's knowledge in real-world applications. Therefore, in this paper, we focus our investigation on temporal KE.
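
As a concrete illustration of this notation, an edit can be represented as a simple record in which an empty old object distinguishes injection from modification; the class and the example facts below are illustrative only, not part of the paper's dataset.

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class KnowledgeEdit:
    subject: str               # s
    relation: str              # r
    old_object: Optional[str]  # o; None encodes an empty object (injection)
    new_object: str            # o'

# Modification (s, r, o -> o'): replace an outdated object.
update = KnowledgeEdit("United Kingdom", "head of government",
                       "Boris Johnson", "Rishi Sunak")
# Injection (s, r, None -> o'): populate a previously missing fact.
inject = KnowledgeEdit("United Kingdom", "head of government",
                       None, "Rishi Sunak")
```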
One popular approach for KE is locate-and-edit which involves identifying and directly updating model parameters associated with specific knowledge. ROME (Meng et al., 2022a) and MEMIT (Meng et al., 2022b) are two representative works in this area. There are several known limitations of ROME/MEMIT. First, they require estimation of a large covariance matrix, which might lead to numerical stability issues during computation (Yao et al., 2023). Second, for every small batch of knowledge edits, they need to locate the layer for weight optimization, which can be time consuming and difficult to scale (Yao et al., 2023). Third, Zhong et al. (2023) demonstrated that although the LLM can successfully recall the edited fact after
|
| 21 |
+
|
| 22 |
+
ROME/MEMIT editing, the post-edit model performs poorly on multi-hop questions. Hence, we would like to verify whether PEFT approaches can be more efficient than the locate-and-edit approach for the KE task and perform better both at recalling the knowledge edits and at retaining the unchanged knowledge. In addition, we believe it is worthwhile to investigate how fine-tuning the weights of transformer linear layers at different positions within the LLM (early, middle, and late) affects the multi-hop question answering task. The main contributions of this paper can be summarized as follows:
|
| 23 |
+
|
| 24 |
+
- We curate a large-scale KE dataset, CHRONOEDIT, from the Apple Knowledge Graph (Ilyas et al., 2022, 2023) that contains approximately 15k time-sensitive factual edit examples and better reflects KE in a real-world setting.
|
| 25 |
+
- We demonstrate the effectiveness of fine-tuning methods in both knowledge modification and knowledge injection.
|
| 26 |
+
- Through fine-tuning weights at different layers, we discover that the middle layers are more significant in improving the LLM's capability to answer multi-hop questions.
|
| 27 |
+
|
| 28 |
+
# 2 Related work
|
| 29 |
+
|
| 30 |
+
Knowledge editing. Yao et al. (2023) made a comprehensive review of prior work on LLM KE and pointed out future opportunities. According to Yao et al. (2023), there are three main lines of work in KE: 1) memory-based methods, which store edited examples in memory and recover relevant edits with a retriever; 2) locate-and-edit methods, which identify and optimize the neural network parameters corresponding to a specific fact; 3) additional-parameter methods, which introduce extra tunable parameters into the language model to update or memorize new facts. MeLLo (Zhong et al., 2023) is an example of the memory-based approach that enables an LLM to answer temporal multi-hop questions through effective prompt design and memory retrieval. It introduces a temporal KE dataset, MQUAKE-T, to assess the ability of a language model to answer multi-hop questions associated with a single-hop edit. However, the number of distinct knowledge edits in the MQUAKE-T dataset is too limited to demonstrate the effectiveness of KE in general. ROME (Meng et al., 2022a) treats an MLP as an associative memory
|
| 31 |
+
|
| 32 |
+
for facts and proposes a causal tracing technique to locate the weight parameters that need updating. The update to the located MLP weights in the transformer block can be computed with a closed-form solution. MEMIT (Meng et al., 2023) extends ROME to enable multiple edits at a time. ROME and MEMIT belong to the locate-and-edit category, and their limitations have been discussed above. In the additional-parameter category, T-Patcher (Huang et al., 2022) and CaliNET (Dong et al., 2022) introduce additional neurons and concatenate them with the Feed-Forward Network (FFN) layers to adjust the output distribution for a target fact. However, these approaches also tend to suffer from slow edit speed, and it is unclear how well they retain time-invariant knowledge. Moreover, prior work has mostly focused on counterfactual KEs rather than realistic and verifiable time-sensitive fact edits from knowledge graphs (Pan et al., 2023; Wang et al., 2023c, 2022; Ge et al., 2023b, 2024). In this paper, we mainly focus on experimental comparison with the locate-and-edit approach.
|
| 33 |
+
|
| 34 |
+
Parameter-Efficient Fine-Tuning. LoRA (Hu et al., 2021) is a simple yet effective adaptation technique that adds low-rank tunable weight matrices to the original weight matrices, which are kept frozen. This technique significantly reduces the number of trainable parameters during fine-tuning while keeping the inference run-time constant. In contrast, P-tuning (Liu et al., 2023) concatenates learnable tensors with the input embeddings to enable the base language model to perform well on a range of downstream tasks such as knowledge probing and natural language understanding. In this paper, we verify whether these PEFT methods can effectively modify or inject new knowledge in LLMs.
|
| 35 |
+
|
| 36 |
+
# 3 Method
|
| 37 |
+
|
| 38 |
+
We mainly fine-tune base LLMs, including LLaMA-7B, Falcon-7B, and Mistral-7B, with PEFT approaches, namely LoRA and P-tuning, and minimize the following loss function:
|
| 39 |
+
|
| 40 |
+
$$
|
| 41 |
+
\mathcal{L}_{FT} = \frac{1}{|\mathcal{D}_M|} \sum_{d \in \mathcal{D}_M} L(d; \Phi_0, \Delta\Phi) \tag{1}
|
| 42 |
+
$$
|
| 43 |
+
|
| 44 |
+
where $\mathcal{D}_M$ is the KE dataset, $d$ is a fact edit example, $L$ is the cross-entropy loss function applied to the autoregressive model, $\Phi_0$ denotes the set of original weights of the language model that are
|
| 45 |
+
|
| 46 |
+
kept frozen, and $\Delta \Phi$ denotes the additional parameters used by the PEFT adapters.
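As a concrete illustration of Eq. (1), a minimal sketch assuming a HuggingFace-style causal LM (whose forward pass returns the shifted cross-entropy loss when labels are supplied) might look as follows; the helper name and data format are our own.

```python
def edit_finetuning_loss(model, tokenizer, edit_examples, device="cuda"):
    """Average autoregressive cross-entropy over the edit dataset D_M (Eq. 1).

    The base weights Phi_0 are assumed frozen elsewhere; only the PEFT
    parameters Delta-Phi receive gradients through `model`.
    """
    total = 0.0
    for text in edit_examples:  # each d in D_M, rendered as a training string
        batch = tokenizer(text, return_tensors="pt").to(device)
        # HuggingFace causal LMs compute the token-level cross entropy
        # internally when `labels` are provided.
        out = model(**batch, labels=batch["input_ids"])
        total = total + out.loss
    return total / len(edit_examples)
```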
|
| 47 |
+
|
| 48 |
+
LoRA. LoRA introduces low-rank matrices $B \in \mathbb{R}^{d \times r}$ and $A \in \mathbb{R}^{r \times k}$ with $r \ll \min(d, k)$. The low-rank matrices $A$ and $B$ are the trainable parameters:
|
| 49 |
+
|
| 50 |
+
$$
|
| 51 |
+
h = W_0 x + BAx = (W_0 + BA)x. \tag{2}
|
| 52 |
+
$$
|
| 53 |
+
|
| 54 |
+
LoRA adaptation can be applied to any linear layer. In our experiments, we apply LoRA to the linear layers in both the MLP modules $(W_{gate}, W_{up}, W_{down})$ and the self-attention modules $(W_{q}, W_{k}, W_{v}, W_{o})$. A benefit of LoRA is that the inference runtime remains unchanged, whereas methods that insert additional layers, such as adapters, increase the inference runtime.
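A minimal PyTorch sketch of Eq. (2) may help; it is illustrative rather than the exact implementation used in our experiments.

```python
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """Wrap a frozen linear layer with a trainable low-rank update (Eq. 2)."""

    def __init__(self, base: nn.Linear, r: int = 32):
        super().__init__()
        self.base = base
        for p in self.base.parameters():   # W_0 stays frozen
            p.requires_grad = False
        d, k = base.out_features, base.in_features
        # B is initialized to zero so that BAx = 0 before training starts.
        self.A = nn.Parameter(torch.randn(r, k) * 0.01)  # A in R^{r x k}
        self.B = nn.Parameter(torch.zeros(d, r))         # B in R^{d x r}

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # h = W_0 x + B A x
        return self.base(x) + x @ self.A.T @ self.B.T
```

Since $BA$ has the same shape as $W_0$, the two can be merged into a single matrix after training, which is why the inference runtime stays constant.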
|
| 55 |
+
|
| 56 |
+
P-tuning. P-tuning learns continuous prompt embeddings and concatenates them with the original input embeddings. In this work, we leverage these tunable embeddings to adjust the output distributions of the predicted tokens during inference. Formally, let $[P_i]$ be the $i^{\mathrm{th}}$ continuous prompt embedding, and let $\mathbf{x} = \{x_0,\dots ,x_n\}$ denote the original input sequence to the LLM. The new input sequence is then $I = \{[P_{0:i}],\mathbf{x}\}$. P-tuning also uses an additional encoder $f:[P_i]\to h_i$ to map the continuous prompt embeddings to latent parameters. In our implementation, we experiment with both a 2-layer MLP and an LSTM as the mapping function $f$. Let $\mathbf{e}$ be the pretrained embedding layer; the final vector input to the LLM is then $\{h_0,\ldots ,h_i,\mathbf{e}(\mathbf{x})\}$.
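A minimal sketch of the prompt encoder with the MLP mapping (the LSTM variant substitutes a recurrent encoder for the MLP); the hidden dimension of 4096 assumes a LLaMA-7B-sized model and is otherwise arbitrary.

```python
import torch
import torch.nn as nn

class PromptEncoder(nn.Module):
    """Map n learnable prompt embeddings [P_i] to h_i and prepend them to e(x)."""

    def __init__(self, n_tokens: int = 20, dim: int = 4096):
        super().__init__()
        self.prompts = nn.Parameter(torch.randn(n_tokens, dim) * 0.01)
        self.f = nn.Sequential(  # the mapping f: [P_i] -> h_i (2-layer MLP variant)
            nn.Linear(dim, dim), nn.ReLU(), nn.Linear(dim, dim)
        )

    def forward(self, input_embeds: torch.Tensor) -> torch.Tensor:
        # input_embeds: (batch, seq, dim), i.e. e(x) from the frozen embedding layer
        h = self.f(self.prompts).unsqueeze(0).expand(input_embeds.size(0), -1, -1)
        return torch.cat([h, input_embeds], dim=1)  # {h_0, ..., h_{n-1}, e(x)}
```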
|
| 57 |
+
|
| 58 |
+
Freeze tuning. Instead of fine-tuning all weight parameters of an LLM, only a few layers are fine-tuned, which reduces the number of parameters that must be placed on GPUs for gradient computation. In our experiments, we focus on fine-tuning the MLP layers in the transformer blocks.
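A sketch of freeze tuning, assuming a LLaMA-style module tree (`model.model.layers[i].mlp`); the helper is illustrative.

```python
def freeze_all_but_mlp(model, layer_ids):
    """Enable gradients only for the MLP sub-modules of the chosen blocks."""
    for p in model.parameters():
        p.requires_grad = False
    for i in layer_ids:
        for p in model.model.layers[i].mlp.parameters():
            p.requires_grad = True

# E.g., fine-tune only the MLPs of the last five of LLaMA-7B's 32 layers:
# freeze_all_but_mlp(model, layer_ids=range(27, 32))
```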
|
| 59 |
+
|
| 60 |
+
# 4 Experiments
|
| 61 |
+
|
| 62 |
+
ChronoEdit dataset. To construct a more comprehensive temporal KE dataset with more real-world knowledge edit examples, we collect the time-sensitive KE dataset CHRONOEDIT. The motivation is that the existing MQUAKE-T dataset (Zhong et al., 2023) contains only 96 unique temporal edit examples, which may not be large enough to reveal the effect of KE on LLMs' performance. Fact changes can be located in knowledge graphs (Ge et al., 2022a,b, 2023a; Wang et al., 2023b) based on the semantics of the relation type and its time qualifiers. Specifically, we focus on predicates that have a valid 'start time' qualifier attached.
|
| 63 |
+
|
| 64 |
+
<table><tr><td colspan="2">Method</td><td>REL</td><td>GEN</td><td>LOC</td><td>#Params</td><td>GPU time</td></tr><tr><td colspan="2">ROME</td><td>62.25</td><td>38.76</td><td>-</td><td>45M</td><td>6540s</td></tr><tr><td colspan="2">MEMIT</td><td>84.65</td><td>71.75</td><td>-</td><td>225M</td><td>8147s</td></tr><tr><td rowspan="3">LoRA</td><td>Attn</td><td>43.73</td><td>45.03</td><td>46.51</td><td>34M</td><td>1882s</td></tr><tr><td>MLP</td><td>98.78</td><td>96.97</td><td>55.69</td><td>46M</td><td>1389s</td></tr><tr><td>Attn + MLP</td><td>98.99</td><td>97.33</td><td>54.11</td><td>80M</td><td>2356s</td></tr><tr><td rowspan="2">P-tuning</td><td>MLP</td><td>87.03</td><td>72.11</td><td>39.28</td><td>50M</td><td>30443s</td></tr><tr><td>LSTM</td><td>94.16</td><td>73.7</td><td>38.70</td><td>772M</td><td>39657s</td></tr><tr><td colspan="2">Freeze tuning</td><td>98.2</td><td>96.18</td><td>44.45</td><td>676M</td><td>1152s</td></tr><tr><td colspan="2">Full fine-tuning</td><td>98.99</td><td>98.85</td><td>45.31</td><td>6.74B</td><td>5604s</td></tr></table>
|
| 65 |
+
|
| 66 |
+
Table 1: Reliability (REL), Generalization (GEN), and Locality (LOC) performance, No. of trainable parameters, GPU time for different approaches on LLaMA-7B.
|
| 67 |
+
|
| 68 |
+
We set the time threshold to 2022-01-01 and collect new knowledge statements that are valid after that time. The dataset statistics are shown in Fig. 2.
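The selection step can be sketched as follows, assuming a hypothetical record layout for KG facts carrying a `start_time` qualifier:

```python
from datetime import date

THRESHOLD = date(2022, 1, 1)  # the time threshold used for CHRONOEDIT

def collect_new_statements(facts):
    """Yield (s, r, o') triples whose 'start time' qualifier falls after the
    threshold; `facts` is an iterable of dicts with illustrative keys."""
    for fact in facts:
        start = fact.get("start_time")  # a datetime.date, or None if absent
        if start is not None and start >= THRESHOLD:
            yield fact["subject"], fact["relation"], fact["object"]
```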
|
| 69 |
+
|
| 70 |
+
Evaluation metrics. Existing KE benchmarking datasets typically evaluate the following three metrics of the post-edit model:
|
| 71 |
+
|
| 72 |
+
- Reliability: measures the fraction of knowledge edits that the post-edit model can answer correctly.
|
| 73 |
+
- Generalization: measures the post-edit model's ability to complete rephrased prompts or answer rephrased questions.
|
| 74 |
+
- Locality: measures the post-edit model's ability to answer questions about time-invariant knowledge.
|
| 75 |
+
|
| 76 |
+
We generate question-answering pairs as training examples that are used to induce new facts into the LLM. To evaluate Reliability, we generate a corresponding cloze test to check whether the post-edit model can successfully complete the sentence with the new fact. To evaluate Generalization, we generate paraphrased question-answer pairs from the training examples with the help of the OpenAI text-davinci-003 API. To assess Locality, we follow Jang et al. (2021) and use a subset of LAMA (Petroni et al., 2019) called INVARIANTLAMA, which contains time-invariant statements. We report the Exact Match (EM) ratio for Reliability and Generalization and the ROUGE-1 score for Locality.
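For reference, the two scores can be sketched as below. The ROUGE-1 shown is a simplified unigram-overlap F1 and may differ in detail from the implementation we used.

```python
def exact_match_ratio(predictions, references):
    """EM ratio used for Reliability and Generalization."""
    hits = sum(p.strip() == r.strip() for p, r in zip(predictions, references))
    return hits / len(references)

def rouge1_f(prediction, reference):
    """Simplified ROUGE-1 F1 (unigram overlap) used for Locality."""
    p_tokens, r_tokens = prediction.split(), reference.split()
    overlap = len(set(p_tokens) & set(r_tokens))
    if overlap == 0:
        return 0.0
    precision, recall = overlap / len(p_tokens), overlap / len(r_tokens)
    return 2 * precision * recall / (precision + recall)
```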
|
| 77 |
+
|
| 78 |
+
Fine-tuning and locate-and-edit performance comparison. To compare the performance of different fine-tuning approaches for KE, we select a subset of the collected temporal knowledge dataset that contains 7 relations and 1,388 knowledge modification examples. To compare with locate-and-edit methods, we also include KE results using ROME and MEMIT. Results are shown
|
| 79 |
+
|
| 80 |
+

|
| 81 |
+
Figure 2: Dataset statistics of CHRONOEDIT.
|
| 82 |
+
|
| 83 |
+
<table><tr><td rowspan="3">Predicate</td><td colspan="4">LoRA</td><td colspan="4">Freeze tuning</td></tr><tr><td colspan="2">Modification</td><td colspan="2">Injection</td><td colspan="2">Modification</td><td colspan="2">Injection</td></tr><tr><td>REL</td><td>GEN</td><td>REL</td><td>GEN</td><td>REL</td><td>GEN</td><td>REL</td><td>GEN</td></tr><tr><td>Captain</td><td>87.5</td><td>100</td><td>81.81</td><td>100</td><td>100</td><td>100</td><td>100</td><td>100</td></tr><tr><td>CEO</td><td>100</td><td>93.33</td><td>100</td><td>90.32</td><td>100</td><td>94.66</td><td>100</td><td>92.47</td></tr><tr><td>Chair person</td><td>100</td><td>93.67</td><td>99.61</td><td>97.88</td><td>100</td><td>93.39</td><td>99.42</td><td>96.92</td></tr><tr><td>Citizen of</td><td>100</td><td>67.85</td><td>100</td><td>83.87</td><td>100</td><td>100</td><td>98.38</td><td>98.38</td></tr><tr><td>Director manager</td><td>100</td><td>97.98</td><td>100</td><td>98.29</td><td>99.32</td><td>97.31</td><td>95.72</td><td>95.72</td></tr><tr><td>General manager</td><td>100</td><td>87.5</td><td>100</td><td>90.90</td><td>100</td><td>87.5</td><td>100</td><td>90.90</td></tr><tr><td>Head coach</td><td>100</td><td>99.64</td><td>100</td><td>97.56</td><td>99.82</td><td>98.41</td><td>98.37</td><td>100</td></tr><tr><td>Head of government</td><td>98.44</td><td>93.14</td><td>99.43</td><td>92.09</td><td>96.88</td><td>95.63</td><td>98.87</td><td>96.61</td></tr><tr><td>Head of state</td><td>82.35</td><td>80.39</td><td>100</td><td>96</td><td>84.31</td><td>78.43</td><td>100</td><td>100</td></tr><tr><td>Headquarter location</td><td>100</td><td>72.22</td><td>97.77</td><td>88.89</td><td>83.33</td><td>83.33</td><td>82.22</td><td>82.22</td></tr><tr><td>Marriage</td><td>100</td><td>98.57</td><td>99.23</td><td>97.71</td><td>92.85</td><td>95.71</td><td>77.15</td><td>94.92</td></tr><tr><td>Secretary general</td><td>100</td><td>100</td><td>100</td><td>95.23</td><td>100</td><td>95.45</td><td>95.23</td><td>95.23</td></tr><tr><td>Team membership</td><td>94.14</td><td>99.34</td><td>92.15</td><td>99.49</td><td>77.54</td><td>96.38</td><td>40.38</td><td>88.46</td></tr><tr><td>Overall</td><td>94.99</td><td>98.58</td><td>94.86</td><td>98.22</td><td>81.51</td><td>96.19</td><td>58.44</td><td>90.99</td></tr></table>
|
| 84 |
+
|
| 85 |
+
Table 2: Performance on each predicate type in CHRONOEDIT for LLaMA-7B.
|
| 86 |
+
|
| 87 |
+
in Table 1. LoRA fine-tuning of the MLP and attention layers achieves Reliability and Generalization scores comparable to full fine-tuning while using only a fraction of the trainable parameters. Moreover, LoRA fine-tuning better retains invariant knowledge and achieves higher Locality scores. ROME and MEMIT are able to successfully edit some temporal knowledge in the collected dataset. However, their generalization ability degrades significantly, especially for ROME, and they are relatively slow compared to LoRA fine-tuning. We also include P-tuning as a baseline. Similar to the locate-and-edit approach, its generalization score is low, and the GPU time it takes to make successful edits is significantly longer; it is not as efficient or effective as LoRA. To verify that PEFT can be generally effective for KE in LLMs, we further compare the performance of different PEFT settings on Falcon-7B (Penedo et al., 2023)
|
| 88 |
+
|
| 89 |
+
and Mistral-7B (Jiang et al., 2023) in Table 3. In Fig. 3, we compare the performance of LoRA on the MLP and attention layers when different numbers of edits are applied to an LLM. We can see that the LoRA fine-tuning approach is robust to a large number of KEs.
|
| 90 |
+
|
| 91 |
+
LoRA and Freeze tuning fine-grained predicate analysis. In Table 2, we examine the Reliability and Generalization scores of the fine-tuned model across all 13 individual relations. For LoRA, we apply it to both the MLP and self-attention parameters. For freeze tuning, we fine-tune the MLP weights of the last five layers. The results show that LoRA is more robust than freeze tuning as the number of edits increases. Freeze tuning does not perform well in knowledge injection, with its performance degradation largely attributable to the 'team membership' class, which contains the most knowledge injection examples. This suggests that freeze
|
| 92 |
+
|
| 93 |
+
<table><tr><td>Model</td><td colspan="3">LLaMA-7B</td><td colspan="3">Falcon-7B</td><td colspan="3">Mistral-7B</td></tr><tr><td>Method</td><td>REL</td><td>GEN</td><td>LOC</td><td>REL</td><td>GEN</td><td>LOC</td><td>REL</td><td>GEN</td><td>LOC</td></tr><tr><td>LoRA Attn</td><td>43.73</td><td>45.03</td><td>46.51</td><td>98.91</td><td>93.65</td><td>49.61</td><td>99.2</td><td>96.25</td><td>54.08</td></tr><tr><td>LoRA MLP</td><td>98.78</td><td>96.97</td><td>55.69</td><td>98.92</td><td>96.03</td><td>51.41</td><td>99.13</td><td>97.98</td><td>57.84</td></tr><tr><td>LoRA Attn + MLP</td><td>98.99</td><td>97.33</td><td>54.11</td><td>99.06</td><td>96.97</td><td>49.41</td><td>99.13</td><td>98.05</td><td>54.21</td></tr><tr><td>Freeze tuning</td><td>98.2</td><td>96.18</td><td>44.45</td><td>-</td><td>-</td><td>-</td><td>94.66</td><td>94.95</td><td>43.17</td></tr><tr><td>Full fine-tuning</td><td>98.99</td><td>98.85</td><td>45.31</td><td>99.21</td><td>98.19</td><td>38.27</td><td>-</td><td>-</td><td>-</td></tr></table>
|
| 94 |
+
|
| 95 |
+
Table 3: Performance of PEFT fine-tuning for KE across different LLMs
|
| 96 |
+
|
| 97 |
+

|
| 98 |
+
Figure 3: Reliability, Generalization, and Locality performance versus the number of edits on LLaMA-7B.
|
| 99 |
+
|
| 100 |
+

|
| 101 |
+
Figure 4: Performance of fine-tuning methods on the MQUAKE-T multi-hop dataset for LLaMA-7B.
|
| 102 |
+
|
| 103 |
+
tuning might not be very effective at introducing new facts about subjects that were rarely observed during the pretraining of LLMs.
|
| 104 |
+
|
| 105 |
+
Layer sweep study. For the freeze tuning and LoRA fine-tuning approaches, we also investigate the effect of optimizing the LLM's weight parameters at different positions (early, middle, and late layers) on its multi-hop question answering capability. We perform a layer sweep study on the MQUAKE-T multi-hop question answering task. For each data point of the experiment, we fine-tune only $l = 3$ layers at a time. We then move this sliding window from the early layers to the last layers of the LLM to probe the effect of fine-tuning position on multi-hop question answering performance. We compare freeze tuning of the MLP layers against LoRA on three combinations of weight matrices: 1) the self-attention weight matrices $W_{q}$, $W_{v}$; 2) the MLP layers; 3) both the self-attention and MLP layers. Our observations align with the associative-memory view (Geva et al., 2021) verified by ROME: MLP layers in transformers are more relevant for memorizing factual knowledge associations ($s, r \Rightarrow o$). We observe that applying LoRA to the MLP weight matrices brings more significant improvement than
|
| 106 |
+
|
| 107 |
+
applying LoRA to the self-attention weight matrices. Applying LoRA to both the self-attention and MLP layers can achieve performance similar to freeze tuning on multi-hop QA tasks while using far fewer trainable parameters: applying LoRA to both MLP and self-attention requires 7.5M trainable parameters, whereas freeze tuning requires 405.8M. For complete performance benchmarking, we also compare with a memory-based KE approach for multi-hop QA in Table 6 of the Appendix.
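The sweep itself reduces to enumerating windows of consecutive layer indices; the sketch below assumes a stride of one, which the text does not fix.

```python
def layer_windows(num_layers: int = 32, window: int = 3):
    """Yield the l = 3 consecutive layer indices fine-tuned at each sweep step."""
    for start in range(num_layers - window + 1):
        yield list(range(start, start + window))

# For LLaMA-7B (32 layers): [0, 1, 2], [1, 2, 3], ..., [29, 30, 31]
```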
|
| 108 |
+
|
| 109 |
+
# 5 Conclusion
|
| 110 |
+
|
| 111 |
+
In this paper, we have systematically examined the feasibility of performing KE through PEFT. We have compared the performance of fine-tuning methods, including LoRA, P-tuning, and freeze tuning, with locate-and-edit approaches for KE. Our results demonstrate that fine-tuning can successfully update time-sensitive factual knowledge in LLMs both efficiently and effectively, without compromising the LLMs' ability to answer questions about invariant knowledge or to perform multi-hop reasoning. We have also contributed a large-scale KE dataset, CHRONOEDIT, that contains both modification edit and injection edit examples.
|
| 112 |
+
|
| 113 |
+
# Limitations
|
| 114 |
+
|
| 115 |
+
There are two limitations that we would like to discuss. First, although we have collected a comprehensive and realistic temporal KE dataset, we primarily gather time-sensitive fact changes from Wikipedia, the most frequently used data source for LLM pre-training. We have yet to include other data sources or knowledge graphs whose ontological information would allow us to assess LLMs' ability to perform reasoning. Second, we have not covered another important aspect of KE, namely removing misinformation and mitigating hate-speech generation in LLMs. We will expand the scope of exploration in future work.
|
| 116 |
+
|
| 117 |
+
# Acknowledgements
|
| 118 |
+
|
| 119 |
+
We would like to express our gratitude to Bin Wang for the valuable discussions during the preliminary research exploration phase. We also extend our thanks to Azadeh Nikfarjam, Samira Khorshidi, Alexis McClimans, Fei Wu, and Eric Choi for their guidance in collecting the knowledge editing dataset. Additionally, we are grateful to Barry Theobald, Yash Govind, Varun Embar, Shihab Chowdhury, and Hong Yu for proofreading the manuscript and providing insightful advice to improve the paper.
|
| 120 |
+
|
| 121 |
+
# References
|
| 122 |
+
|
| 123 |
+
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901.
|
| 124 |
+
Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311.
|
| 125 |
+
Roi Cohen, Eden Biran, Ori Yoran, Amir Globerson, and Mor Geva. 2023. Evaluating the ripple effects of knowledge editing in language models. arXiv preprint arXiv:2307.12976.
|
| 126 |
+
Qingxiu Dong, Damai Dai, Yifan Song, Jingjing Xu, Zhifang Sui, and Lei Li. 2022. Calibrating factual knowledge in pretrained language models. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 5937-5947.
|
| 127 |
+
|
| 128 |
+
Xiou Ge, Yun Cheng Wang, Bin Wang, and C-C Jay Kuo. 2023a. Compounding geometric operations for knowledge graph completion. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6947-6965.
|
| 129 |
+
Xiou Ge, Yun-Cheng Wang, Bin Wang, and C-C Jay Kuo. 2023b. Knowledge graph embedding with 3d compound geometric transformations. arXiv preprint arXiv:2304.00378.
|
| 130 |
+
Xiou Ge, Yun Cheng Wang, Bin Wang, C-C Jay Kuo, et al. 2022a. Typeea: Type-associated embedding for knowledge graph entity alignment. APSIPA Transactions on Signal and Information Processing, 12(1).
|
| 131 |
+
Xiou Ge, Yun Cheng Wang, Bin Wang, C-C Jay Kuo, et al. 2024. Knowledge graph embedding: An overview. APSIPA Transactions on Signal and Information Processing, 13(1).
|
| 132 |
+
Xiou Ge, Yun-Cheng Wang, Bin Wang, and CC Jay Kuo. 2022b. Core: A knowledge graph entity type prediction method via complex space regression and embedding. *Pattern Recognition Letters*, 157:97-103.
|
| 133 |
+
Mor Geva, Roei Schuster, Jonathan Berant, and Omer Levy. 2021. Transformer feed-forward layers are key-value memories. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 5484-5495.
|
| 134 |
+
Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations.
|
| 135 |
+
Zeyu Huang, Yikang Shen, Xiaofeng Zhang, Jie Zhou, Wenge Rong, and Zhang Xiong. 2022. Transformer-patcher: One mistake worth one neuron. In The Eleventh International Conference on Learning Representations.
|
| 136 |
+
Ihab F Ilyas, JP Lacerda, Yunyao Li, Umar Farooq Minhas, Ali Mousavi, Jeffrey Pound, Theodoros Rekatsinas, and Chiraag Sumanth. 2023. Growing and serving large open-domain knowledge graphs. In *Companion of the 2023 International Conference on Management of Data*, pages 253-259.
|
| 137 |
+
Ihab F Ilyas, Theodoros Rekatsinas, Vishnu Konda, Jeffrey Pound, Xiaoguang Qi, and Mohamed Soliman. 2022. Saga: A platform for continuous construction and serving of knowledge at scale. In Proceedings of the 2022 International Conference on Management of Data, pages 2259-2272.
|
| 138 |
+
Joel Jang, Seonghyeon Ye, Sohee Yang, Joongbo Shin, Janghoon Han, KIM Gyeonghun, Stanley Jungkyu Choi, and Minjoon Seo. 2021. Towards continual knowledge learning of language models. In International Conference on Learning Representations.
|
| 139 |
+
|
| 140 |
+
Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 2023. Mistral 7b. arXiv preprint arXiv:2310.06825.
|
| 141 |
+
Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. 2023. Gpt understands, too. AI Open.
|
| 142 |
+
Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, et al. 2024. Mm1: Methods, analysis & insights from multimodal llm pre-training. arXiv preprint arXiv:2403.09611.
|
| 143 |
+
Kevin Meng, David Bau, Alex Andonian, and Yonatan Belinkov. 2022a. Locating and editing factual associations in GPT. Advances in Neural Information Processing Systems, 35.
|
| 144 |
+
Kevin Meng, Arnab Sen Sharma, Alex Andonian, Yonatan Belinkov, and David Bau. 2023. Mass editing memory in a transformer. The Eleventh International Conference on Learning Representations (ICLR).
|
| 145 |
+
Yuxian Meng, Xiaoya Li, Xiayu Zheng, Fei Wu, Xiaofei Sun, Tianwei Zhang, and Jiwei Li. 2022b. Fast nearest neighbor machine translation. In *Findings of the Association for Computational Linguistics: ACL* 2022, pages 555-565, Dublin, Ireland. Association for Computational Linguistics.
|
| 146 |
+
OpenAI. 2023. GPT-4 technical report. arXiv preprint arXiv:2303.08774.
|
| 147 |
+
Jeff Pan, Simon Razniewski, Jan-Christoph Kalo, Sneha Singhania, Jiaoyan Chen, Stefan Dietze, Hajira Jabeen, Janna Omeliyanenko, Wen Zhang, Matteo Lissandrini, et al. 2023. Large language models and knowledge graphs: Opportunities and challenges. Transactions on Graph Data and Knowledge.
|
| 148 |
+
Guilherme Penedo, Quentin Malartic, Daniel Hesslow, Ruxandra Cojocaru, Alessandro Cappelli, Hamza Alobeidli, Baptiste Pannier, Ebtesam Almazrouei, and Julien Launay. 2023. The RefinedWeb dataset for Falcon LLM: outperforming curated corpora with web data, and web data only. arXiv preprint arXiv:2306.01116.
|
| 149 |
+
Fabio Petroni, Tim Rocktäschel, Sebastian Riedel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, and Alexander Miller. 2019. Language models as knowledge bases? In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2463-2473.
|
| 150 |
+
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro,
|
| 151 |
+
|
| 152 |
+
Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.
|
| 153 |
+
Peng Wang, Ningyu Zhang, Xin Xie, Yunzhi Yao, Bozhong Tian, Mengru Wang, Zekun Xi, Siyuan Cheng, Kangwei Liu, Guozhou Zheng, et al. 2023a. EASYEDIT: An easy-to-use knowledge editing framework for large language models. arXiv preprint arXiv:2308.07269.
|
| 154 |
+
Yun-Cheng Wang, Xiou Ge, Bin Wang, and C-C Jay Kuo. 2022. Kgboost: A classification-based knowledge base completion method with negative sampling. Pattern Recognition Letters, 157:104-111.
|
| 155 |
+
Yun-Cheng Wang, Xiou Ge, Bin Wang, and C-C Jay Kuo. 2023b. Asyncet: Asynchronous learning for knowledge graph entity typing with auxiliary relations. arXiv preprint arXiv:2308.16055.
|
| 156 |
+
Yun Cheng Wang, Xiou Ge, Bin Wang, and C-C Jay Kuo. 2023c. Greenkgc: A lightweight knowledge graph completion method. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10596-10613.
|
| 157 |
+
Chengwei Wei, Yun-Cheng Wang, Bin Wang, C-C Jay Kuo, et al. 2023. An overview of language models: Recent developments and outlook. APSIPA Transactions on Signal and Information Processing, 13(2).
|
| 158 |
+
Yunzhi Yao, Peng Wang, Bozhong Tian, Siyuan Cheng, Zhoubo Li, Shumin Deng, Huajun Chen, and Ningyu Zhang. 2023. Editing large language models: Problems, methods, and opportunities. arXiv preprint arXiv:2305.13172.
|
| 159 |
+
Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, and Yongqiang Ma. 2024. Llamafactory: Unified efficient fine-tuning of 100+ language models. arXiv preprint arXiv:2403.13372.
|
| 160 |
+
Zexuan Zhong, Zhengxuan Wu, Christopher D Manning, Christopher Potts, and Danqi Chen. 2023. MQuAKE: Assessing knowledge editing in language models via multi-hop questions. arXiv preprint arXiv:2305.14795.
|
| 161 |
+
|
| 162 |
+
# A Dataset statistics
|
| 163 |
+
|
| 164 |
+
# A.1 MQUAKE-T dataset experiments
|
| 165 |
+
|
| 166 |
+
We primarily use the MQUAKE-T dataset, which contains temporal real-world knowledge updates, to compare the KE performance of different fine-tuning techniques with baseline methods. The goal is to validate whether PEFT approaches such as LoRA and P-tuning can be effective for performing KE. We also demonstrate that PEFT approaches can be more effective than the locate-and-edit approaches for multi-hop question answering.
|
| 167 |
+
|
| 168 |
+
In this dataset, each temporal fact edit example is also associated with multi-hop questions, which allows us to assess the complex-query answering ability of the post-edit model. The MQUAKE-T dataset was constructed by taking the difference between two Wikidata dumps: 2021-04 and 2023-04. MQUAKE-T selects 6 relations that most likely correspond to real fact changes. The statistics of the dataset are shown in Table 4.
|
| 169 |
+
|
| 170 |
+
<table><tr><td>MQUAKE-T</td><td>#Examples</td></tr><tr><td>Unique edits</td><td>96</td></tr><tr><td>2-hop questions</td><td>75</td></tr><tr><td>3-hop questions</td><td>348</td></tr><tr><td>4-hop questions</td><td>567</td></tr></table>

Table 4: Statistics of the MQUAKE-T dataset.
|
| 171 |
+
|
| 172 |
+
Comparing with baselines. In Table 5, we compare the edit-wise performance of fine-tuning techniques with the locate-and-edit baseline methods. We use LLaMA-7B (Touvron et al., 2023) as the base model for both the locate-and-edit baselines and the fine-tuning techniques. Experimental results show that fine-tuning techniques perform better than the locate-and-edit baselines, while the run-time to complete all the knowledge edits is significantly shorter. In Table 6, we compare the multi-hop QA performance of different post-edit models and approaches.
|
| 173 |
+
|
| 174 |
+
LoRA ablation and parameter study. We perform an ablation study of applying LoRA adaptation to different weight matrices in the self-attention module: $W_{q}, W_{v}, W_{k}, W_{o}$. The results, shown in Table 7, indicate that applying LoRA adaptation to the query matrix $W_{q}$ and the value matrix $W_{v}$ gives the best result. We also evaluate the knowledge edit success rate when the LoRA rank is set to different values. In our experiment, we test $r = \{4, 8, 16, 32, 64\}$, as shown in Fig. 5, and find that the optimal rank is $r = 32$.
|
| 175 |
+
|
| 176 |
+
# A.2 CHRONOEDIT dataset
|
| 177 |
+
|
| 178 |
+
In the new dataset, we set the time threshold to 2022-01-01 and collect new knowledge statements
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
<table><tr><td>Method</td><td>Edit Accuracy</td><td>Runtime</td></tr><tr><td>ROME</td><td>92.51</td><td>2h32m2s</td></tr><tr><td>MEMIT</td><td>96.44</td><td>2h48m49s</td></tr><tr><td>LoRA</td><td>99.36</td><td>2m13s</td></tr><tr><td>P-tuning</td><td>97.75</td><td>1m51s</td></tr><tr><td>Freeze-tuning</td><td>100</td><td>3m16s</td></tr><tr><td>Full fine-tuning</td><td>99.83</td><td>8m18s</td></tr></table>
|
| 183 |
+
|
| 184 |
+
Table 5: Editwise performance on LLaMA-7B.
|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
Figure 5: Performance of LoRA at different ranks for the MQUAKE-T multi-hop dataset with LLaMA-7B.
|
| 188 |
+
|
| 189 |
+

|
| 190 |
+
Figure 6: Comparing Reliability performance of LSTM and MLP encoders across epochs when using P-tuning for LLaMA-7B.
|
| 191 |
+
|
| 192 |
+

|
| 193 |
+
Figure 7: Comparing Reliability performance for different numbers of prompt tokens when using P-tuning for LLaMA-7B.
|
| 194 |
+
|
| 195 |
+

|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
|
| 199 |
+

|
| 200 |
+
Figure 8: Reliability, Generalization, and Locality performance of different fine-tuning methods across epochs for LLaMA-7B.
|
| 201 |
+
|
| 202 |
+

|
| 203 |
+
|
| 204 |
+
<table><tr><td>Base Model</td><td>KE Type</td><td>KE Method</td><td>Multi-hop QA Acc</td></tr><tr><td rowspan="6">LLaMA-7B</td><td rowspan="2">Locate-and-edit</td><td>ROME</td><td>38.5</td></tr><tr><td>MEMIT</td><td>39.3</td></tr><tr><td rowspan="2">Additional parameter</td><td>P-tuning</td><td>14.7</td></tr><tr><td>LoRA</td><td>62.6</td></tr><tr><td rowspan="2">Direct fine-tune</td><td>Freeze tuning</td><td>72.5</td></tr><tr><td>Full FT</td><td>71.0</td></tr><tr><td>Vicuna-7B</td><td rowspan="3">Memory-based</td><td rowspan="3">MeLLo</td><td>30.7</td></tr><tr><td>GPT-J</td><td>51.3</td></tr><tr><td>GPT-3</td><td>85.5</td></tr></table>

Table 6: Performance of post-edit models on multi-hop questions for LLaMA-7B.
|
| 205 |
+
|
| 206 |
+
that are valid after that time. We collect both knowledge modification examples, $(s,r,o)\to (s,r,o^{\prime})$, and knowledge injection examples, $(s,r,\emptyset)\rightarrow (s,r,o^{\prime})$. The statistics of the dataset are shown in Fig. 2. An example of fact pairs from the KG that can lead to time-sensitive knowledge edits is shown in Table 8. We convert such fact pairs into question-answering and instruction fine-tuning examples for training. The corresponding sentence-completion examples for reliability evaluation, rephrased QA examples for generalization evaluation,
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
<table><tr><td>Linear Layer</td><td>Edit Accuracy</td></tr><tr><td>Wq</td><td>71.47</td></tr><tr><td>Wv</td><td>97.48</td></tr><tr><td>Wq, Wv</td><td>98.67</td></tr><tr><td>Wq, Wv, Wk, Wo</td><td>97.56</td></tr></table>
|
| 211 |
+
|
| 212 |
+
Table 7: Ablation studies of the layers in LLaMA-7B that LoRA is applied to.
|
| 213 |
+
|
| 214 |
+
and invariant knowledge sentence-completion examples for locality evaluation are shown in Table 9.
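A sketch of how such records could be generated from a fact pair like the one in Table 8; the templates are illustrative simplifications of the examples in Table 9.

```python
def build_examples(subject, relation, old_obj, new_obj):
    """Turn one KG fact pair into training and Reliability-test records."""
    train = [
        {"instruction": f"Who is the current {relation} of {subject}?",
         "input": "", "output": f"{new_obj}."},
        {"instruction": f"Update the following statement about the current "
                        f"{relation} of {subject}.",
         "input": f"{old_obj}.", "output": f"{new_obj}."},
    ]
    test_rel = {"instruction": f"The current {relation} of {subject} is",
                "input": "", "output": f"{new_obj}."}
    return train, test_rel

train, test_rel = build_examples("Volkswagen Group", "chief executive officer",
                                 "Herbert Diess", "Oliver Blume")
```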
|
| 215 |
+
|
| 216 |
+
LoRA and Freeze tuning ablation and parameter study. In Fig. 8, we evaluate the performance of different fine-tuning configurations across epochs. In particular, we report accuracy, i.e., the Exact Match (EM) ratio, for Reliability and Generalization, and the ROUGE-1 score for Locality. For LoRA, we experiment with three settings: applying LoRA to the self-attention weights (LoRA Attention), to the MLP weights (LoRA MLP), and to both the self-attention and MLP weights (LoRA MLP Attention). In this set of experiments,
|
| 217 |
+
|
| 218 |
+
<table><tr><td>Organization</td><td>CEO</td><td>Start Time</td><td>End Time</td></tr><tr><td>Volkswagen Group</td><td>Herbert Diess</td><td>+2018-04-00T00:00:00Z_MONTH</td><td>+2022-08-31T00:00:00Z_DAY</td></tr><tr><td>Volkswagen Group</td><td>Oliver Blume</td><td>+2022-09-01T00:00:00Z_DAY</td><td></td></tr></table>
|
| 219 |
+
|
| 220 |
+
Table 8: An example of locating knowledge edit data from fact pairs.
|
| 221 |
+
|
| 222 |
+
<table><tr><td>Examples</td><td></td></tr><tr><td rowspan="2">Train</td><td>{"instruction": "Who is the current chief executive officer of Volkswagen Group?", "input": "", "output": "Oliver Blume." }</td></tr><tr><td>{"instruction": "Update the following statement about the current chief executive officer of Volkswagen Group.", "input": "Herbert Diess.", "output": "Oliver Blume." }</td></tr><tr><td>Test (REL)</td><td>{"instruction": "The current chief executive officer of Volkswagen Group is", "input": "", "output": "Oliver Blume." }</td></tr><tr><td>Rephrase (GEN)</td><td>{"instruction": "What is the name of the current Volkswagen Group CEO?", "input": "", "output": "Oliver Blume." }</td></tr><tr><td>Invariant (LOC)</td><td>{"instruction": "The headquarters of Volkswagen Commercial Vehicles is in?", "input": "", "output": "Hanover." }</td></tr></table>
|
| 223 |
+
|
| 224 |
+
Table 9: Fine-tuning and testing examples.
|
| 225 |
+
|
| 226 |
+
we apply LoRA to all layers. For freeze tuning, we fine-tune the MLP weights of the last 5 layers of the LLaMA model. Results show that applying LoRA to the MLP weights is more effective for memorizing new facts than applying LoRA to the self-attention weights. While freeze tuning can also effectively induce the knowledge update into the model, its Locality score is lower than that of the LoRA MLP setting, which means freeze tuning degrades the LLM's existing invariant knowledge.
|
| 227 |
+
|
| 228 |
+
P-tuning ablation and parameter study. Although P-tuning can be equally effective for KE, we find that it requires more fine-tuning epochs to ensure successful knowledge edits, which lengthens the time needed to perform them. In Fig. 6, we compare the performance of the LSTM and MLP encoders across epochs when using P-tuning, with the number of prompt embedding tokens set to $n = 20$. We observe that the LSTM encoder allows P-tuning edit performance to converge faster than the MLP encoder. In Fig. 7, we instead compare the performance of
|
| 229 |
+
|
| 230 |
+
P-tuning with different numbers of prompt embedding tokens. Using more than $n = 20$ tokens does not seem to give a significant advantage in edit accuracy.
|
| 231 |
+
|
| 232 |
+
Fine-grained performance analysis of time-invariant knowledge. For the KE experiment applying LoRA to the MLP layers of LLaMA-7B, we perform a fine-grained analysis of the different types of time-invariant knowledge and list the performance in Table 10. We conjecture that time-invariant knowledge with a small valid candidate set for the target, such as "language" or "capital", tends to be well retained; these predicates are mostly 1-to-1 or N-to-1. In contrast, when the cardinality of the valid candidate set becomes larger, often for N-to-N predicates such as "twin city" and "music label", the exact subject-object association becomes harder to retain.
|
| 233 |
+
|
| 234 |
+
Implementation details. Experiments were conducted on a compute node with 8 NVIDIA Tesla A100 GPUs, each with 40GB of memory. We develop the fine-tuning pipeline based on LLaMA-Factory$^2$
|
| 235 |
+
|
| 236 |
+
<table><tr><td>Best 3</td><td>ROUGE-1</td></tr><tr><td>native language of</td><td>70.2</td></tr><tr><td>official language of</td><td>61.7</td></tr><tr><td>Capital of</td><td>58.7</td></tr><tr><td>Worst 3</td><td>ROUGE-1</td></tr><tr><td>twin cities</td><td>1.55</td></tr><tr><td>is a</td><td>5.68</td></tr><tr><td>is represented by music label</td><td>9.47</td></tr></table>
|
| 237 |
+
|
| 238 |
+
Table 10: Performance on different types of invariant knowledge.
|
| 239 |
+
|
| 240 |
+
<table><tr><td>Parameter</td><td>Value</td></tr><tr><td>layers</td><td>[5]</td></tr><tr><td>fact_token</td><td>subject_last</td></tr><tr><td>v_num_grad_steps</td><td>25</td></tr><tr><td>v_lr</td><td>5e-1</td></tr><tr><td>v_loss_layer</td><td>31</td></tr><tr><td>v_weight_decay</td><td>1e-3</td></tr><tr><td>clamp_norm_factor</td><td>4</td></tr><tr><td>kl_factor</td><td>0.0625</td></tr><tr><td>mom2_adjustment</td><td>false</td></tr><tr><td>context_template_length_params</td><td>[[5, 10], [10, 10]]</td></tr><tr><td>rewrite_module_tmp</td><td>model.layers.{}.mlp.down_proj</td></tr><tr><td>layer_module_tmp</td><td>model.layers.{}</td></tr><tr><td>mlp_module_tmp</td><td>model.layers.{}.mlp</td></tr><tr><td>attn_module_tmp</td><td>model.layers.{}.self_attn</td></tr><tr><td>ln_f_module</td><td>model.norm</td></tr><tr><td>lm_head_module</td><td>lm_head</td></tr><tr><td>mom2_dataset</td><td>wikipedia</td></tr><tr><td>mom2_n_samples</td><td>100000</td></tr><tr><td>mom2_dtype</td><td>float32</td></tr></table>
|
| 241 |
+
|
| 242 |
+
Table 11: ROME Configuration Parameters.
|
| 243 |
+
|
| 244 |
+
<table><tr><td>Parameter</td><td>Value</td></tr><tr><td>layers</td><td>[4, 5, 6, 7, 8]</td></tr><tr><td>clamp_norm_factor</td><td>4</td></tr><tr><td>layer_selection</td><td>all</td></tr><tr><td>fact_token</td><td>subject_last</td></tr><tr><td>v_num_grad_steps</td><td>25</td></tr><tr><td>v_lr</td><td>5e-1</td></tr><tr><td>v_loss_layer</td><td>31</td></tr><tr><td>v_weight_decay</td><td>1e-3</td></tr><tr><td>kl_factor</td><td>0.0625</td></tr><tr><td>mom2_adjustment</td><td>true</td></tr><tr><td>mom2_update_weight</td><td>15000</td></tr><tr><td>rewrite_module_tmp</td><td>model.layers.{}.mlp.down_proj</td></tr><tr><td>layer_module_tmp</td><td>model.layers.{}</td></tr><tr><td>mlp_module_tmp</td><td>model.layers.{}.mlp</td></tr><tr><td>attn_module_tmp</td><td>model.layers.{}.self_attn</td></tr><tr><td>ln_f_module</td><td>model.norm</td></tr><tr><td>lm_head_module</td><td>lm_head</td></tr><tr><td>mom2_dataset</td><td>wikipedia</td></tr><tr><td>mom2_n_samples</td><td>100000</td></tr><tr><td>mom2_dtype</td><td>float32</td></tr></table>
|
| 245 |
+
|
| 246 |
+
Table 12: MEMIT Configuration Parameters.
|
| 247 |
+
|
| 248 |
+
(Zheng et al., 2024) and refer to the PEFT package from HuggingFace<sup>3</sup> for the implementation of LoRA and P-tuning. We use EasyEdit<sup>4</sup> (Wang et al., 2023a)
|
| 249 |
+
|
| 250 |
+
to reproduce the ROME and MEMIT baseline results.
|
| 251 |
+
|
| 252 |
+
For the results in Table 1, the 7 relations we evaluate on are 'captain', 'CEO', 'chairperson', 'head coach', 'head of govt', 'head of state', and 'headquarter location'. We compare on this smaller subset to mirror the experiments conducted in Zhong et al. (2023). For LoRA, freeze tuning, and full fine-tuning, we fine-tune the base model for 10 epochs, whereas for P-tuning we fine-tune for 800 epochs to achieve optimal performance. Full fine-tuning of the base model requires DeepSpeed ZeRO-3 offload. In the LoRA experiments, the LoRA rank is set to $r = 32$; MLP means applying LoRA to the $W_{gate}$, $W_{up}$, and $W_{down}$ matrices, and Attn means applying LoRA to the $W_{q}$, $W_{k}$, $W_{v}$, and $W_{o}$ matrices. In the P-tuning experiments, the number of prompt tokens is set to $n = 20$. The MLP encoder consists of 3 linear layers with ReLU activations in between; the LSTM encoder uses a bidirectional LSTM whose output is passed to 2 linear layers with a ReLU activation in between. For all the above experiments, we use the AdamW optimizer with a learning rate of $5e-5$, a per-device train batch size of 4, and 4 gradient accumulation steps. For the ROME and MEMIT baselines, we use the default hyperparameter settings provided in EasyEdit, shown in Tables 11 and 12.
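For concreteness, the LoRA setup above can be expressed with the HuggingFace PEFT package roughly as follows; the model identifier and any value not stated in the text are illustrative.

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b")  # illustrative id
config = LoraConfig(
    r=32,  # the LoRA rank used in our experiments
    target_modules=["gate_proj", "up_proj", "down_proj",      # MLP
                    "q_proj", "k_proj", "v_proj", "o_proj"],  # Attn
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
model.print_trainable_parameters()  # only the LoRA matrices are trainable
```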
|
| 253 |
+
|
| 254 |
+
For the knowledge modification and knowledge injection experiments in Table 2, we oversample each knowledge injection example four times due to the limited number of training examples, since an update-style example cannot be generated for knowledge injection. The hyperparameter settings are kept the same as above.
|
timesensitiveknowledgeeditingthroughefficientfinetuning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1f67f2785b68e930bd82bfd0398d0c15bb311bdc88c30f8c1853b1a98131ca88
|
| 3 |
+
size 654980
|
timesensitiveknowledgeeditingthroughefficientfinetuning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ae0ff4127f3b3793a82609cfea9c68b49022006cf9da8d5efa87f61ad4f0aa17
|
| 3 |
+
size 316944
|
towardsartworkexplanationinlargescalevisionlanguagemodels/b7415f18-9f5f-4e4c-adfc-34dd1496a6ef_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b8a4e13111e5eb4130311d5f5e5bc67a054ff595494e2e480118fdf1c93c8a20
|
| 3 |
+
size 170369
|
towardsartworkexplanationinlargescalevisionlanguagemodels/b7415f18-9f5f-4e4c-adfc-34dd1496a6ef_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3607415f1db032460d4d69e649538a1f598df7e538e6d711bf5213f5c4fd4a89
|
| 3 |
+
size 196758
|
towardsartworkexplanationinlargescalevisionlanguagemodels/b7415f18-9f5f-4e4c-adfc-34dd1496a6ef_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:199973ed23205174705d8c0b23af458d9711391dd44f2171543a5ca59573aa2d
|
| 3 |
+
size 2367457
|
towardsartworkexplanationinlargescalevisionlanguagemodels/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
towardsartworkexplanationinlargescalevisionlanguagemodels/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:207ac81970fb681f762b15b911f7bd5b4e514f9d08cde47a3789320b9994d5e7
|
| 3 |
+
size 2271518
|
towardsartworkexplanationinlargescalevisionlanguagemodels/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e284554c923895b8e39308ccd7a3ce064649bf6420ac5750d0e21a631c2c61b6
|
| 3 |
+
size 552818
|
twoissueswithchinesespellingcorrectionandarefinementsolution/8aaa89ff-b6c1-4ed9-9353-20b32721bbe9_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cbc3f214df1bbad5c0d9f5211d3d99dc8ec7cd05911069a9211261b5c9dfbbc3
|
| 3 |
+
size 53085
|
twoissueswithchinesespellingcorrectionandarefinementsolution/8aaa89ff-b6c1-4ed9-9353-20b32721bbe9_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9e2677893901bc7e9ff55cbc202426db20ac3daea725c3562ab559ce50d32a8f
|
| 3 |
+
size 63287
|
twoissueswithchinesespellingcorrectionandarefinementsolution/8aaa89ff-b6c1-4ed9-9353-20b32721bbe9_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:772f3b5c9804b1967fab8e43c4573a0bcf7f6106dd06ae9b766b25767397d736
|
| 3 |
+
size 1042302
|
twoissueswithchinesespellingcorrectionandarefinementsolution/full.md
ADDED
|
@@ -0,0 +1,151 @@
|
| 1 |
+
# Two Issues with Chinese Spelling Correction and A Refinement Solution
|
| 2 |
+
|
| 3 |
+
Changxuan Sun Linlin She Xuesong Lu*
|
| 4 |
+
|
| 5 |
+
School of Data Science and Engineering
|
| 6 |
+
|
| 7 |
+
East China Normal University
|
| 8 |
+
|
| 9 |
+
{changxuansun@stu,linlinshe123@stu,xslu@dase}.ecnu.edu.cn
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
The Chinese Spelling Correction (CSC) task aims to detect and correct misspelled characters in Chinese text, and has received considerable attention in the past few years. Most recent studies adopt a Transformer-based model and leverage different features of characters, such as pronunciation, glyph, and contextual information, to enhance the model's ability to complete the task. Despite their state-of-the-art performance, we observe two issues that should be addressed to further advance the CSC task. First, the widely used benchmark datasets SIGHAN13, SIGHAN14, and SIGHAN15 contain many mistakes. Hence the reported performance of existing models is inaccurate and should be re-evaluated. Second, existing models seem to have reached a performance bottleneck, where the improvements on the SIGHAN testing sets are increasingly small and unstable. To deal with the two issues, we make two contributions: (1) we manually fix the SIGHAN datasets and re-evaluate four representative CSC models using the fixed datasets; (2) we analyze the new results to identify the spelling errors that none of the four models successfully corrects, based on which we propose a simple yet effective refinement solution. Experimental results show that our solution improves the four models in all metrics by notable margins.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Chinese Spelling Correction (CSC) aims to detect and correct misspelled characters in Chinese text. The task is challenging yet important, being used in various NLP applications such as search engines (Martins and Silva, 2004), optical character recognition (Afli et al., 2016) and international Chinese education (Liu et al., 2011). To solve the task, recent studies have employed Transformer (Vaswani et al., 2017) or BERT (Kenton
|
| 18 |
+
|
| 19 |
+
and Toutanova, 2019) as the base model and incorporated rich semantic features of characters to promote performance (Cheng et al., 2020; Liu et al., 2021; Xu et al., 2021; Li et al., 2022a; Liu et al., 2022; Liang et al., 2023; Huang et al., 2023).
|
| 20 |
+
|
| 21 |
+
Despite the promising results, we observe two issues with the current research on CSC. First, the widely used benchmark datasets SIGHAN13 (Wu et al., 2013), SIGHAN14 (Yu et al., 2014), and SIGHAN15 (Tseng et al., 2015) contain many mistakes, most of which are meaningless sentences and spelling errors in the target sentences. The former are common mistakes made by Chinese beginners, as the SIGHAN datasets are collected from the Chinese essay section of a test for foreigners. These mistakes obscure the meaning of the sentences and may affect the correction of spelling errors. The latter are spelling errors that were not identified by the Chinese teachers in the test. In particular, SIGHAN13 is known to contain many misuses of “的”, “地” and “得” in the target sentences. These mistakes clearly affect the accuracy of the evaluation results. Surprisingly, previous studies have never attempted to fix these mistakes to better evaluate their models. Second, recent models seem to have reached a performance bottleneck on the SIGHAN testing sets, as evidenced by the increasingly small and unstable improvements (i.e., a newly proposed model does not perform better in all metrics) in the evaluation metrics. For instance, SCOPE (Li et al., 2022a) performs worse than MLM-phonetics (Zhang et al., 2021) in detection recall and correction recall on SIGHAN14, and worse than REALISE (Xu et al., 2021) in detection precision and correction precision on SIGHAN15. Furthermore, SCOPE combined with DR-CSC (Huang et al., 2023) improves SCOPE by only around 1 point in all metrics and also performs worse than comparative models in several metrics on SIGHAN13 and SIGHAN14. While
|
| 22 |
+
|
| 23 |
+
these models are focused on different aspects of spelling errors, we speculate the reason is that there exist certain errors which none of them can stably correct.
|
| 24 |
+
|
| 25 |
+
To tackle the two issues, we make two contributions in this paper. First, we examine the SIGHAN datasets sentence by sentence and fix all identifiable mistakes. Then, we retrain four representative CSC models using the fixed datasets and re-evaluate their performance. Second, we analyze the evaluation results and identify the spelling errors that none of the models successfully corrects, based on which we propose a simple solution that refines the output of the models without additional training. Experimental results show that our simple solution improves the four models in all metrics by notable margins.
|
| 26 |
+
|
| 27 |
+
# 2 Fixing SIGHAN and Re-evaluating Four Models
|
| 28 |
+
|
| 29 |
+
<table><tr><td colspan="2">Type 1: meaningless sentences</td></tr><tr><td>Original</td><td>连忙我都没有时间跟父母见面!<br>Quickly I don't even have time to meet my parents!</td></tr><tr><td>Fixed</td><td>忙得我都没有时间跟父母见面!<br>I'm so busy that I don't even have time to meet my parents!</td></tr><tr><td colspan="2">Type 2: spelling errors in target sentences</td></tr><tr><td>Original</td><td>很多伪心的路,在我们面前挥手。<br>Many roads of false hearts wave in front of us.</td></tr><tr><td>Fixed</td><td>很多违心的路,在我们面前挥手。<br>Many roads against our will wave in front of us.</td></tr><tr><td colspan="2">Type 3: unconverted traditional Chinese characters</td></tr><tr><td>Original</td><td>一张又一张地念著,<br>Read one page after another,</td></tr><tr><td>Fixed</td><td>一张又一张地念着,<br>Read one page after another,</td></tr></table>

Table 1: Some examples of different mistake types and the corresponding fixes.
|
| 37 |
+
|
| 38 |
+
Two authors of the paper independently examine the SIGHAN datasets and identify the sentences with mistakes. They then review each identified sentence and discuss whether and how it should be fixed. To ensure accuracy, we fix the datasets in two rounds, and both rounds follow the same steps. First, we examine the fluency of the sentences and identify those that are meaningless. In such cases, both the source sentence and the corresponding target sentence need to be fixed, and the spelling errors remain unchanged. Second, we identify the spelling errors in the target sentences.
|
| 39 |
+
|
| 40 |
+
Third, we identify the traditional Chinese characters that are not converted into simplified ones by OpenCC<sup>1</sup> in both source and target sentences. Table 1 shows the example sentences with mistakes and the corresponding fixes. More examples are presented in Table 6 of the appendix.
|
| 41 |
+
|
| 42 |
+
Table 2 shows the statistics of the fixes for the three datasets as well as the original statistics. The numbers in parentheses are the numbers of sentences with spelling errors. Note that the rows labeled "Fixed" show the statistics for the fixed sentences only. We observe that all three datasets have a considerable number of lines$^2$ fixed, with many spelling errors, including the newly identified errors indicated in square brackets. A new spelling error is identified when a spelling error in a target sentence is fixed; that is, the numbers in square brackets are the numbers of spelling errors in the target sentences of the original SIGHAN datasets.
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
<table><tr><td colspan="2">Training Data</td><td>#Lines</td><td>avgLength</td><td>#Errors</td></tr><tr><td rowspan="2">SIGHAN13</td><td>Original</td><td>700 (340)</td><td>41.8</td><td>343</td></tr><tr><td>Fixed</td><td>247 (117)</td><td>44.5</td><td>234 [114]</td></tr><tr><td rowspan="2">SIGHAN14</td><td>Original</td><td>3437 (3358)</td><td>49.6</td><td>5122</td></tr><tr><td>Fixed</td><td>1280 (1197)</td><td>55.1</td><td>2360 [273]</td></tr><tr><td rowspan="2">SIGHAN15</td><td>Original</td><td>2338 (2273)</td><td>31.3</td><td>3037</td></tr><tr><td>Fixed</td><td>675 (634)</td><td>36.9</td><td>1113 [172]</td></tr><tr><td colspan="2">Testing Data</td><td>#Lines</td><td>avgLen</td><td>#Errors</td></tr><tr><td rowspan="2">SIGHAN13</td><td>Original</td><td>1000 (966)</td><td>74.3</td><td>1224</td></tr><tr><td>Fixed</td><td>569 (551)</td><td>79.1</td><td>1149 [407]</td></tr><tr><td rowspan="2">SIGHAN14</td><td>Original</td><td>1062 (551)</td><td>50.0</td><td>771</td></tr><tr><td>Fixed</td><td>442 (305)</td><td>55.3</td><td>538 [147]</td></tr><tr><td rowspan="2">SIGHAN15</td><td>Original</td><td>1100 (569)</td><td>30.6</td><td>703</td></tr><tr><td>Fixed</td><td>357 (229)</td><td>35.1</td><td>337 [67]</td></tr></table>
|
| 47 |
+
|
| 48 |
+
Table 2: Summary statistics of the original datasets and the fixed parts.
|
| 49 |
+
|
| 50 |
+
Then, we select four representative CSC models and re-evaluate them on the fixed datasets, namely PLOME (Liu et al., 2021), REALISE (Xu et al., 2021), LEAD (Li et al., 2022b), and SCOPE (Li et al., 2022a). According to the literature, these four models generally have the strongest performance among existing models, and their authors have released source code<sup>3</sup> that is easy to run. For each
|
| 51 |
+
|
| 52 |
+
<table><tr><td colspan="2">Datasets & Models</td><td colspan="3">Detection</td><td colspan="3">Correction</td></tr><tr><td colspan="2">SIGHAN13</td><td>D-P</td><td>D-R</td><td>D-F</td><td>C-P</td><td>C-R</td><td>C-F</td></tr><tr><td rowspan="4">Original</td><td>PLOME</td><td>81.3</td><td>77.9</td><td>79.6</td><td>79.6</td><td>76.3</td><td>77.9</td></tr><tr><td>REALISE*</td><td>88.6</td><td>82.5</td><td>85.4</td><td>87.2</td><td>81.2</td><td>84.1</td></tr><tr><td>LEAD*</td><td>88.3</td><td>83.4</td><td>85.8</td><td>87.2</td><td>82.4</td><td>84.7</td></tr><tr><td>SCOPE*</td><td>87.4</td><td>83.4</td><td>85.4</td><td>86.3</td><td>82.4</td><td>84.3</td></tr><tr><td rowspan="4">Retrained</td><td>PLOME</td><td>76.7</td><td>74.5</td><td>75.5</td><td>75.0</td><td>72.9</td><td>73.9</td></tr><tr><td>REALISE</td><td>77.6</td><td>73.9</td><td>75.7</td><td>76.4</td><td>72.8</td><td>74.5</td></tr><tr><td>LEAD</td><td>78.0</td><td>74.6</td><td>76.3</td><td>76.4</td><td>73.0</td><td>74.4</td></tr><tr><td>SCOPE</td><td>65.4</td><td>61.9</td><td>63.6</td><td>63.6</td><td>60.2</td><td>61.9</td></tr><tr><td rowspan="8">Refined</td><td rowspan="2">PLOME</td><td>79.9</td><td>78.1</td><td>79.0</td><td>78.0</td><td>76.2</td><td>77.1</td></tr><tr><td>(↑3.2)</td><td>(↑3.6)</td><td>(↑3.5)</td><td>(↑3.0)</td><td>(↑3.3)</td><td>(↑3.2)</td></tr><tr><td rowspan="2">REALISE</td><td>80.6</td><td>77.5</td><td>79.0</td><td>79.4</td><td>76.3</td><td>77.8</td></tr><tr><td>(↑3.0)</td><td>(↑3.6)</td><td>(↑3.3)</td><td>(↑3.0)</td><td>(↑3.5)</td><td>(↑3.3)</td></tr><tr><td rowspan="2">LEAD</td><td>81.5</td><td>78.4</td><td>79.9</td><td>79.9</td><td>76.8</td><td>78.3</td></tr><tr><td>(↑3.5)</td><td>(↑3.8)</td><td>(↑3.6)</td><td>(↑3.5)</td><td>(↑3.8)</td><td>(↑3.9)</td></tr><tr><td rowspan="2">SCOPE</td><td>75.9</td><td>74.0</td><td>75.0</td><td>73.9</td><td>72.0</td><td>72.9</td></tr><tr><td>(↑10.5)</td><td>(↑12.1)</td><td>(↑11.4)</td><td>(↑10.3)</td><td>(↑11.8)</td><td>(↑11.0)</td></tr></table>
Table 3: The results on SIGHAN13. The asterisk * indicates the results are copied from the original paper.
<table><tr><td colspan="2">Datasets & Models</td><td colspan="3">Detection</td><td colspan="3">Correction</td></tr><tr><td colspan="2">SIGHAN14</td><td>D-P</td><td>D-R</td><td>D-F</td><td>C-P</td><td>C-R</td><td>C-F</td></tr><tr><td rowspan="4">Original</td><td>PLOME</td><td>73.5</td><td>70.0</td><td>71.7</td><td>71.5</td><td>68.0</td><td>69.7</td></tr><tr><td>REALISE*</td><td>67.8</td><td>71.5</td><td>69.6</td><td>66.3</td><td>70.0</td><td>68.1</td></tr><tr><td>LEAD*</td><td>70.7</td><td>71.0</td><td>70.8</td><td>69.3</td><td>69.6</td><td>69.5</td></tr><tr><td>SCOPE*</td><td>70.1</td><td>73.1</td><td>71.6</td><td>68.6</td><td>71.5</td><td>70.1</td></tr><tr><td rowspan="4">Retrained</td><td>PLOME</td><td>70.0</td><td>67.5</td><td>68.7</td><td>67.5</td><td>65.2</td><td>66.3</td></tr><tr><td>REALISE</td><td>74.4</td><td>67.7</td><td>70.9</td><td>72.2</td><td>65.7</td><td>68.8</td></tr><tr><td>LEAD</td><td>76.6</td><td>70.0</td><td>73.1</td><td>74.7</td><td>68.3</td><td>71.4</td></tr><tr><td>SCOPE</td><td>82.4</td><td>77.2</td><td>79.7</td><td>80.8</td><td>75.7</td><td>78.1</td></tr><tr><td rowspan="8">Refined</td><td rowspan="2">PLOME</td><td>71.6</td><td>69.5</td><td>70.5</td><td>69.5</td><td>67.5</td><td>68.5</td></tr><tr><td>(↑1.6)</td><td>(↑2.0)</td><td>(↑1.8)</td><td>(↑2.0)</td><td>(↑2.3)</td><td>(↑2.2)</td></tr><tr><td rowspan="2">REALISE</td><td>76.4</td><td>70.3</td><td>73.2</td><td>74.5</td><td>68.6</td><td>71.4</td></tr><tr><td>(↑2.0)</td><td>(↑2.6)</td><td>(↑2.3)</td><td>(↑2.3)</td><td>(↑2.9)</td><td>(↑2.6)</td></tr><tr><td rowspan="2">LEAD</td><td>77.9</td><td>72.2</td><td>75.0</td><td>76.5</td><td>70.9</td><td>73.6</td></tr><tr><td>(↑1.3)</td><td>(↑2.2)</td><td>(↑1.9)</td><td>(↑1.8)</td><td>(↑2.6)</td><td>(↑2.2)</td></tr><tr><td rowspan="2">SCOPE</td><td>83.5</td><td>79.0</td><td>81.2</td><td>81.9</td><td>77.7</td><td>79.7</td></tr><tr><td>(↑1.1)</td><td>(↑1.8)</td><td>(↑1.5)</td><td>(↑1.1)</td><td>(↑2.0)</td><td>(↑1.6)</td></tr></table>
For each model, we adopt the training settings in the original paper. We train each model four times with random seeds and report the average results on the testing sets. We use the widely adopted sentence-level precision, recall and F1 (Wang et al., 2019) to evaluate the models, which are also used in their original papers. The evaluation is conducted on the detection and correction sub-tasks. The results are reported in Tables 3, 4 and 5, where the rows labeled "Original" are the results on the original SIGHAN datasets, and the rows labeled "Retrained" are the results of the models retrained using the fixed SIGHAN datasets. The "Original" results are all copied from the corresponding papers, except for PLOME on SIGHAN13 and SIGHAN14, whose results the authors did not report and which we therefore reproduce ourselves.
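As a concrete reading of these metrics, the sketch below computes the sentence-level scores for the correction sub-task; the counting conventions follow one common formulation of Wang et al. (2019) and are our assumption, not the official evaluation script.

```python
def sentence_level_prf(sources, preds, golds):
    # Correction-level scores: a true positive is a sentence that the model
    # changed and that matches the gold target exactly; a false positive is a
    # changed sentence that misses the gold; a false negative is an erroneous
    # sentence left uncorrected or corrected wrongly.
    tp = sum(p == g != s for s, p, g in zip(sources, preds, golds))
    fp = sum(p != s and p != g for s, p, g in zip(sources, preds, golds))
    fn = sum(g != s and p != g for s, p, g in zip(sources, preds, golds))
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1
```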
Table 4: The results on SIGHAN14. The asterisk * indicates the results are copied from the original paper.
<table><tr><td colspan="2">Datasets & Models</td><td colspan="3">Detection</td><td colspan="3">Correction</td></tr><tr><td colspan="2">SIGHAN15</td><td>D-P</td><td>D-R</td><td>D-F</td><td>C-P</td><td>C-R</td><td>C-F</td></tr><tr><td rowspan="4">Original</td><td>PLOME*</td><td>77.4</td><td>81.5</td><td>79.4</td><td>75.3</td><td>79.3</td><td>77.2</td></tr><tr><td>REALISE*</td><td>77.3</td><td>81.3</td><td>79.3</td><td>75.9</td><td>79.9</td><td>77.8</td></tr><tr><td>LEAD*</td><td>79.2</td><td>82.8</td><td>80.9</td><td>77.6</td><td>81.2</td><td>79.3</td></tr><tr><td>SCOPE*</td><td>81.1</td><td>84.3</td><td>82.7</td><td>79.2</td><td>82.3</td><td>80.7</td></tr><tr><td rowspan="4">Retrained</td><td>PLOME</td><td>77.7</td><td>78.9</td><td>78.3</td><td>75.6</td><td>76.8</td><td>76.2</td></tr><tr><td>REALISE</td><td>86.0</td><td>82.9</td><td>84.4</td><td>84.1</td><td>81.0</td><td>82.5</td></tr><tr><td>LEAD</td><td>85.4</td><td>83.3</td><td>84.3</td><td>83.5</td><td>81.4</td><td>82.4</td></tr><tr><td>SCOPE</td><td>90.7</td><td>86.8</td><td>88.7</td><td>89.5</td><td>86.0</td><td>87.7</td></tr><tr><td rowspan="8">Refined</td><td rowspan="2">PLOME</td><td>78.8</td><td>79.9</td><td>79.4</td><td>76.4</td><td>77.5</td><td>77.0</td></tr><tr><td>(↑1.1)</td><td>(↑1.0)</td><td>(↑1.1)</td><td>(↑0.8)</td><td>(↑0.7)</td><td>(↑0.8)</td></tr><tr><td rowspan="2">REALISE</td><td>87.0</td><td>84.3</td><td>85.6</td><td>85.2</td><td>82.6</td><td>83.9</td></tr><tr><td>(↑1.0)</td><td>(↑1.4)</td><td>(↑1.2)</td><td>(↑1.1)</td><td>(↑1.6)</td><td>(↑1.4)</td></tr><tr><td rowspan="2">LEAD</td><td>86.2</td><td>84.5</td><td>85.3</td><td>84.2</td><td>82.6</td><td>83.4</td></tr><tr><td>(↑0.8)</td><td>(↑1.2)</td><td>(↑1.0)</td><td>(↑0.7)</td><td>(↑1.2)</td><td>(↑1.0)</td></tr><tr><td rowspan="2">SCOPE</td><td>91.5</td><td>88.2</td><td>89.8</td><td>90.4</td><td>87.2</td><td>88.8</td></tr><tr><td>(↑0.8)</td><td>(↑1.4)</td><td>(↑1.1)</td><td>(↑0.9)</td><td>(↑1.2)</td><td>(↑1.1)</td></tr></table>
Table 5: The results on SIGHAN15. The asterisk * indicates the results are copied from the original paper.
Comparing the "Original" and "Retrained" results, we observe that the results change substantially. On SIGHAN13, all results decrease drastically. This is mainly because the "Original" results are calculated after excluding "的", "地" and "得", whose target annotations are mostly incorrect, whereas the "Retrained" results are calculated on all spelling errors. This indicates that the models still cannot correct "的", "地" and "得" well, especially SCOPE, which has the largest performance drop. On SIGHAN14 and SIGHAN15, the results generally increase after the datasets are fixed. Based on these results, we suggest using the fixed datasets for more accurate evaluation in the future.
An interesting observation is that the "Retrained" results generally rank the models, from strongest to weakest, as SCOPE<sup>4</sup>, LEAD, REALISE and PLOME, which coincides with the "Original" results. This indicates that we have retrained the models correctly and that the fixed SIGHAN datasets can reflect their performance discrepancies.
# 3 A Refinement Solution using ChineseBERT
We extract from the testing sets the sentences for which none of the four models successfully reproduces the target sentence, and analyze the reasons for the failures. We observe three main types of failures. First, the models often fail to correct the particles "的", "地" and "得" and pronouns such as "他(们)", "她(们)", "它(们)", "那" and "哪". Second, the models often fail to correct spelling errors in special terms, including idioms, proverbs, proper nouns and other commonly used expressions. Third, the models often make over-corrections.
Most of the above failures can be solved by inferring the correct character from the contextual information of the corresponding sentence. Based on this idea, we propose a simple refinement solution with ChineseBERT (Sun et al., 2021) on top of the output of the four models. Specifically, given a sentence output by any model, we mask a character pertaining to the above failure cases and let ChineseBERT infer a new character without training. Then we measure the phonological distance between the masked character and the inferred character, calculated as the edit distance between the pinyin (with tone) of the two characters. If the distance is below a threshold<sup>5</sup>, we keep the inferred character; otherwise, we keep the masked character. The intuition is that about $83\%$ of spelling errors have similar pronunciation to the correct character (Liu et al., 2010), so if the inferred character has a very different pinyin from the masked character, it is unlikely to be the correct one. If there are multiple characters to mask in a sentence, we mask them one at a time, from beginning to end, and infer each with ChineseBERT. Once there is no character left to mask, we stop and use the last output of ChineseBERT as the refined sentence. Note that if a sentence output by the above four models contains no character to mask, the sentence is the final output and the refinement process does not run.
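A minimal sketch of this refinement loop, under stated assumptions, is given below: it substitutes the publicly available bert-base-chinese masked language model for ChineseBERT, uses the pypinyin package for pinyin with tones, takes the flagged character positions as given (the next paragraph describes how they are identified), and treats the threshold value of 3 as illustrative.

```python
from pypinyin import Style, pinyin
from transformers import pipeline

# bert-base-chinese stands in for ChineseBERT in this sketch.
fill_mask = pipeline("fill-mask", model="bert-base-chinese")

def tone_pinyin(ch):
    # Pinyin with the tone appended as a digit, e.g. "ma3".
    return pinyin(ch, style=Style.TONE3)[0][0]

def edit_distance(a, b):
    # Plain Levenshtein distance via a one-row dynamic program.
    dp = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev, dp[0] = dp[0], i
        for j, cb in enumerate(b, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (ca != cb))
    return dp[-1]

def refine(sentence, positions, threshold=3):
    # Mask the flagged positions one at a time, from beginning to end, and
    # keep an inferred character only if its pinyin is close to the original's.
    chars = list(sentence)
    for pos in positions:
        original = chars[pos]
        masked = chars[:]
        masked[pos] = fill_mask.tokenizer.mask_token
        inferred = fill_mask("".join(masked))[0]["token_str"]
        if edit_distance(tone_pinyin(original), tone_pinyin(inferred)) <= threshold:
            chars[pos] = inferred
    return "".join(chars)
```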
The remaining problem is how to identify the characters to be masked. We design three strategies for the three failure types, respectively. First, we directly mask the particles "的", "地" and "得" and the pronouns "他", "她", "它", "那" and "哪". Second, for a special term with spelling errors, we notice that the jieba<sup>6</sup> tokenizer produces different tokens with and without the Hidden Markov Model (HMM): the former tends to treat the term as a new word, while the latter tends to tokenize it into single characters. Hence, for a sentence output by the above models, we tokenize it with both methods and regard the parts with different tokenization results as special terms to mask. Note that this approach may mask phrases other than special terms if they contain spelling errors. Third, to identify over-corrections, we calculate the edit distance between the pinyin (with tone) of a changed character and the original character in the source sentence. If the distance is above 3, as discussed in the last paragraph, we regard the change as a potential over-correction and mask the character.
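The special-term strategy can be sketched as follows; aligning the two segmentations by character offsets and masking the positions on which they disagree is our reading of the description, so the helper names and the alignment logic are assumptions.

```python
import jieba

def char_spans(tokens):
    # Map a token list to the set of (start, end) character spans it induces.
    spans, start = set(), 0
    for tok in tokens:
        spans.add((start, start + len(tok)))
        start += len(tok)
    return spans

def special_term_positions(sentence):
    # Segment with and without the HMM; positions covered by spans on which
    # the two segmentations disagree are candidate special-term characters.
    with_hmm = char_spans(jieba.lcut(sentence, HMM=True))
    without_hmm = char_spans(jieba.lcut(sentence, HMM=False))
    disagreeing = with_hmm ^ without_hmm  # symmetric difference of spans
    return sorted({i for start, end in disagreeing for i in range(start, end)})
```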
The results are presented in Tables 3, 4 and 5, indicated by "Refined". We observe that, after refinement, the performance of all four models improves by notable margins in all metrics on the three datasets, compared to the "Retrained" results. This shows that our simple solution is very effective, even without training.
# 4 Related Work
Recent studies mainly adopt Transformer or BERT/ChineseBERT as the base model to solve the CSC task, and incorporate rich semantic features of the Chinese language to enhance the ability of the base model. For instance, Cheng et al. (2020) and Nguyen et al. (2021) use the confusion sets<sup>7</sup> to exclude unlikely candidates output by BERT. More studies such as Xu et al. (2021); Huang et al. (2021); Liu et al. (2021); Li et al. (2022a,b); Liang et al. (2023); Zhang et al. (2023); Wei et al. (2023) leverage phonological and/or visual features of characters to boost the performance. Studies like Zhang et al. (2020, 2021); Li et al. (2021); Zhu et al. (2022); Huang et al. (2023) adopt the detection-correction framework to increase the accuracy of identifying potential spelling errors. Other studies learn contextual information in sentences to detect and correct spelling errors (Guo et al., 2021; Wang et al., 2021; Liu et al., 2022; Li et al., 2022c).
# 5 Conclusion
In this work, we discuss two issues with the Chinese Spelling Correction task: the existence of mistakes in the SIGHAN datasets and the small, unstable improvements of new models. We manually fix the mistakes and re-evaluate four representative CSC models on the fixed datasets. We analyze the common types of failures of the models and propose a simple yet effective refinement solution. Experimental results show that our solution stably improves the base models in all metrics. While the current refinement solution is purely rule-based, in the future we will develop data-driven methods to further improve the performance.
# Limitations
There are two main limitations in the current work. First, the four models evaluated in the experiments all belong to the category of models that incorporate phonological and visual features of Chinese characters. We choose them because they are reported to have the strongest performance among existing models and because their source code is well maintained and released by the authors for reproduction and training. In the future, however, we should evaluate more diverse models, such as those using the detection-correction framework and those incorporating contextual information. Second, our strategy for identifying the characters to be masked in special terms and over-corrections is rule-based and not very accurate. For special terms with spelling errors, the identification depends on whether the jieba tokenizer with and without HMM yields different tokenization results. For over-corrections, we identify them empirically based on the edit distance between the pinyin (with tone) of a changed character and the original character. The threshold of the distance is set empirically and visual distance is not considered; the same holds when deciding whether to preserve the character inferred by ChineseBERT in the final output. While the current refinement solution is simple yet effective, we will explore more sophisticated methods to further improve the accuracy of identifying the characters to be masked, as well as the final CSC performance.
# Ethical Statement
The datasets and models used in the current study are all released and authorized by the original authors for research purposes. The datasets contain no personally identifying information and raise no other ethical concerns. The outputs of the models contain no violence, pornography or other inappropriate content. Hence, there is no ethical issue in the current study.
# Acknowledgement
This work is supported by a grant from the National Natural Science Foundation of China (Grant No. 62277017).
# References
Haithem Afli, Zhengwei Qiu, Andy Way, and Páraic Sheridan. 2016. Using SMT for OCR error correction of historical texts. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 962-966. European Language Resources Association.

Xingyi Cheng, Weidi Xu, Kunlong Chen, Shaohua Jiang, Feng Wang, Taifeng Wang, Wei Chu, and Yuan Qi. 2020. SpellGCN: Incorporating phonological and visual similarities into language models for Chinese spelling check. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 871-881.

Zhao Guo, Yuan Ni, Keqiang Wang, Wei Zhu, and Guotong Xie. 2021. Global attention decoder for Chinese spelling error correction. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 1419-1428.

Haojing Huang, Jingheng Ye, Qingyu Zhou, Yinghui Li, Yangning Li, Feng Zhou, and Hai-Tao Zheng. 2023. A frustratingly easy plug-and-play detection-and-reasoning module for Chinese spelling check. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11514-11525.

Li Huang, Junjie Li, Weiwei Jiang, Zhiyu Zhang, Minchuan Chen, Shaojun Wang, and Jing Xiao. 2021. PHMOSpell: Phonological and morphological knowledge guided Chinese spelling check. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 5958-5967.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT, pages 4171-4186.

Jiahao Li, Quan Wang, Zhendong Mao, Junbo Guo, Yanyan Yang, and Yongdong Zhang. 2022a. Improving Chinese spelling check by character pronunciation prediction: The effects of adaptivity and granularity. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 4275-4286.

Jing Li, Gaosheng Wu, Dafei Yin, Haozhao Wang, and Yonggang Wang. 2021. DCSpell: A detector-corrector framework for Chinese spelling error correction. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 1870-1874.

Yinghui Li, Shirong Ma, Qingyu Zhou, Zhongli Li, Yangning Li, Shulin Huang, Ruiyang Liu, Chao Li, Yunbo Cao, and Haitao Zheng. 2022b. Learning from the dictionary: Heterogeneous knowledge guided fine-tuning for Chinese spell checking. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 238-249.

Yinghui Li, Qingyu Zhou, Yangning Li, Zhongli Li, Ruiyang Liu, Rongyi Sun, Zizhen Wang, Chao Li, Yunbo Cao, and Hai-Tao Zheng. 2022c. The past mistake is the future wisdom: Error-driven contrastive probability optimization for Chinese spell checking. In Findings of the Association for Computational Linguistics: ACL 2022, pages 3202-3213.

Zihong Liang, Xiaojun Quan, and Qifan Wang. 2023. Disentangled phonetic representation for Chinese spelling correction. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 13509-13521.

C.-L. Liu, M.-H. Lai, K.-W. Tien, Y.-H. Chuang, S.-H. Wu, and C.-Y. Lee. 2011. Visually and phonologically similar characters in incorrect Chinese words: Analyses, identification, and applications. ACM Transactions on Asian Language Information Processing (TALIP), 10(2):1-39.

Chao-Lin Liu, Min-Hua Lai, Yi-Hsuan Chuang, and Chia-Ying Lee. 2010. Visually and phonologically similar characters in incorrect simplified Chinese words. In Coling 2010: Posters, pages 739-747.

Shulin Liu, Shengkang Song, Tianchi Yue, Tao Yang, Huihui Cai, TingHao Yu, and Shengli Sun. 2022. CRASpell: A contextual typo robust approach to improve Chinese spelling correction. In Findings of the Association for Computational Linguistics: ACL 2022, pages 3008-3018.

Shulin Liu, Tao Yang, Tianchi Yue, Feng Zhang, and Di Wang. 2021. PLOME: Pre-training with misspelled knowledge for Chinese spelling correction. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2991-3000.

Bruno Martins and Mário J. Silva. 2004. Spelling correction for search engine queries. In Advances in Natural Language Processing: 4th International Conference, EsTAL 2004, pages 372-383. Springer.

Minh Nguyen, Hoang Gia Ngo, and Nancy F. Chen. 2021. Domain-shift conditioning using adaptable filtering via hierarchical embeddings for robust Chinese spell check. IEEE/ACM Transactions on Audio, Speech, and Language Processing.

Zijun Sun, Xiaoya Li, Xiaofei Sun, Yuxian Meng, Xiang Ao, Qing He, Fei Wu, and Jiwei Li. 2021. ChineseBERT: Chinese pretraining enhanced by glyph and pinyin information. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2065-2075.

Yuen-Hsien Tseng, Lung-Hao Lee, Li-Ping Chang, and Hsin-Hsi Chen. 2015. Introduction to SIGHAN 2015 bake-off for Chinese spelling check. In Proceedings of the Eighth SIGHAN Workshop on Chinese Language Processing, pages 32-37.

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in Neural Information Processing Systems, 30.

Baoxin Wang, Wanxiang Che, Dayong Wu, Shijin Wang, Guoping Hu, and Ting Liu. 2021. Dynamic connected networks for Chinese spelling check. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 2437-2446.

Dingmin Wang, Yi Tay, and Li Zhong. 2019. Confusionset-guided pointer networks for Chinese spelling check. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5780-5785.

Xiao Wei, Jianbao Huang, Hang Yu, and Qian Liu. 2023. PTCSpell: Pre-trained corrector based on character shape and pinyin for Chinese spelling correction. In Findings of the Association for Computational Linguistics: ACL 2023, pages 6330-6343.

Shih-Hung Wu, Chao-Lin Liu, and Lung-Hao Lee. 2013. Chinese spelling check evaluation at SIGHAN Bake-off 2013. In Proceedings of the Seventh SIGHAN Workshop on Chinese Language Processing, pages 35-42.

Heng-Da Xu, Zhongli Li, Qingyu Zhou, Chao Li, Zizhen Wang, Yunbo Cao, Heyan Huang, and Xian-Ling Mao. 2021. Read, listen, and see: Leveraging multimodal information helps Chinese spell checking. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 716-728.

Liang-Chih Yu, Lung-Hao Lee, Yuen-Hsien Tseng, and Hsin-Hsi Chen. 2014. Overview of SIGHAN 2014 bake-off for Chinese spelling check. In Proceedings of the Third CIPS-SIGHAN Joint Conference on Chinese Language Processing, pages 126-132.

Ruiqing Zhang, Chao Pang, Chuanqiang Zhang, Shuohuan Wang, Zhongjun He, Yu Sun, Hua Wu, and Haifeng Wang. 2021. Correcting Chinese spelling errors with phonetic pre-training. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 2250-2261.

Shaohua Zhang, Haoran Huang, Jicong Liu, and Hang Li. 2020. Spelling error correction with soft-masked BERT. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 882-890.

Xiaotian Zhang, Yanjun Zheng, Hang Yan, and Xipeng Qiu. 2023. Investigating glyph-phonetic information for Chinese spell checking: What works and what's next? In Findings of the Association for Computational Linguistics: ACL 2023, pages 1-13.

Chenxi Zhu, Ziqiang Ying, Boyu Zhang, and Feng Mao. 2022. MDCSpell: A multi-task detector-corrector framework for Chinese spelling correction. In Findings of the Association for Computational Linguistics: ACL 2022, pages 1244-1253.
<table><tr><td>Type of mistake</td><td colspan="2">Example</td></tr><tr><td rowspan="8">Meaningless sentences</td><td>Original</td><td>大家也可怕你的工厂把自然破坏,<br>People are also fright that your factory will destroy nature</td></tr><tr><td>Fixed</td><td>大家也害怕你的工厂把自然破坏,<br>People are also afraid that your factory will destroy nature</td></tr><tr><td>Original</td><td>对我来说,在教室里录影小学生非常不好。<br>For me, recording in the classroom is very bad primary school students.</td></tr><tr><td>Fixed</td><td>对我来说,在教室里录影对小学生非常不好。<br>For me, recording in the classroom is very bad for primary school students.</td></tr><tr><td>Original</td><td>所以我们今天免费提供饮料或点甜。<br>So we're offering a free drink or point sweet today</td></tr><tr><td>Fixed</td><td>所以我们今天免费提供饮料或甜点。<br>So we're offering a free drink or dessert today</td></tr><tr><td>Original</td><td>有一天,有一个人以为我偷了的车子!<br>One day, a man thought I had stolen car!</td></tr><tr><td>Fixed</td><td>有一天,有一个人以为我偷了他的车子!<br>One day, a man thought I had stolen his car!</td></tr><tr><td rowspan="6">Spelling errors in target sentences</td><td>Original</td><td>拿到礼物的人不觉得使用,或一点儿都没有用处<br>The person who received the gift did not find it use or useful at all</td></tr><tr><td>Fixed</td><td>拿到礼物的人不觉得实用,或一点儿都没有用处<br>The person who received the gift did not find it practical or useful at all</td></tr><tr><td>Original</td><td>这件话以后对父母越来越感谢。<br>After this piece of sentence, I am more and more grateful to my parents.</td></tr><tr><td>Fixed</td><td>这句话以后对父母越来越感谢。<br>After this sentence, I am more and more grateful to my parents.</td></tr><tr><td>Original</td><td>这种作法并不能来解释问题。<br>This practise magic does not explain the problem.</td></tr><tr><td>Fixed</td><td>这种做法并不能来解释问题。<br>This approach does not explain the problem.</td></tr><tr><td rowspan="6">Unconverted traditional Chinese characters</td><td>Original</td><td>老师一来倒楣的一定是走廊、地板和黑板<br>When a teacher comes, it is always the corridor, the floor, and the blackboard get dump lintel</td></tr><tr><td>Fixed</td><td>老师一来倒霉的一定是走廊、地板和黑板<br>When a teacher comes, it is always the corridor, the floor, and the blackboard get bad luck</td></tr><tr><td>Original</td><td>可是公车没有座位所以他们站著说话。<br>But there were no seats on the bus so they stood book and talked.</td></tr><tr><td>Fixed</td><td>可是公车没有座位所以他们站着说话。<br>But there were no seats on the bus so they stood and talked.</td></tr><tr><td>Original</td><td>因为在那里有着各式各样、琳琅满目的书笈<br>Because there are all kinds of a box for books, dazzling eyes</td></tr><tr><td>Fixed</td><td>因为在那里有着各式各样、琳琅满目的书籍<br>Because there are all kinds of books, dazzling eyes</td></tr></table>
Table 6: More examples of different mistake types and the corresponding fixes.
<table><tr><td colspan="2">Datasets & Models</td><td colspan="3">Detection</td><td colspan="3">Correction</td></tr><tr><td colspan="2">SIGHAN13</td><td>D-P</td><td>D-R</td><td>D-F</td><td>C-P</td><td>C-R</td><td>C-F</td></tr><tr><td rowspan="4">Retrained</td><td>PLOME</td><td>81.3</td><td>77.9</td><td>79.6</td><td>79.6</td><td>76.3</td><td>77.9</td></tr><tr><td>REALISE</td><td>81.9</td><td>77.6</td><td>79.7</td><td>80.0</td><td>75.9</td><td>77.9</td></tr><tr><td>LEAD</td><td>84.7</td><td>79.8</td><td>82.2</td><td>82.3</td><td>77.6</td><td>79.9</td></tr><tr><td>SCOPE</td><td>81.8</td><td>78.1</td><td>80.0</td><td>80.0</td><td>76.4</td><td>78.1</td></tr></table>
Table 7: The retrained results on SIGHAN13, excluding “的”, “地” and “得”.
twoissueswithchinesespellingcorrectionandarefinementsolution/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31272a7886686cf1fc0923e81ed29b27a92fa10d97cd45bf7f3c918f15f72267
size 571887
twoissueswithchinesespellingcorrectionandarefinementsolution/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:91b0c2c97ba51ff1d56fc207d6422cf1e1cb1b69c9f0248bc1870bda5a723baa
size 178901
ultrasparsebert99conditionallysparselanguagemodelling/7f72abc4-afd5-4cdd-826e-6f1657431a0a_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:60ef7b96e3c90e6119f55f09fde757e7a46b1d71df9c389d8ad1b3b38822c43a
size 36630
ultrasparsebert99conditionallysparselanguagemodelling/7f72abc4-afd5-4cdd-826e-6f1657431a0a_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f158121ed754566a4feb72166c9e5e7ba3b00f5523d83db0d066050f9551bac2
size 44311
ultrasparsebert99conditionallysparselanguagemodelling/7f72abc4-afd5-4cdd-826e-6f1657431a0a_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:45c682b31f75c7a772785d9990765e96b0f11a9391c4ac944ab312cf3cf34ebf
size 213452
ultrasparsebert99conditionallysparselanguagemodelling/full.md
ADDED
@@ -0,0 +1,157 @@
# UltraSparseBERT: $99\%$ Conditionally Sparse Language Modelling
Peter Belcak
NVIDIA
pbelcak@nvidia.com

Roger Wattenhofer
ETH Zürich
wattenhofer@ethz.ch

# Abstract
Language models only really need to use a tiny fraction of their neurons for individual inferences.
We present UltraSparseBERT, a BERT variant that uses $0.3\%$ of its neurons during inference while performing on par with similar BERT models. UltraSparseBERT selectively engages just 12 out of 4095 neurons for each layer inference. This is achieved by reorganizing feedforward networks into fast feedforward networks (FFFs).
To showcase but one benefit of high sparsity, we provide an Intel MKL implementation achieving 78x speedup over the optimized feedforward baseline on CPUs, and an OpenAI Triton implementation performing forward passes 4.1x faster than the corresponding native GPU implementation. The training and benchmarking code is enclosed.
# 1 Introduction
Feedforward layers hold the majority of the parameters of language models (Brown et al., 2020; Anil et al., 2023). However, not all of their neurons need to be engaged in the computation of the feedforward layer output at inference time for every input.
A growing body of work is taking advantage of this fact in a top-down fashion, making use of a method commonly referred to as "mixture of experts" (Shazeer et al., 2017; Lepikhin et al., 2020; Fedus et al., 2022). This method consists of subdividing a large feedforward network into blocks ("experts"), designating some blocks to form a gating network, and jointly training both the experts and the gating network to produce the layer's outputs while using only a fraction of layer parameters, conditionally on the input.
The covariant approach, dubbed "fast feedforward networks", is to introduce conditional execution in a bottom-up fashion, utilizing individual neurons rather than blocks to perform gating and be executed conditionally (Belcak and Wattenhofer, 2023). We employ this approach and produce UltraSparseBERT, a variant of the BERT architecture (Devlin et al., 2018) that reorganizes feedforward networks into simplified fast feedforward networks (FFFs). In terms of downstream performance, UltraSparseBERT performs on par with other BERT-like models that are similar in size and undergo similar training procedures. The intermediate layers of UltraSparseBERT are, however, effectively much sparser by design: given a feedforward (FF) and a fast feedforward (FFF) network, each with $n$ neurons, the FFF uses the parameters of only $\mathcal{O}(\log_2 n)$ neurons per input instead of the $\mathcal{O}(n)$ used by the FF. This is a consequence of the fact that FFFs organize their neurons into a balanced binary tree and execute only one root-to-leaf branch of the tree conditionally on the input. In terms of the output produced by the intermediate layers, such a method of execution is equivalent to treating the weights of all unused neurons as zeroes, and it manifests itself as conditional sparsity, since the choice of effectively non-zero neurons is conditional on the layer input.
Performing inference on an FFF amounts to performing conditional matrix multiplication (CMM), in which the rows of the input dot with the columns of neural weights one at a time, and the weight column to proceed with is chosen depending on the output of the previous dot-product operation. In this manner, all neurons are used only by some inputs and no input needs more than just a handful of neurons to be handled by the network. This is in contrast with dense matrix multiplication (DMM), which lies at the heart of the traditional feedforward networks, and which computes the dot products of all rows with all columns.
Recent advances in deep learning infrastructure have made it possible to produce efficient implementations of conditional matrix multiplication based on both popular computational frameworks as well as custom kernel code. We showcase and provide three implementations of the FFF forward pass based on advanced PyTorch compilation, the OpenAI Triton framework, and the Intel MKL routines. In a later section, we give a comparison of each implementation to the corresponding optimized baseline and note that while there is already clear evidence of significant acceleration, there is potential for more.
Reproducibility. We share our training, finetuning, and benchmarking code as well as the weights of our best model. For a quick conceptual verification, the fact that only 12 neurons are used in the inference of UltraSparseBERT can be verified simply by zeroing the output of all but the chosen neurons, and we also give the code for this.
# Contributions.
- We present UltraSparseBERT, a BERT-like model that has 4095 neurons but selectively uses only 12 ($0.3\%$) for inference.
- We finetune UltraSparseBERT for standard downstream tasks and find that it performs on par with its BERT peers.
- We provide three implementations that make use of the high level of sparsity in UltraSparseBERT to perform faster feedforward layer inference.
- Through UltraSparseBERT and the already considerable speedups by early FFF implementations, we demonstrate the potential of bottom-up conditional neural execution in language modelling.
# 2 Model

# 2.1 Architecture
Our architectural starting point is the crammedBERT architecture (Geiping and Goldstein, 2023), which we implement to the letter in all but the nature of the intermediate layers: there, the feedforward networks contained in the crammedBERT transformer encoder are replaced with fast feedforward networks (Belcak and Wattenhofer, 2023).
We make the following simplifying changes to the original fast feedforward networks:
1. Remove all differences between leaf and non-leaf nodes. In particular, we use the same (GeLU) activation function across all nodes, equip all nodes with output weights, and remove all output biases.
2. Fix the leaf size to 1.
3. Allow multiple FFF trees in parallel. We allow for multiple FFF trees to jointly compute the intermediate layer outputs. This is achieved by summing the outputs of the individual trees and presenting the sum as the intermediate layer output.
We denote a model with $K$ trees of depth $D+1$ by appending a suffix to the model name, i.e. UltraSparseBERT-$K$x$D$. Note that, for consistency, we consider a tree with no edges to have depth 0. A BERT-base-sized model with the traditional feedforward layer of width 3072 is then just a special case of UltraSparseBERT, namely UltraSparseBERT-3072x0.
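The bookkeeping behind this naming scheme is easy to make concrete; the small helper below is our own illustration, not code from the paper, and it reproduces the neuron counts reported in Table 1.

```python
def fff_stats(K, D):
    # UltraSparseBERT-KxD has K parallel trees of depth D + 1.
    total = K * (2 ** (D + 1) - 1)  # neurons available for training
    used = K * (D + 1)              # one neuron per tree level per inference
    return total, used, used / total

# fff_stats(3072, 0) -> (3072, 3072, 1.0): the dense special case.
# fff_stats(1, 11)   -> (4095, 12, 0.00293...): ~0.3% of neurons per inference.
```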
We train a full range of increasingly deeper and narrower models, starting from UltraSparseBERT-3072x0 and proceeding with UltraSparseBERT-1536x1, UltraSparseBERT-512x2, and so on.
# 2.2 Training
We follow the final training procedure of crammed-BERT (Geiping and Goldstein, 2023), namely disabling dropout in pretraining and making use of the 1-cycle triangular learning rate schedule. By default, we train every model for 1 day on a single A6000 GPU, except for the final UltraSparseBERT-1x11-long model, which we train 2 times longer using the same regime for slightly better downstream performance.
# 2.3 Downstream Performance

# 2.3.1 Setup
We finetune all UltraSparseBERT models for the RTE, MRPC, SST, STS-B, MNLI, QQP, QNLI, and CoLA tasks of the GLUE benchmark (Wang et al., 2018) and report evaluation scores as in Geiping and Goldstein (2023) for consistency. In short, this approach amounts to finetuning for 5 epochs with learning rate $4 \times 10^{-5}$ across all tasks.
We find that UltraSparseBERT models finetuned in this manner for CoLA end up being undertrained if only 5 training epochs are used. Therefore, we extend the number of CoLA finetuning epochs to 15. This leads to little to no improvement for the
<table><tr><td>Model</td><td>NT</td><td>NI/NT</td><td>RTE</td><td>MRPC</td><td>STSB</td><td>SST-2</td><td>MNLI</td><td>QNLI</td><td>QQP</td><td>Avg</td><td>CoLA</td><td>Avg</td></tr><tr><td colspan="13">Baselines</td></tr><tr><td>crammedBERT-3072</td><td>3072</td><td>100.0%</td><td>58.8</td><td>87.6</td><td>85.2</td><td>91.9</td><td>82.8</td><td>90.4</td><td>89.0</td><td>83.6</td><td>45.0</td><td>79.3</td></tr><tr><td>crammedBERT-4095</td><td>4095</td><td>100.0%</td><td>57.6</td><td>89.1</td><td>85.9</td><td>91.9</td><td>81.3</td><td>90.9</td><td>87.6</td><td>83.2</td><td>47.9</td><td>79.3</td></tr><tr><td colspan="13">UltraSparseBERTs</td></tr><tr><td>UltraSparseBERT-3072x0</td><td>3072</td><td>100.0%</td><td>56.7</td><td>88.9</td><td>86.3</td><td>92.3</td><td>82.9</td><td>92.3</td><td>88.0</td><td>83.8</td><td>48.4</td><td>79.9</td></tr><tr><td>UltraSparseBERT-1536x1</td><td>4608</td><td>66.6%</td><td>55.2</td><td>89.4</td><td>85.0</td><td>91.9</td><td>82.2</td><td>90.1</td><td>89.0</td><td>83.1</td><td>47.5</td><td>79.2</td></tr><tr><td>UltraSparseBERT-512x2</td><td>3584</td><td>42.9%</td><td>59.2</td><td>87.7</td><td>86.0</td><td>89.9</td><td>81.9</td><td>90.3</td><td>89.3</td><td>83.3</td><td>46.2</td><td>79.2</td></tr><tr><td>UltraSparseBERT-256x3</td><td>3840</td><td>26.7%</td><td>54.2</td><td>87.4</td><td>85.9</td><td>91.6</td><td>81.6</td><td>90.0</td><td>89.1</td><td>82.7</td><td>48.0</td><td>78.8</td></tr><tr><td>UltraSparseBERT-128x4</td><td>3968</td><td>16.1%</td><td>58.4</td><td>87.5</td><td>87.2</td><td>92.3</td><td>81.2</td><td>89.9</td><td>90.0</td><td>83.5</td><td>45.9</td><td>79.3</td></tr><tr><td>UltraSparseBERT-64x5</td><td>4032</td><td>9.5%</td><td>55.7</td><td>89.0</td><td>87.2</td><td>91.4</td><td>81.6</td><td>90.2</td><td>89.4</td><td>83.3</td><td>46.1</td><td>79.1</td></tr><tr><td>UltraSparseBERT-32x6</td><td>4064</td><td>5.5%</td><td>57.6</td><td>88.2</td><td>86.1</td><td>91.2</td><td>81.0</td><td>89.2</td><td>88.3</td><td>82.8</td><td>40.6</td><td>78.1</td></tr><tr><td>UltraSparseBERT-16x7</td><td>4080</td><td>3.1%</td><td>55.5</td><td>89.0</td><td>86.7</td><td>88.9</td><td>80.1</td><td>89.4</td><td>86.9</td><td>82.1</td><td>41.5</td><td>77.6</td></tr><tr><td>UltraSparseBERT-8x8</td><td>4088</td><td>1.8%</td><td>56.2</td><td>88.4</td><td>85.4</td><td>88.7</td><td>80.6</td><td>89.3</td><td>86.4</td><td>81.9</td><td>32.7</td><td>76.5</td></tr><tr><td>UltraSparseBERT-4x9</td><td>4092</td><td>1.0%</td><td>53.8</td><td>85.9</td><td>85.7</td><td>89.6</td><td>81.9</td><td>89.3</td><td>88.0</td><td>82.0</td><td>31.8</td><td>76.4</td></tr><tr><td>UltraSparseBERT-2x10</td><td>4094</td><td>0.5%</td><td>59.9</td><td>88.8</td><td>85.3</td><td>87.4</td><td>79.9</td><td>89.2</td><td>86.1</td><td>82.0</td><td>35.4</td><td>76.9</td></tr><tr><td>UltraSparseBERT-1x11</td><td>4095</td><td>0.3%</td><td>57.8</td><td>88.1</td><td>86.1</td><td>89.7</td><td>80.2</td><td>89.3</td><td>87.1</td><td>82.3</td><td>37.1</td><td>77.3</td></tr><tr><td colspan="13">Final Model</td></tr><tr><td>UltraSparseBERT-1x11-long</td><td>4095</td><td>0.3%</td><td>60.7</td><td>87.5</td><td>86.4</td><td>89.9</td><td>81.3</td><td>89.7</td><td>87.6</td><td>83.0</td><td>35.1</td><td>77.7</td></tr><tr><td colspan="13">External Baselines</td></tr><tr><td>OpenAI GPT</td><td>3072</td><td>100%</td><td>56.0</td><td>82.3</td><td>80.0</td><td>91.3</td><td>81.4</td><td>87.4</td><td>70.3</td><td>78.8</td><td>45.4</td><td>75.1</td></tr><tr><td>DistilBERT</td><td>3072</td><td>100%</td><td>59.9</td><td>87.5</td><td>86.9</td><td>91.3</td><td>82.2</td><td>89.2</td><td>71.3</td><td>81.2</td><td>52.1</td><td>77.6</td></tr><tr><td>BERT-base</td><td>3072</td><td>100%</td><td>66.4</td><td>88.9</td><td>85.8</td><td>93.5</td><td>83.4</td><td>90.5</td><td>71.2</td><td>83.0</td><td>51.3</td><td>79.6</td></tr></table>
Table 1: The results of various language models on the GLUE-dev test sets. $N_{\mathrm{T}}$ denotes the number of neurons available for training, $N_{\mathrm{I}} / N_{\mathrm{T}}$ the proportion of neurons that are used for a single inference. "Avg" denotes the average score of all the task results to the left of the column. Emphasis marks the best crammed 1-day UltraSparseBERT performance for the given column. OpenAI GPT, DistilBERT, and BERT-base refer to models reported in Radford et al. (2018); Sanh et al. (2019); Devlin et al. (2018). Experimentation conducted according to the instructions in Wang et al. (2018) and the precedent of Geiping and Goldstein (2023).
baseline crammedBERT models but has a significant impact on the CoLA performance of UltraSparseBERTs.
# 2.3.2 Results
The results of our finetuning are listed in Table 1.
We see that UltraSparseBERT variants trained for 1 day on a single A6000 GPU all retain at least $96.0\%$ of the GLUE downstream predictive performance of the original BERT-base model (Devlin et al., 2018). We also observe that the performance decreases with the increasing depth of the FFFs. Note, however, that the majority of the performance decrease due to the increasing depth is caused by only a single task, CoLA. This behaviour has previously been observed in the literature and is in line with other work trying to compress BERT behaviour into smaller models (Sun et al., 2019; Turc et al., 2019; Mukherjee et al., 2021). If we disregard CoLA, at least $98.6\%$ of the predictive performance is preserved by all UltraSparseBERT models.
Furthermore, we see that, aside from CoLA, our best model, UltraSparseBERT-1x11-long, performs on par with the original BERT-base model while using only $0.3\%$ of its own neurons, which amounts to a mere $0.4\%$ of BERT-base neurons. We share the weights of this model.
# 3 Inference
FFFs, as a part of large language models, have considerable acceleration potential. At the center of their promise sits the operation of conditional matrix multiplication.
# 3.1 Algorithm
Belcak and Wattenhofer (2023) give recursive pseudocode for FFF inference. We list the pseudocode for CMM and the subsequent FFF inference, with the modifications of Section 2.1. In Algorithm 1, $B$ denotes the batch size, $H$ the layer input width (the transformer hidden dimension), $2^{D} - 1$ the number of neurons, and $M_{\star,k}$, $M_{l,\star}$ the $k$-th column and $l$-th row of $M$, respectively. The result of the $>$-comparison in CMM is assumed to be an integer $\in \{0,1\}$.
<table><tr><td colspan="2"></td><td colspan="3">CPU Implementation</td><td colspan="3">GPU Implementation</td></tr><tr><td>Model</td><td>Limit</td><td>Level 1</td><td>Level 2</td><td>Level 3</td><td>Native fused</td><td>BMM</td><td>Triton</td></tr><tr><td>BERT-base-4095</td><td>1.0x</td><td>1.0x</td><td>1.0x</td><td>1.0x</td><td>1.0x</td><td>1.0x</td><td>1.0x</td></tr><tr><td>UltraSparseBERT-1x11</td><td>341.2x</td><td>130.7x</td><td>255.1x</td><td>-</td><td>-</td><td>1.3x</td><td>5.5x</td></tr></table>
Table 2: The results of the feedforward inference acceleration evaluation. Emphasis highlights the better "fair comparison" performance.
# Algorithm 1: FFF inference forward pass.
Input: $B \times H$ input matrix $I$; $(2^{D}-1) \times H$ weight matrix $W^{\mathrm{in}}$; $(2^{D}-1) \times H$ weight matrix $W^{\mathrm{out}}$

Intermediate: $B \times D$ logit matrix $L$; $B \times D$ node index matrix $N$

Output: $B \times H$ matrix $O$

Function CMM($I, W^{\mathrm{in}}$):
- $N_{\star,0} \gets \mathbf{0}$
- for $d \in \{0, \ldots, D-1\}$ do
  - $L_{\star,d} \gets I \left(W^{\mathrm{in}}_{[N_{\star,d}],\star}\right)^{\mathrm{T}}$ (computed row-wise)
  - $N_{\star,d+1} \gets 2N_{\star,d} + 1 + (L_{\star,d} > 0)$
- end
- return $L, N$

Function FFF($I, W^{\mathrm{in}}, W^{\mathrm{out}}$):
- $L, N \gets \mathrm{CMM}(I, W^{\mathrm{in}})$
- $L \gets \mathrm{Activation}(L)$
- $O \gets \mathbf{0}$
- for $d \in \{0, \ldots, D-1\}$ do
  - $O \gets O + L_{\star,d} \cdot W^{\mathrm{out}}_{[N_{\star,d}],\star}$
- end
- return $O$
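For illustration, the following is a minimal NumPy transcription of Algorithm 1 for a single tree; it is a conceptual sketch rather than one of the optimized implementations of Section 3.2, and it assumes the common tanh approximation of GeLU.

```python
import numpy as np

def gelu(x):
    # Tanh approximation of the GeLU activation.
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))

def fff_forward(I, W_in, W_out, D):
    # I: (B, H); W_in, W_out: (2**D - 1, H). Each row of I descends one
    # root-to-leaf branch of the tree, touching D of the 2**D - 1 neurons.
    B, _ = I.shape
    N = np.zeros(B, dtype=np.int64)   # current node index per batch row
    O = np.zeros_like(I)
    for _ in range(D):
        logits = np.einsum("bh,bh->b", I, W_in[N])  # one dot product per row
        O += gelu(logits)[:, None] * W_out[N]       # accumulate output weights
        N = 2 * N + 1 + (logits > 0)                # descend left or right
    return O
```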
# 3.2 Inference Performance
Implementations. For CPU inference, we use the Math Kernel Library available as a part of the Intel oneAPI. The Level 1-3 implementations are those that use Level 1-3 BLAS routines, respectively.
The native fused implementation uses the native fused feedforward layer kernel. Note that this is the fastest GPU implementation for FF layers, but no such kernel currently exists for FFFs due to the nature of CMM. The BMM implementation uses the batched matrix multiplication and activation kernels for both FFs and FFFs; support for this implementation without copying is currently only available in PyTorch nightly builds. The Triton implementation is our custom OpenAI Triton kernel code for both FFs and FFFs, performing fused DMM/CMM and activation on the level of vector/matrix elements.
Methodology. For CPU inference, we perform 250 forward passes per entry on Intel(R) Core(TM) i7-6700HQ CPUs under Intel MKL v2023.2.0, using 64-bit variants of all routines. We report the mean time taken by a single inference, noting that the standard deviation always lay well under $2\%$ of the mean. For GPU inference, we perform 1000 forward passes per entry on NVIDIA RTX A6000 GPUs under CUDA v12.1 and PyTorch 2.1.1-nightly. We measure the GPU time and report the mean time taken, with the standard deviation again well under $2\%$ of the mean in all cases. We take batch size $B = 128 \times 128$ (equivalent to the BERT pretraining context token batch size) and hidden dimension $H = 768$.
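A timing harness of the kind described might look like the sketch below; this is our own minimal version, not the released benchmarking code, and real GPU measurements would additionally require device synchronization around the timed region.

```python
import time

def benchmark(fn, *args, passes=250):
    # Run `passes` forward passes and report the mean and standard deviation
    # of the per-pass wall-clock time in seconds.
    times = []
    for _ in range(passes):
        start = time.perf_counter()
        fn(*args)
        times.append(time.perf_counter() - start)
    mean = sum(times) / len(times)
    std = (sum((t - mean) ** 2 for t in times) / len(times)) ** 0.5
    return mean, std
```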
Results. Table 2 lists the performance comparison of feedforward and fast feedforward layers as they appear in BERT-base and UltraSparseBERT-1x11. Each column of the table lists the relative FFF-over-FF inference speedup when using the same linear-algebraic routine primitives. The two entries missing from Table 2 correspond to the unavailable BLAS Level 3 and native fused implementations of FFFs.
The speedups reported in Table 2 give "fair comparisons", meaning that in each case, both the FF and the FFF implementation used exactly the same primitive linear-algebraic operations. One may also be interested in knowing how the best implementations of FFF currently fare against the best implementations of FF, even though the latter use primitives unavailable for FFF. On CPU, the Level 2 implementation of FFF performs inference 78x faster than the fastest implementation of FF. On GPU, the Triton implementation of FFF delivers a 4.1x speedup over the fastest (native fused) implementation of FF. In sum, there are attractive benefits to high levels of conditional sparsity.
# 4 Limitations
A limitation of our training work is that for most FFF configurations, we only perform one training run. It is possible that the downstream performance of the individual configurations would vary across multiple training runs. This is partially mitigated by the use of multiple fine-tuning runs to find the downstream task score as per the precedent for BERT models on the GLUE benchmark.
A major weakness of inference speed measurements is that they depend heavily on the hardware used as well as the low-level optimization provided as the interface to the hardware. To illustrate how fast the landscape is changing: in October 2023, neither the non-copying BMM nor the Triton implementation leveraging local conditionality would have been possible. Our sparsity argument, however, remains intact, and is easily verifiable through the (default provided) implementation that zeroes out the contributions of all unused neurons.
Our work focuses on the efficiency of existing models and inherits the risks of the models used, if any.
# References
Rohan Anil, Andrew M. Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. 2023. PaLM 2 technical report. arXiv preprint arXiv:2305.10403.

Peter Belcak and Roger Wattenhofer. 2023. Fast feedforward networks. arXiv preprint arXiv:2308.14711.

Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D. Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems, 33:1877-1901.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.

William Fedus, Barret Zoph, and Noam Shazeer. 2022. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. The Journal of Machine Learning Research, 23(1):5232-5270.

Jonas Geiping and Tom Goldstein. 2023. Cramming: Training a language model on a single GPU in one day. In International Conference on Machine Learning, pages 11117-11143. PMLR.

Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. 2020. GShard: Scaling giant models with conditional computation and automatic sharding. arXiv preprint arXiv:2006.16668.

Subhabrata Mukherjee, Ahmed Hassan Awadallah, and Jianfeng Gao. 2021. XtremeDistilTransformers: Task transfer for task-agnostic distillation. arXiv preprint arXiv:2106.04563.

Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. 2018. Improving language understanding by generative pre-training.

Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.

Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. 2017. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538.

Siqi Sun, Yu Cheng, Zhe Gan, and Jingjing Liu. 2019. Patient knowledge distillation for BERT model compression. arXiv preprint arXiv:1908.09355.

Iulia Turc, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Well-read students learn better: On the importance of pre-training compact models. arXiv preprint arXiv:1908.08962.

Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461.
ultrasparsebert99conditionallysparselanguagemodelling/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5bc37d4853cc6930e4b9716cf4ef30a8ae1e3e0ecbe52abbc6ff0a7e65ef0074
size 195437
ultrasparsebert99conditionallysparselanguagemodelling/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:09930ee1b4873150c09ce41d869d4499b22f5e814f17f4fcf8713303a3786cab
size 177496
understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/ff6f464f-0c0b-42c1-a2de-94b3fa078b6f_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:95cddebec006545e9a8351342ebf38fbb721486a14dc00e90d03f1e22f0afa2c
size 57171
understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/ff6f464f-0c0b-42c1-a2de-94b3fa078b6f_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4c71d346f3631a6a505f7991ce72dcaf15052f44159d03716af09d84dc90fbf0
size 65612
understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/ff6f464f-0c0b-42c1-a2de-94b3fa078b6f_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e47ea2f52c4a263a1638a43265e8a12adbe94fd9b75050afc4af71e41a7563fa
size 1125665
understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/full.md
ADDED
@@ -0,0 +1,227 @@
# Understanding the Effects of Noise in Text-to-SQL: An Examination of the BIRD-Bench Benchmark
Niklas Wretblad $^{1,*}$ Fredrik Gordh Riseby $^{1,*}$ Rahul Biswas $^{2}$ Amin Ahmadi $^{2}$ Oskar Holmström $^{1}$

$^{1}$ Linköping University, $^{2}$ Silo AI

niklas.wretblad@liu.se
# Abstract
Text-to-SQL, which involves translating natural language into Structured Query Language (SQL), is crucial for enabling broad access to structured databases without expert knowledge. However, designing models for such tasks is challenging due to numerous factors, including the presence of noise, such as ambiguous questions and syntactical errors. This study provides an in-depth analysis of the distribution and types of noise in the widely used BIRD-Bench benchmark and of the impact of noise on models. While BIRD-Bench was created to model dirty and noisy database values, it was not created to contain noise and errors in the questions and gold SQL queries. We find that noise in questions and gold queries is prevalent in the dataset, with varying amounts across domains and an uneven distribution between noise types. The presence of incorrect gold SQL queries, which then generate incorrect gold answers, significantly undermines the benchmark's reliability. Surprisingly, when evaluating models on corrected SQL queries, zero-shot baselines surpassed the performance of state-of-the-art prompting methods. We conclude that informative noise labels and reliable benchmarks are crucial for developing new Text-to-SQL methods that can handle varying types of noise. All datasets, annotations, and code are available at this URL.
# 1 Introduction
Text-to-SQL with large language models facilitates broader access to structured databases without requiring expert knowledge. To develop such models, high-quality open datasets and benchmarks are essential resources, and over the years, several benchmarks and datasets have been created. Early benchmarks, such as WikiSQL (Zhong et al., 2017), modeled simple scenarios, often with single-table queries.
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
Figure 1: Example of an incorrect SQL query that generates the wrong gold reference answer for the given question. The JOIN operation incorrectly matches clients and accounts by district_id. Due to the possibility of multiple clients and accounts in the same district, accounts are incorrectly associated with the wrong users.
|
| 17 |
+
|
| 18 |
+
approximate real-world scenarios: complex queries with join-statements over several tables (Yu et al., 2018), unseen domain-specific datasets (Gan et al., 2021b; Lee et al., 2021), and noisy questions (Gan et al., 2021a). BIRD-Bench, a recent and challenging benchmark, aims to further close the gap between Text-to-SQL research and real-world applications by, for example, containing large and dirty database values and requiring external knowledge (Li et al., 2023).
|
| 19 |
+
|
| 20 |
+
While BIRD-Bench does not explicitly introduce noise to the questions in the data, it may have been added inadvertently through human error during dataset creation. At the same time, noise is an essential aspect of real-world use cases, as human inputs are often ambiguous and contain syntactical errors. However, for the benchmark to be a helpful tool for judging model properties, such as noise handling, the data must be valid and inform us in which areas a model can be improved.
|
| 21 |
+
|
| 22 |
+
This paper continues the tradition of examining the suitability and limitations of open datasets and benchmarks. We specifically focus on how noise is represented in questions and queries in BIRD-Bench.
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
Figure 2: Distribution of question difficulties and execution accuracy of the DIN-SQL model on the different domains of the BIRD-Bench development set.
|
| 26 |
+
|
| 27 |
+
We perform a qualitative analysis of what types of noise exist in the data and the noise distribution in specific domains. We then study the effects of noise on different models and prompting techniques, using both strong baselines and state-of-the-art methods.
|
| 28 |
+
|
| 29 |
+
We find that noise in questions and gold SQL queries is prevalent, that noise is unevenly distributed across domains, and that categories of noise types are represented unequally in the data. Errors in gold SQL queries are also common and decrease the reliability of BIRD-Bench. When evaluating models on a dataset with corrected gold queries, the performance gap between zero-shot baselines and state-of-the-art prompting techniques is closed, questioning how we should interpret model performance on BIRD-Bench.
|
| 30 |
+
|
| 31 |
+
# 2 Related Work
|
| 32 |
+
|
| 33 |
+
Datasets WikiSQL is a large Text-to-SQL dataset containing only simple SELECT and WHERE operations without nested queries or JOIN operations (Zhong et al., 2017). SPIDER (Yu et al., 2018) was later developed to approximate real-life scenarios more closely, requiring models to construct complex queries and understand the database schema. While complexity is a critical aspect of real use cases, variations of SPIDER have been created to contain noisy questions (Gan et al., 2021a) and domain-specific questions (Gan et al., 2021b).
|
| 34 |
+
|
| 35 |
+
BIRD-Bench was created to close the gap between academic research and real-world applications by introducing large and dirty database values, questions requiring external knowledge, and a focus on SQL execution efficiency (Li et al., 2023).
|
| 36 |
+
|
| 37 |
+
Text-to-SQL Methods The notable gap in accuracy between automated systems (65.45%) and human experts (92.96%)<sup>1</sup> highlights the need for ongoing developments in Text-to-SQL models.
|
| 38 |
+
|
| 39 |
+
Different approaches have been taken to create models capable of Text-to-SQL generation. A more traditional approach is to finetune LLMs on Text-to-SQL examples. While these models offer promising results, there is a performance gap compared to instruction-tuned LLMs, in particular GPT-4, adapted to the Text-to-SQL task through prompt engineering (Li et al., 2023). Prompts are often chained, with each prompt applied to a sub-problem of the task, such as schema linking, decomposition of queries, and refinement of model generations (Pourreza and Rafiei, 2023a; Wang et al., 2023).
|
| 40 |
+
|
| 41 |
+
Noise in Datasets The contemporaneous works of Wang et al. (2023) and Sun et al. (2024) show that ambiguous questions and incorrect SQL queries exist in BIRD-Bench. However, unlike our work, they do not study how noise varies across domains or how the identified noise and errors affect
|
| 42 |
+
|
| 43 |
+
<table><tr><td>Statistic</td><td>Financial</td><td>California Schools</td><td>Superhero</td><td>Toxicology</td><td>Thrombosis Prediction</td></tr><tr><td>Question & SQL query pairs with noise</td><td>52/106 (49%)</td><td>9/20 (45%)</td><td>3/20 (15%)</td><td>7/20 (35%)</td><td>8/20 (40%)</td></tr><tr><td>Noisy questions</td><td>44/106 (41.5%)</td><td>5/20 (25%)</td><td>2/20 (10%)</td><td>6/20 (30%)</td><td>3/20 (15%)</td></tr><tr><td>Erroneous gold queries</td><td>22/106 (20.7%)</td><td>8/20 (40%)</td><td>1/20 (5%)</td><td>2/20 (10%)</td><td>6/20 (30%)</td></tr></table>
|
| 44 |
+
|
| 45 |
+
Table 1: Statistics of the total amount of pairs of questions and SQL queries that contain errors and the amount of errors for questions and gold SQL queries separately across five domains.
|
| 46 |
+
|
| 47 |
+
<table><tr><td>Noise Type</td><td>Financial</td><td>California Schools</td><td>Superhero</td><td>Toxicology</td><td>Thrombosis Prediction</td></tr><tr><td>Spelling/Syntactical Errors</td><td>23</td><td>2</td><td>1</td><td>4</td><td>2</td></tr><tr><td>Vague/Ambiguous Questions</td><td>17</td><td>1</td><td>1</td><td>1</td><td>1</td></tr><tr><td>Incorrect SQL query</td><td>22</td><td>8</td><td>1</td><td>2</td><td>6</td></tr><tr><td>Synonyms</td><td>2</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>String Capitalization</td><td>7</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>Question does not map to DB</td><td>1</td><td>4</td><td>1</td><td>0</td><td>0</td></tr><tr><td>Total number of errors</td><td>72</td><td>15</td><td>4</td><td>7</td><td>9</td></tr></table>
|
| 48 |
+
|
| 49 |
+
Table 2: Distribution of different types of noise encountered in the domains.
|
| 50 |
+
|
| 51 |
+
model performance. Pourreza and Rafiei (2023b) perform a more fine-grained analysis of incorrect SQL queries but also mention categories of noise that we cover in our work (e.g., natural language questions that do not match the database schema). In contrast to their work, our analysis of noise in the natural language questions is more fine-grained, covering, for example, the effects of syntactical errors, synonyms, and ambiguous questions.
|
| 52 |
+
|
| 53 |
+
Katsogiannis-Meimarakis and Koutrika (2023) point out that database schemas often misalign with data entities, which may cause lexical or syntactic ambiguities affecting Text-to-SQL models.
|
| 54 |
+
|
| 55 |
+
# 3 Method
|
| 56 |
+
|
| 57 |
+
# 3.1 Data
|
| 58 |
+
|
| 59 |
+
The BIRD-Bench dataset (Li et al., 2023) is studied in this paper as it is a recent and widely used dataset and the most similar to real-world scenarios among current benchmarks. BIRD contains 12,751 samples across many domains. Because of the time-consuming human annotation performed in this work, the main focus of the analysis is on the financial domain<sup>2</sup>, which includes queries related to banking operations.
|
| 60 |
+
|
| 61 |
+
The development set of the financial domain contains 106 question and SQL query pairs, which represent approximately $7.5\%$ of the data points in the development set, and are structured around eight distinct tables presented in full in Appendix A.1.
|
| 62 |
+
|
| 63 |
+
Each question is annotated with a difficulty level (simple, moderate, or challenging). The specific distribution is found in Figure 2.
|
| 64 |
+
|
| 65 |
+
We selected four additional domains to validate our noise analysis of the financial domain and performed the same analysis on 20 randomly sampled questions from each domain. The domain selection was based on question difficulties and model accuracy of DIN-SQL<sup>3</sup>, as presented in Figure 2. We selected California Schools with low accuracy and simple questions, Superhero with high accuracy and simple questions, Toxicology with similar accuracy to the financial domain but more complex questions, and Thrombosis Prediction with low accuracy and moderately difficult questions.
|
| 66 |
+
|
| 67 |
+
# 3.2 Annotation of Noise
|
| 68 |
+
|
| 69 |
+
All questions and SQL queries in the selected domains were annotated to determine whether they contained errors. The annotations were performed independently by two authors of this paper, fluent in English and experts in SQL. In the first phase, annotators independently identified questions and SQL queries with errors. The Cohen's Kappa coefficient was 0.73, demonstrating a substantial level of agreement between annotators. The annotators then independently named the types of errors. In the second phase, the annotators resolved disagreements by observing the other annotator's reasoning and the remaining disagreements were
|
| 70 |
+
|
| 71 |
+

|
| 72 |
+
Figure 3: Accuracy of various models on BIRD-Bench's financial domain. Models are evaluated on the original data (left), corrected SQL queries (middle), and corrected SQL queries together with corrected noisy questions (right).
|
| 73 |
+
|
| 78 |
+
resolved through discussion. The identified errors were grouped based on similarity and named after the errors' common properties, as shown in Table 2. The annotations were then used to generate two distinct datasets: one where SQL was corrected, and one where both SQL queries and noisy questions were corrected.
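The agreement figure above is Cohen's kappa over the two annotators' binary judgements of whether a question and SQL query pair contains an error. As a reference point, the following is a minimal sketch of that computation with scikit-learn; the label vectors are invented placeholders, not our annotation data.

```python
from sklearn.metrics import cohen_kappa_score

# Hypothetical binary labels (1 = pair contains an error) assigned by the
# two independent annotators to the same ten question-query pairs.
annotator_a = [1, 0, 0, 1, 1, 0, 1, 0, 0, 1]
annotator_b = [1, 0, 1, 1, 1, 0, 1, 0, 0, 0]

# Cohen's kappa corrects the observed agreement for the agreement expected
# by chance; values above 0.6 are conventionally read as substantial.
print(cohen_kappa_score(annotator_a, annotator_b))
```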
|
| 79 |
+
|
| 80 |
+
# 3.3 Models and Prompt Techniques
|
| 81 |
+
|
| 82 |
+
Two models, GPT-3.5 and GPT-4, were used with three different prompting methods: zero-shot prompting as a baseline and the more advanced DIN-SQL (Pourreza and Rafiei, 2023a) and MAC-SQL (Wang et al., 2023). We used GPT-3.5 and GPT-4 for zero-shot prompting, but for the advanced prompting techniques, we only used GPT-3.5 since chaining prompts with GPT-4 was beyond the resources for this project. We chose the models and prompting methods because they were the highest-performing publicly available models on BIRD-Bench at the time of writing.
|
| 83 |
+
|
| 84 |
+
Information about the database schema is crucial to generating correct queries for BIRD-Bench questions. DIN-SQL and MAC-SQL have predefined formats for adding the database schema. For the zero-shot model, we provide the database schema in-context in the form of SQL table creation statements, as this has been shown to improve accuracy compared to other formats (Nan et al., 2023). The prompt template for the zero-shot model is found in Appendix A.2. The code base will be published after the anonymity period.
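As a rough illustration of this zero-shot setup (a minimal sketch, not the released code base), the schema can be read from a local SQLite copy of the database and spliced into the template of Appendix A.2; the function name and file layout are assumptions.

```python
import sqlite3

def build_zero_shot_prompt(db_path: str, question: str, evidence: str) -> str:
    """Assemble the zero-shot prompt: the schema is passed in-context as the
    database's original CREATE TABLE statements (cf. Nan et al., 2023)."""
    conn = sqlite3.connect(db_path)
    schema = "\n\n".join(
        row[0]
        for row in conn.execute(
            "SELECT sql FROM sqlite_master WHERE type = 'table' AND sql IS NOT NULL"
        )
    )
    return (
        "Database schema in the form of CREATE_TABLE statements:\n"
        f"{schema}\n"
        "Using valid SQL, answer the following question based on the tables provided above.\n"
        "Hint helps you to write the correct sql query.\n"
        f"Question: {question}\n"
        f"Hint: {evidence}\n"
        "DO NOT return anything else except the SQL query."
    )
```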
|
| 85 |
+
|
| 86 |
+
# 4 Qualitative Analysis of Noise
|
| 87 |
+
|
| 88 |
+
Even though BIRD-Bench was not intentionally created to contain noise in questions and SQL queries, our analysis reveals that noise exists in
|
| 89 |
+
|
| 90 |
+
all studied domains to different extents. The financial domain exhibits the highest level of noise at $49\%$ , closely followed by the California Schools domain at $45\%$ , as shown in Table 1. In contrast, the Superhero domain demonstrated the lowest noise level, with only $15\%$ of data points containing errors. As presented in Section 3.1 and Figure 2, the Superhero domain had the highest accuracy while having a similar distribution of question difficulties. This could indicate that model accuracy across tasks correlates with noise, which implies that noise in questions and SQL queries needs to be carefully considered during dataset design.
|
| 91 |
+
|
| 92 |
+
The categories and absolute frequency of noise per dataset are presented in Table 2, and both examples and descriptions of the noise types are presented in Appendix A.3. Our analysis shows that spelling/syntactical errors and incorrect SQL queries were most prevalent in the financial domain. The presence of noise in questions is not necessarily undesirable, as it more closely mimics real-life scenarios. However, the distribution of noise across the categories is unequal. While this could approximate a real-world distribution, it might unfairly bias the benchmark towards models better at handling syntactical errors. Given the uneven distribution of errors and the lack of noise labels, the benchmark does not inform us which noise types are challenging for current models and in which areas they should improve.
|
| 93 |
+
|
| 94 |
+
A more severe issue is that all domains contained incorrect SQL queries, which are used for generating gold reference answers. An example of an erroneous SQL query is shown in Figure 1. These types of errors call into question the benchmark's ability to accurately determine model performance, which is explored in the next section.
|
| 95 |
+
|
| 96 |
+
<table><tr><td>Error Category</td><td>Total</td><td>DIN-SQL (3.5)</td><td>Zero-shot (3.5)</td><td>Zero-shot (4)</td><td>MAC-SQL (3.5)</td></tr><tr><td>Spelling/Syntactical Errors</td><td>23</td><td>2</td><td>6</td><td>4</td><td>6</td></tr><tr><td>Vague/Ambiguous Questions</td><td>17</td><td>1</td><td>2</td><td>3</td><td>4</td></tr><tr><td>Incorrect SQL</td><td>22</td><td>0</td><td>2</td><td>2</td><td>4</td></tr><tr><td>Synonyms</td><td>2</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>String Capitalization</td><td>7</td><td>2</td><td>1</td><td>1</td><td>0</td></tr><tr><td>Question does not map to DB</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table>
|
| 97 |
+
|
| 98 |
+
Table 3: Model performance on the financial domain for various error categories and overall correct predictions on non-erroneous questions.
|
| 99 |
+
|
| 100 |
+
# 5 Impact of Noise on Model Performance
|
| 101 |
+
|
| 102 |
+
We apply models to the original dataset, a dataset where SQL has been corrected, and a dataset where both SQL queries and noisy questions have been corrected. Figure 3 presents the results of a single evaluation for all models on all datasets.
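Throughout, accuracy refers to execution accuracy (cf. Figure 2): a prediction counts as correct when executing it returns the same result set as executing the gold query. The sketch below illustrates this comparison; it is not the official BIRD-Bench evaluation script, and it assumes the databases are available as local SQLite files.

```python
import sqlite3

def execution_match(db_path: str, predicted_sql: str, gold_sql: str) -> bool:
    """One example is correct if the predicted and gold queries return the
    same set of rows when executed against the database."""
    conn = sqlite3.connect(db_path)
    try:
        predicted = set(conn.execute(predicted_sql).fetchall())
    except sqlite3.Error:
        return False  # predictions that fail to execute count as wrong
    gold = set(conn.execute(gold_sql).fetchall())
    return predicted == gold

def execution_accuracy(db_path: str, pairs) -> float:
    """Average execution match over an iterable of (predicted, gold) pairs."""
    pairs = list(pairs)
    return sum(execution_match(db_path, p, g) for p, g in pairs) / len(pairs)
```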
|
| 103 |
+
|
| 104 |
+
MAC-SQL slightly outperforms DIN-SQL and the zero-shot baselines on the original dataset, where noise exists in both questions and queries. However, correcting SQL queries decreases MAC-SQL's performance, tying it with DIN-SQL as the poorest performers. Surprisingly, even the zero-shot GPT-3.5 baseline outperforms the more advanced DIN-SQL and MAC-SQL. The dataset with corrected SQL queries could also be considered optimal since gold labels are correct and noise in questions is represented. Given the drastic re-ranking of models, it is relevant to question if BIRD-Bench is a reliable assessor of models and a useful tool to assist researchers in developing new methods for Text-to-SQL.
|
| 105 |
+
|
| 106 |
+
When evaluating models on the dataset with both questions and SQL queries corrected, the accuracy of all models increases significantly. While zero-shot GPT-4 performs the best, the remaining models perform similarly, with DIN-SQL slightly ahead. Compared to the ideal scenario where only SQL queries are corrected, the presence of noise noticeably impacts all models' accuracy. However, models are not equally affected by noise: some show a more pronounced increase in accuracy than others. Table 3 presents each model's performance for the error categories. MAC-SQL slightly outperforms the other models on errors related to Spelling and Syntactical Errors, Ambiguous Questions, and Incorrect SQL. The main difference between MAC-SQL and the other methods is an extensive filtering process of tables and columns, and the resulting increase of relevant information in the context could make the model more robust to noise. However, such a
|
| 107 |
+
|
| 108 |
+
hypothesis must be confirmed or rejected by studying what the model has seen during the generation phase, which we leave to future studies.
|
| 109 |
+
|
| 110 |
+
# 6 Conclusions and Future Work
|
| 111 |
+
|
| 112 |
+
This paper analyzed the quality and distribution of noise in the BIRD-Bench benchmark for Text-to-SQL. We show that noise in both questions and SQL queries is prevalent, and that it is unevenly distributed across noise types and domains. Errors in gold SQL queries were common, decreasing the reliability of BIRD-Bench. Surprisingly, when evaluating models on corrected gold queries, zero-shot baselines surpassed more advanced prompting techniques. These findings highlight the necessity of developing benchmarks that can guide researchers in designing models that are more resistant to noise. Therefore, a significant improvement would be to label noise types across the dataset. In future work, we plan to study how large language models can be applied to noise classification, a new task that could also be critical in systems where Text-to-SQL is employed.
|
| 113 |
+
|
| 114 |
+
Overall, this study provides a deeper understanding of how noise is expressed in Text-to-SQL tasks and how noise and models interact, pinpointing areas for improvement in the BIRD-Bench dataset.
|
| 115 |
+
|
| 116 |
+
# Limitations
|
| 117 |
+
|
| 118 |
+
While our study provides valuable insights regarding the influence of dataset noise in Text-to-SQL translation tasks, it has several limitations. As the analysis was performed mainly on the financial domain of the BIRD-Bench dataset, the generalizability of our findings may be limited. We only examined a small subset of other domains to validate our findings, which may capture only part of the noise distribution across domains.
|
| 119 |
+
|
| 120 |
+
Additionally, annotators may have introduced subjective bias during noise annotation, even
|
| 121 |
+
|
| 122 |
+
though we attempted to minimize this by having two independent annotators. Further, our decision to categorize noise into six specific classes might have oversimplified the complexity and diversity of noise types in these benchmarks.
|
| 123 |
+
|
| 124 |
+
Our choice of models and prompting techniques could also be a potential limitation. We only employed two models, GPT-3.5 and GPT-4, and three different prompting methods. Evaluating a broader array of models and prompting techniques might have given a more comprehensive understanding of their performance under the influence of noise.
|
| 125 |
+
|
| 126 |
+
Lastly, the substantial effort required to correct SQL queries and noisy questions in the dataset may have introduced errors despite the review process. This might influence the model performances we report when evaluating models on the corrected datasets.
|
| 127 |
+
|
| 128 |
+
# Acknowledgments
|
| 129 |
+
|
| 130 |
+
We extend our gratitude to Mohammadreza Pourreza for the results from the DIN-SQL model. We are also grateful to SEBx for their generous support and the provision of resources. Additionally, this research was partially funded by the National Graduate School of Computer Science in Sweden (CUGS).
|
| 131 |
+
|
| 132 |
+
# References
|
| 133 |
+
|
| 134 |
+
Yujian Gan, Xinyun Chen, Qiuping Huang, Matthew Purver, John R. Woodward, Jinxia Xie, and Pengsheng Huang. 2021a. Towards robustness of text-to-SQL models against synonym substitution. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2505-2515, Online. Association for Computational Linguistics.
|
| 135 |
+
Yujian Gan, Xinyun Chen, and Matthew Purver. 2021b. Exploring underexplored limitations of cross-domain text-to-SQL generalization. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 8926-8931, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 136 |
+
George Katsogiannis-Meimarakis and Georgia Koutrika. 2023. A survey on deep learning approaches for text-to-sql. The VLDB Journal, 32(4):905-936.
|
| 137 |
+
Chia-Hsuan Lee, Oleksandr Polozov, and Matthew Richardson. 2021. KaggleDBQA: Realistic evaluation of text-to-SQL parsers. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2261-2273, Online. Association for Computational Linguistics.
|
| 138 |
+
Jinyang Li, Binyuan Hui, Ge Qu, Binhua Li, Jiaxi Yang, Bowen Li, Bailin Wang, Bowen Qin, Rongyu Cao, Ruiying Geng, Nan Huo, Xuanhe Zhou, Chenhao Ma, Guoliang Li, Kevin C. C. Chang, Fei Huang, Reynold Cheng, and Yongbin Li. 2023. Can LLM already serve as a database interface? A big bench for large-scale database grounded text-to-SQLs. In Advances in Neural Information Processing Systems.
|
| 139 |
+
Linyong Nan, Yilun Zhao, Weijin Zou, Narutatsu Ri, Jaesung Tae, Ellen Zhang, Arman Cohan, and Dragomir Radev. 2023. Enhancing text-to-SQL capabilities of large language models: A study on prompt design strategies. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 14935-14956, Singapore. Association for Computational Linguistics.
|
| 140 |
+
Mohammadreza Pourreza and Davood Rafiei. 2023a. DIN-SQL: Decomposed in-context learning of text-to-SQL with self-correction. In Advances in Neural Information Processing Systems 36.
|
| 141 |
+
Mohammadreza Pourreza and Davood Rafiei. 2023b. Evaluating cross-domain text-to-SQL models and benchmarks. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 1601-1611, Singapore. Association for Computational Linguistics.
|
| 142 |
+
|
| 143 |
+
Ruoxi Sun, Sercan Ö. Arik, Alex Muzio, Lesly Miculicich, Satya Gundabathula, Pengcheng Yin, Hanjun Dai, Hootan Nakhost, Rajarishi Sinha, Zifeng Wang, and Tomas Pfister. 2024. SQL-PaLM: Improved large language model adaptation for text-to-SQL (extended).
|
| 144 |
+
Bing Wang, Changyu Ren, Jian Yang, Xinnian Liang, Jiaqi Bai, Qian-Wen Zhang, Zhao Yan, and Zhoujun Li. 2023. MAC-SQL: A multi-agent collaborative framework for text-to-SQL.
|
| 145 |
+
Tao Yu, Rui Zhang, Kai Yang, Michihiro Yasunaga, Dongxu Wang, Zifan Li, James Ma, Irene Li, Qingning Yao, Shanelle Roman, Zilin Zhang, and Dragomir Radev. 2018. Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-SQL task. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3911-3921, Brussels, Belgium. Association for Computational Linguistics.
|
| 146 |
+
Victor Zhong, Caiming Xiong, and Richard Socher. 2017. Seq2SQL: Generating Structured Queries from Natural Language using Reinforcement Learning. arXiv e-prints, page arXiv:1709.00103.
|
| 147 |
+
|
| 148 |
+
# A Appendix
|
| 149 |
+
|
| 150 |
+
# A.1 Database Schema of the Financial Domain
|
| 151 |
+
|
| 152 |
+

|
| 153 |
+
Figure 4: Database schema of the database in the financial domain of BIRD-Bench.
|
| 154 |
+
|
| 155 |
+
Figure 4 displays the database schema for the financial domain. This schema contains various tables, such as those for loans, transactions, accounts, cards and clients, all reflecting the financial orientation of the database. Descriptions of what information these tables contain are presented in Table 4. The database consists of 55 columns distributed across eight distinct tables. While the majority of the column names are intuitively understandable, some present interpretative challenges, as evident in the schema. An illustrative example is the district table, which incorporates 16 unique columns. This includes a column titled district_id along with 15 other columns, ranging from A2 to A16. The latter columns' names do not readily convey the nature of the data they hold, making them less intuitive to understand. In practice, a database schema will often be accompanied by a data dictionary or documentation that explains each table and column in detail. Such documentation would typically provide the context needed to fully understand the meaning of each element in the schema, the range of possible values for fields with unspecified types, and the business logic underlying the relationships. Without this additional documentation, fully
|
| 156 |
+
|
| 157 |
+
interpreting and effectively using the database can be challenging, as illustrated by the column names in the district table. The BIRD-Bench dataset includes a unique feature for each question, termed hint, designed to offer insights or supplementary information corresponding to the specifics detailed in such database documentation. This feature is provided to all models described in Section 3.3 for each question during the experiments.
|
| 158 |
+
|
| 159 |
+
Table 4: Table descriptions of the tables in the database of the financial domain of BIRD-Bench.
|
| 160 |
+
|
| 161 |
+
<table><tr><td>Table Name</td><td>Description</td></tr><tr><td>loan</td><td>Contains details of loans.</td></tr><tr><td>order</td><td>Holds information about monetary orders.</td></tr><tr><td>trans</td><td>Represents financial transactions.</td></tr><tr><td>account</td><td>Contains account information.</td></tr><tr><td>disp</td><td>Links clients to accounts (dispositions).</td></tr><tr><td>card</td><td>Contains details about cards issued.</td></tr><tr><td>client</td><td>Holds client information.</td></tr><tr><td>district</td><td>Contains details about districts or regions.</td></tr></table>
|
| 162 |
+
|
| 163 |
+
Further, the lines between the tables in Figure 4 represent relationships, where the nature of each relationship is indicated by the shape of the line ends where they connect to the tables. A one-to-many relationship is drawn as a line that starts with a single stroke (marked with a '1') and ends in a crow's foot (three lines) at the opposite end. For example, an account can have multiple orders, transactions, dispositions, and loans associated with it, but each of those entities is linked to exactly one account. Further, clients and accounts are related through the disposition table in a many-to-many relationship. An account can have many clients associated with it, for example one client listed as the owner of the account and several other clients listed as users, which is practical for a shared family account where one parent owns the account and other family members are listed as users. Conversely, a single client can also be related to many different accounts.
|
| 164 |
+
|
| 165 |
+
# A.2 Prompt Templates
|
| 166 |
+
|
| 167 |
+
Listing 1: Zero-Shot Prompting Template.
|
| 168 |
+
```
Database schema in the form of CREATE_TABLE statements:
{database_schema}

Using valid SQL, answer the following question based on the tables provided above.
Hint helps you to write the correct sql query.
Question: {question}
Hint: {evidence}
DO NOT return anything else except the SQL query.
```
|
| 179 |
+
|
| 180 |
+
The prompt template underlying the zero-shot models described in Section 3.3 can be found in Listing 1. The prompt integrates a given question, the associated database schema, an instruction directing the LLM to generate valid SQL, and a hint provided by the BIRD-Bench dataset. The hint is designed to offer
|
| 181 |
+
|
| 182 |
+
insights or supplementary information needed in order to accurately interpret the database schema and to correctly convert the question into a SQL query. Note that the other models implemented in this research are also provided with this feature.
|
| 183 |
+
|
| 184 |
+
# A.3 Examples of Errors and Corrections
|
| 185 |
+
|
| 186 |
+
This section provides examples of erroneous data points and their corrections from the different error categories found in Table 2.
|
| 187 |
+
|
| 188 |
+
# Example 1: Spelling/Syntactical Error
|
| 189 |
+
|
| 190 |
+
In Figure 5, an example question with a syntactical error is provided, representing the question with ID 125 from the financial domain in the BIRD-Bench development set. The grammatical structure of the question complicates the interpretation of its meaning for a human reader and makes it difficult to understand which information it is asking for. Therefore, there is a chance that an LLM might also misinterpret the question. A corrected version of the question can be seen in the figure.
|
| 191 |
+
|
| 192 |
+

|
| 193 |
+
Figure 5: Question with ID 125 from the development set of BIRD-Bench which contains syntactical errors and a corrected version of the question.
|
| 194 |
+
|
| 195 |
+
# Example 2: Ambiguous/Vague Question
|
| 196 |
+
|
| 197 |
+
Figure 6 displays the data point with ID 159 from the financial domain of the development set of BIRD-Bench. It contains an error which was grouped into the ambiguous/vague question category. The challenge lies in the natural language question's ambiguity, specifically in the phrase "List all the withdrawals..." This ambiguity revolves around determining which columns to return when executing the SQL query.
|
| 198 |
+
|
| 199 |
+

|
| 200 |
+
Figure 6: Question, gold SQL query and a corrected version of the question corresponding to the data point with ID 159 from the development set of BIRD-Bench, showcasing an error in the ambiguous/vague category.
|
| 201 |
+
|
| 202 |
+
# Example 3: Incorrect Gold SQL
|
| 203 |
+
|
| 204 |
+
Figure 7 showcases an incorrect gold SQL query found in the data point with ID 132 of the financial domain of the development set of BIRD-Bench. The JOIN operation incorrectly matches clients and accounts by district_id. Due to the possibility of multiple clients and accounts in the same district, accounts are incorrectly associated with the wrong users.
|
| 205 |
+
|
| 206 |
+

|
| 207 |
+
Figure 7: Example of an incorrect SQL query that generates the wrong gold reference answer for the given question. The JOIN operation incorrectly matches clients and accounts by district_id. Due to the possibility of multiple clients and accounts in the same district, accounts are incorrectly associated with the wrong users.
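To make the failure mode concrete, the following self-contained sketch reproduces it on a toy instance of the schema (table and column names follow the financial database, but all rows are invented): joining clients to accounts on district_id pairs every client with every account in the district, whereas joining through the disp (disposition) table preserves the true ownership.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
CREATE TABLE client  (client_id INTEGER, district_id INTEGER);
CREATE TABLE account (account_id INTEGER, district_id INTEGER);
CREATE TABLE disp    (client_id INTEGER, account_id INTEGER);
-- Two clients and two accounts share district 1; disp holds the truth.
INSERT INTO client  VALUES (1, 1), (2, 1);
INSERT INTO account VALUES (10, 1), (20, 1);
INSERT INTO disp    VALUES (1, 10), (2, 20);
""")

# Gold-style join on district_id: yields the within-district cross product.
wrong = conn.execute("""
    SELECT c.client_id, a.account_id
    FROM client c JOIN account a ON c.district_id = a.district_id
    ORDER BY c.client_id, a.account_id
""").fetchall()
print(wrong)   # [(1, 10), (1, 20), (2, 10), (2, 20)] -- spurious pairs

# Corrected join through the disposition table.
right = conn.execute(
    "SELECT client_id, account_id FROM disp ORDER BY client_id"
).fetchall()
print(right)   # [(1, 10), (2, 20)]
```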
|
| 208 |
+
|
| 209 |
+
# Example 4: Synonyms
|
| 210 |
+
|
| 211 |
+
Figure 8 demonstrates how specific wordings can complicate interpretation for an LLM. The term sum, being both a SQL keyword and a descriptor, led to the LLM's literal interpretation and the incorrect summation of a transaction. The actual intent was to inquire about the transaction's balance or amount. A rephrased question resulted in the LLM generating the correct SQL query, fetching the intended information, as seen in the figure.
|
| 212 |
+
|
| 213 |
+

|
| 214 |
+
Figure 8: Question from data point with ID 177 from the development set of BIRD-Bench containing a difficult synonym, a corrected version of the question with the synonym replaced and corresponding predicted SQL queries by the DIN-SQL (GPT-3.5) model described in Section 3.3. Showcases the difficulty of synonyms on model predictions.
|
| 215 |
+
|
| 216 |
+
# Example 5: String Capitalization
|
| 217 |
+
|
| 218 |
+
Because string comparisons in SQL queries are case-sensitive by default in many database engines, the way a question is formulated regarding the use of uppercase or lowercase letters when asking for a specific value affects the result. This is because the LLM will most likely use the specific entry as given when generating the query, unless it has knowledge of the casing used for the entries in the database. Therefore, in Figure 9, an example is provided where the terms "East" and "North" are mentioned with initial capital letters, as is commonly the case. However, the entries for these column values are in lowercase in the database, which means the question needs to account for this for the LLM to be able to generate a correct query. The corrected question and the SQL query generated from it can also be seen in Figure 9.
|
| 219 |
+
|
| 220 |
+

|
| 221 |
+
Figure 9: Question affected by string capitalization from the development set of BIRD-Bench, together with the corrected question and the SQL query generated from it.
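A minimal sketch of the underlying behaviour (invented table and rows; SQLite's default case-sensitive string comparison): the literal value from the question matches nothing, while a case-insensitive comparison recovers the intended rows.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
CREATE TABLE district (district_id INTEGER, region TEXT);
INSERT INTO district VALUES (1, 'east'), (2, 'north'), (3, 'south');
""")

# Mirrors the question's capitalization: no rows match 'East'.
print(conn.execute(
    "SELECT district_id FROM district WHERE region = 'East'"
).fetchall())  # []

# Lower-casing both sides makes the comparison case-insensitive.
print(conn.execute(
    "SELECT district_id FROM district WHERE LOWER(region) = LOWER('East')"
).fetchall())  # [(1,)]
```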
|
| 222 |
+
|
| 223 |
+
# Example 6: Database Schema Non-Alignment
|
| 224 |
+
|
| 225 |
+
<table><tr><td>Incorrect Question</td><td>Description</td></tr><tr><td>What is the disposition ID of the client who made $5100 USD transaction on 1998/9/2?</td><td>The question asks for a single disposition ID, which does not reflect that there is a one-to-many relation between client and disposition, and most likely it won’t be possible to return a single ID.</td></tr><tr><td>List out the account numbers of clients who are youngest and have highest average salary?</td><td>There is no information about salaries of specific clients in the database.</td></tr></table>
|
| 226 |
+
|
| 227 |
+
Table 5: Examples of questions that do not map to the database schema and accompanying descriptions of why they do not.
|
understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e5ab289661366d3634ab5b66401e1ef79b47c51648c98ebdea78663cae2017d
|
| 3 |
+
size 722511
|
understandingtheeffectsofnoiseintexttosqlanexaminationofthebirdbenchbenchmark/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1f92f0216e57e75d7c13a52e710b64ab9b3b209cb55d04a42fd5c12ff0b946ed
|
| 3 |
+
size 220586
|
whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/c272cfd8-04ba-4e1c-8443-caf5ebe4c383_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:74cb9160257b0f4b5127b513a93d04000a7004cdf7be381405bed2bcda5f96a3
|
| 3 |
+
size 126712
|
whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/c272cfd8-04ba-4e1c-8443-caf5ebe4c383_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8de45a9de29d58a7c05ca4a76918b9096b98534f1af24a00b47a1e4a3239611d
|
| 3 |
+
size 155550
|
whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/c272cfd8-04ba-4e1c-8443-caf5ebe4c383_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2ec8d2eefedd63d09b220d1843f85191add5202305d7ae11e429a2a4c7b32733
|
| 3 |
+
size 594052
|
whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/full.md
ADDED
|
@@ -0,0 +1,556 @@
|
| 1 |
+
# What Do Dialect Speakers Want? A Survey of Attitudes Towards Language Technology for German Dialects
|
| 2 |
+
|
| 3 |
+
Verena Blaschke $\triangle$ Christoph Purschke $\bullet$ Hinrich Schütze $\triangle$ Barbara Plank $\triangle$
|
| 4 |
+
|
| 5 |
+
$\triangle$ Center for Information and Language Processing (CIS), LMU Munich, Germany
|
| 6 |
+
|
| 7 |
+
Munich Center for Machine Learning (MCML), Munich, Germany
|
| 8 |
+
|
| 9 |
+
$\bullet$ Department of Humanities, University of Luxembourg, Luxembourg
|
| 10 |
+
|
| 11 |
+
$\bullet$ Department of Computer Science, IT University of Copenhagen, Denmark {verena.blaschke, b.plank}@lmu.de
|
| 12 |
+
|
| 13 |
+
# Abstract
|
| 14 |
+
|
| 15 |
+
Natural language processing (NLP) has largely focused on modelling standardized languages. More recently, attention has increasingly shifted to local, non-standardized languages and dialects. However, the relevant speaker populations' needs and wishes with respect to NLP tools are largely unknown. In this paper, we focus on dialects and regional languages related to German - a group of varieties that is heterogeneous in terms of prestige and standardization. We survey speakers of these varieties $(N = 327)$ and present their opinions on hypothetical language technologies for their dialects. Although attitudes vary among subgroups of our respondents, we find that respondents are especially in favour of potential NLP tools that work with dialectal input (particularly audio input), such as virtual assistants, and less so for applications that produce dialectal output, such as machine translation or spellcheckers.
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
Most natural language processing (NLP) research focuses on languages with many speakers, high degrees of standardization and large amounts of available data (Joshi et al., 2020). Only recently, more NLP projects have started to include local, non-standardized languages and dialects. However, different speakers and cultures have different needs. As recently echoed by multiple researchers, the creation of language technologies (LTs) should take into account what the relevant speaker community finds useful (Bird, 2020, 2022; Liu et al., 2022; Mukhija et al., 2021), and communities can differ from one another in that regard (Lent et al., 2022).
|
| 20 |
+
|
| 21 |
+
In this work, we focus on dialects and regional languages<sup>1</sup> closely related to German (for the sake of simplicity, we use 'dialects' to refer to these varieties in this paper). With dialect competence generally being in decline in the German-speaking area,
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Figure 1: Countries and German states in which the respondents' dialects are spoken, with the number of respective respondents, and the overall age distribution.
|
| 25 |
+
|
| 26 |
+
today, dialect speakers usually also speak Standard German, while dialects are often replaced by regiolects – intermediate varieties between standard and dialect (Kehrein, 2019). Speaker attitudes towards dialects vary greatly (Gärtig et al., 2010, pp. 155–167).
|
| 27 |
+
|
| 28 |
+
Although these dialects are predominantly spoken and only few of them have traditional orthographies, many of them are also used in written, digital contexts (Androutsopoulos, 2003). Accordingly, some NLP datasets based (primarily) on such digital data exist, and a small number is also annotated for NLP tasks (Blaschke et al., 2023). Several recent publications feature LTs for German dialects, such as machine translation (Haddow et al., 2013; Honnet et al., 2018; Lambrecht et al., 2022; Aepli et al., 2023a; Her and Kruschwitz, 2024), speech-to-text (Herms et al., 2016; Nigmatulina et al., 2020; Gerlach et al., 2022) and text-to-speech systems (Gutscher et al., 2023), and slot and intent detection for conversational assistants (van der Goot et al., 2021; Aepli et al., 2023b; Winkler et al., 2024; Abboud and Oz, 2024).
|
| 29 |
+
|
| 30 |
+
To investigate what speaker communities are interested in, we survey dialect speakers from different German-speaking areas (Figure 1). We gather
|
| 31 |
+
|
| 32 |
+
a snapshot of their current attitudes towards LTs to answer the following questions: $Q1$ Which dialect technologies do respondents find especially useful (§4.2)? $Q2$ Does this depend on whether the input or output is dialectal, and on whether the LT works with speech or text data (§4.3)? $Q3$ How does this reflect relevant sociolinguistic factors (§4.4)?
|
| 33 |
+
|
| 34 |
+
# 2 Related Work
|
| 35 |
+
|
| 36 |
+
The survey closest to ours in investigating the attitudes of speakers of non-standard language varieties towards LTs is by Lent et al. (2022), who surveyed the actual and desired LT use of speakers of different creoles $(N = 37)$ . They find that the needs vary from speaker community to speaker community, and that speakers who are also highly proficient in the local high-prestige language are less interested in creole LTs. Of the technologies included in the survey, speech-related technologies (transcription and synthesis) are the most popular; machine translation (MT) and question answering software are also desired by multiple communities, while spellcheckers are controversial.
|
| 37 |
+
|
| 38 |
+
Soria et al. (2018) surveyed speakers of four regional European languages about whether and why they use (or do not use) their languages in digital contexts. When asked about the desirability of currently unavailable spellcheckers and MT systems, more respondents judged both as desirable than not, although the exact proportions vary across communities. Millour (2019; 2020, pp. 230, 239) found similar results in surveys of Mauritian Creole $(N = 144)$ and Alsatian speakers $(N = 1,224)$ .
|
| 39 |
+
|
| 40 |
+
Conversely, Way et al. (2022) investigate actual LT use by speakers of different European national languages (91-922 respondents per country). The most commonly used LTs are MT, search engines and spell- or grammar checkers. When respondents do not use specific LTs, this can simply be due to the absence of such technologies for certain languages, but also due to a lack of interest in specific language-LT combinations.
|
| 41 |
+
|
| 42 |
+
Recently, several surveys have also investigated speaker community perspectives regarding LTs for many different indigenous language communities (Mager et al., 2023; Cooper et al., 2024; Dolinska et al., 2024; Tonja et al., 2024). However, these surveys focus on languages with very different sociolinguistic contexts than the ones in our survey and
|
| 43 |
+
|
| 44 |
+
that are unrelated to their respective local high-resource languages.
|
| 45 |
+
|
| 46 |
+
# 3 Questionnaire
|
| 47 |
+
|
| 48 |
+
Our questionnaire is aimed at speakers of German dialects and related regional languages and consists of two main parts: We ask our participants about their dialect, and we ask about their opinions on hypothetical LTs for their dialect. Several of the questions regarding dialect use are inspired by Soria et al. (2018) and Millour (2020), and we choose a similar selection of LTs as Way et al. (2022) ( $\S 4.2$ ). For each technology, we provide a brief definition to make the survey accessible to a broad audience (e.g., 'Speech-to-text systems transcribe spoken language. They are for instance used for automatically generating subtitles or in the context of dictation software.'). We then ask participants to rate on a 5-point Likert scale how useful they would find such a tool for their dialect. We allow respondents to elaborate on their answers in comment fields. The full questionnaire is in Appendix $\S A$ .
|
| 49 |
+
|
| 50 |
+
The questionnaire was written in German and was estimated to take 10–15 minutes to complete.<sup>3</sup> It was online for three weeks in September and October 2023 and was disseminated via word of mouth, social media, mailing lists, and by contacting dialect and heritage societies. Our results are hence based on a convenience sample.
|
| 51 |
+
|
| 52 |
+
# 4 Results
|
| 53 |
+
|
| 54 |
+
We reached 441 people, 327 of whom are self-reported dialect speakers and finished the entire questionnaire – their responses are presented in the following. Detailed answer distributions are in Appendix §A; correlations are in §B.
|
| 55 |
+
|
| 56 |
+
# 4.1 Dialect Background and Attitudes
|
| 57 |
+
|
| 58 |
+
Most of our respondents answer that they have a very good command of their dialect (68%) and acquired it as a mother tongue (71%). Figure 1 shows where the respondents' dialects are spoken (and their age distribution): mostly in Germany (72%), followed by Switzerland (14%) and Austria (6%). Nearly a quarter (24%) each are in
|
| 59 |
+
|
| 60 |
+
their twenties and thirties; almost all others are older. When rating how traditional their dialect is on a scale from 1 (traditional dialect that people from other regions have trouble understanding) to 5 (regionally marked German easily understood by outsiders), the largest group of respondents $(35\%)$ indicated a 2 $(\mu = 2.6, \sigma = 1.1)$ .
|
| 61 |
+
|
| 62 |
+
Just over half of our respondents $(52\%)$ speak their dialect on a daily basis, and $43\%$ indicate that they would like to use their dialect in all areas of life. Most respondents $(70\%)$ value the diversity of their dialect. Nearly two thirds $(65\%)$ are opposed to having a standardized orthography for their dialect. Just over half of the respondents $(53\%)$ say that their dialect is only spoken and not well-suited for written communication – nevertheless, two thirds $(66\%)$ also write their dialect, even if rarely. Many $(63\%)$ find it easy to read dialectal texts written by others. Written dialect is commonly used for communicating with others – the most common writing scenarios are text messages $(57\%, \text{multiple responses possible})$ , followed by letters/emails $(26\%)$ , social media posts and comments $(19\% \text{each})$ – but also for notes to oneself including diary entries $(19\%)$ .
|
| 63 |
+
|
| 64 |
+
About a third $(35\%)$ indicate that they are actively engaged in dialect preservation pursuits (multiple responses possible): $13\%$ as members of dialect preservation societies, $4\%$ as teachers, and $22\%$ in other ways. Write-in comments by the last group point out other language-related professions, but also include speaking the dialect in public or with children as a means of active dialect preservation. We compare the opinions of respondents with and without such dialect engagement in $\S 4.4$ .
|
| 65 |
+
|
| 66 |
+
$14\%$ of our respondents are familiar with at least one LT that already caters to their dialect. Just over half of the respondents $(54\%)$ indicate that their dialect being represented by more LTs would make it more attractive to younger generations, and a smaller group $(31\%)$ says they would use their dialect more often given suitable LTs.
|
| 67 |
+
|
| 68 |
+

|
| 69 |
+
Figure 2: Opinions on potential language technologies for dialects. $STT=$ speech-to-text, $TTS=$ text-to-speech, $dial=$ dialect, $deu=$ German, $oth=$ other languages, $MT=$ machine translation; cannot judge = skipped question.
|
| 70 |
+
|
| 71 |
+
# 4.2 Which dialect LTs are deemed useful?
|
| 72 |
+
|
| 73 |
+
Figure 2 shows our respondents' opinions on LTs (Q1), and Appendix §C presents the average scores per LT when responses are mapped to a numerical scale. While there are diverging opinions on every LT – there is no single technology that (nearly) all respondents consider useful or useless for their dialect – some trends emerge, as we discuss next.<sup>6</sup> Overall, the responses are generally correlated with each other: respondents who think positively/negatively of one technology tend to think similarly about others. Nevertheless, some LTs are overall more popular, and some less so:
|
| 74 |
+
|
| 75 |
+
Virtual assistants and chatbots The LTs most clearly favoured by the dialect speakers in our survey are virtual assistants (such as Siri or Alexa) that can respond to dialectal input (71% in favour, 20% against). Chatbots that can handle dialectal input are less popular, but still deemed useful by a slight majority (52%). Systems that could output dialectal responses are less popular: 48% would
|
| 76 |
+
|
| 77 |
+
find virtual assistants that answer in dialect useful, and $34\%$ think so about chatbots.
|
| 78 |
+
|
| 79 |
+
Speech-to-text and text-to-speech When asked about speech-to-text (STT) software, a majority $(61\%)$ is in favour of systems that transcribe spoken dialect into written Standard German, and a slightly smaller majority $(58\%)$ is in favour of written dialectal output. When it comes to text-to-speech (TTS) systems that synthesize dialect text into a spoken form, the respondents are even more split, with $47\%$ in favour and $35\%$ against.
|
| 80 |
+
|
| 81 |
+
Machine translation We ask for opinions on four different configurations regarding automatic translation of written texts: each possible combination for translation into vs. out of the dialect and from/into Standard German vs. a foreign language. All options are to some degree controversial among the respondents, with translation from the dialect into Standard German being the most popular (52% in favour) and from the dialect into a foreign language the least popular (25% in favour).
|
| 82 |
+
|
| 83 |
+
Search engines Search engines that could deal with dialectal input are controversial, with $43\%$ each in favour of and against this LT, although the negative group holds stronger opinions. Some write-in comments question whether (monolingual) information retrieval would produce useful results or mention finding it easier to write in Standard German rather than in a dialect, but others voice a desire to be able to find results for queries including dialectal terms with no direct German equivalent.
|
| 84 |
+
|
| 85 |
+
Spellcheckers Most respondents $(59\%)$ are opposed to spell- or grammar checkers for their dialect, although a quarter $(25\%)$ is in favour. Several respondents mention opposition to spellcheckers since they want their dialectal writing to exactly reflect the pronunciation and word choices of their local dialect and would be bothered if a spellchecker changed them to an arbitrary standardized version of the dialect.
|
| 86 |
+
|
| 87 |
+
# 4.3 Are there differences for dialect input vs. output and text vs. speech?
|
| 88 |
+
|
| 89 |
+
As seen in the previous section, there is a general tendency to prefer versions of LTs that process dialectal input rather than produce dialectal output (Q2). Several write-in comments voice worries about dialectal output not modelling their dialect accurately enough. Additionally, technologies dealing
|
| 90 |
+
|
| 91 |
+
with spoken language tend to be rated more positively than those focusing on text only.
|
| 92 |
+
|
| 93 |
+
Correlation with attitudes towards orthography Being in favour of a standardized dialect orthography is positively, albeit not very strongly, correlated with being in favour of any technology involving a written version of the dialect and/or (written or spoken) dialectal output (Spearman's $\rho$ values between 0.14 and 0.47 per LT with $p$ -values $< 0.001$ ).
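As a sketch of this kind of analysis (a minimal illustration with invented paired Likert responses, not our survey data):

```python
from scipy.stats import spearmanr

# Hypothetical paired ratings per respondent: attitude towards a standardized
# dialect orthography vs. usefulness of one LT with written dialect output.
orthography_attitude = [1, 2, 5, 4, 2, 3, 5, 1, 4, 3]
written_lt_usefulness = [2, 1, 4, 5, 3, 3, 4, 2, 5, 2]

# Spearman's rho is a rank correlation, appropriate for ordinal Likert data.
rho, p = spearmanr(orthography_attitude, written_lt_usefulness)
print(rho, p)
```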
|
| 94 |
+
|
| 95 |
+
# 4.4 Do attitudes reflect sociolinguistic factors?
|
| 96 |
+
|
| 97 |
+
To address $Q3$ and the heterogeneity of our respondent group, we compare answers between larger subgroups. We summarize the results of $t$ -tests with $p$ -values $< 0.05$ . Appendix §D provides more details, together with two additional comparisons that only have small effect sizes (speaker age and dialect traditionality).
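For illustration, a subgroup comparison of this kind can be computed as below (a minimal sketch with invented usefulness ratings mapped to a 1–5 scale, not our survey data):

```python
from scipy.stats import ttest_ind

# Hypothetical usefulness ratings (1 = not useful ... 5 = useful) for one
# LT, split into the two subgroups being compared.
activists = [5, 4, 4, 5, 3, 5, 4, 4]
non_activists = [3, 2, 4, 3, 3, 2, 4, 3]

# Independent two-sample t-test; differences with p < 0.05 are the ones
# summarized in this section.
statistic, p_value = ttest_ind(activists, non_activists)
print(statistic, p_value)
```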
|
| 98 |
+
|
| 99 |
+
Language activists Since language activists might have overly enthusiastic attitudes compared to the speaker population at large (Soria et al., 2018), we compare those who report involvement in dialect preservation ('activists', $N = 115$ ) to those who do not ( $N = 212$ ). Activists are generally more in favour of LTs for dialects, with statistically significant differences for (any kind of) machine translation, TTS software, spellcheckers, and search engines, as well as for written dialect output options for STT, chatbots and virtual assistants. Removing the activists' responses from our analysis only barely changes the order of preferred LTs (§C).
|
| 100 |
+
|
| 101 |
+
Region Additionally, we compare three large regional subgroups with different sociolinguistic realities. In Germany and Austria, traditional dialects have been partially replaced by more standard-like regiolects, while dialects have high prestige in Switzerland where Standard German is often reserved for writing (Kehrein, 2019; Ender and Kaiser, 2009). Low German, traditionally spoken in parts of Northern Germany and the Eastern Netherlands, is officially recognized as a language and is more distantly related to Standard German than the other varieties our participants speak. Its speaker numbers are in decline, but many Northern Germans think Low German should receive more support in, e.g., public schools (Adler et al., 2016).
|
| 102 |
+
|
| 103 |
+
We compare the opinions of Swiss $(N = 46)$ and Low German $(N = 58)$ respondents to German and Austrian non-Low-German speakers $(N = 191)$ .<sup>7</sup>
|
| 104 |
+
|
| 105 |
+
Our Low German respondents are more in favour of a standardized orthography and of spellcheckers than our other German and Austrian respondents, the Swiss respondents less so. This is unsurprising in that several orthographies have been proposed for Low German, whereas (typically spoken) dialects and (typically written) Standard German exist in a diglossic state in Switzerland. Nevertheless, both groups are more in favour of STT software with dialectal output. The Low German respondents are more in favour of chatbots with dialectal answers, TTS, (any kind of) MT and search engines. Swiss Germans rate virtual assistants that can handle dialectal input as more desirable (87% in favour), and are more in favour of STT software with Standard German output.
|
| 106 |
+
|
| 107 |
+
# 5 Discussion and Conclusion
|
| 108 |
+
|
| 109 |
+
We surveyed speakers of dialect varieties on their attitudes towards LTs. Generally, the survey participants prefer LTs working with dialect input rather than output. They also tend to prefer tools that process speech over those for text (Q2). This is consistent with Chrupała's (2023) argument that NLP should focus more on spoken language to better represent actual language use. It also reflects the complex, often conflicting attitudes speakers of multiple varieties have towards competing linguistic and social norms. Consequently, the most popular potential dialect LTs (Q1) in our survey process spoken dialectal input: virtual assistants with dialect input and speech-to-text systems.
|
| 110 |
+
|
| 111 |
+
However, like Lent et al. (2022), we find that different speaker communities vary in their attitudes towards LTs $(Q3)$ . For instance, opinions on the standardization of a dialect are a relevant factor regarding the desirability of written LTs. Nevertheless, the acceptance and rejection of LTs is related to individual factors beyond just attitudes, e.g., experience with and trust in digital technology.
|
| 112 |
+
|
| 113 |
+
We hope that our study inspires other NLP researchers to actively consider the wants and needs of the relevant speaker communities. Based on the results of this study, we also encourage the dialect NLP community to pursue more work on spoken language processing.
|
| 114 |
+
|
| 115 |
+
# Ethical Considerations
|
| 116 |
+
|
| 117 |
+
We only collected responses from participants who consented to having their data saved and analyzed for research purposes. We did not ask for personally identifying information. We store responses on a local server and only share results based on aggregate analyses. Appendix §A contains the full questionnaire including the introduction where we describe the purpose of the study and explain what data we collect and how we use the data.
|
| 118 |
+
|
| 119 |
+
Participation was completely anonymous, voluntary and with no external reward. We do not see any particular risks associated with this work.
|
| 120 |
+
|
| 121 |
+
# Limitations

Our results are based on a convenience sample; neither the geographic nor the age distribution is representative of the population at large (dialect-speaking or not). Language activists are over-represented (hence our additional analyses in §4.4 and Appendices §C and §D), and participating in the survey may have been of particular interest to people who feel (in one way or another) strongly about the topic of dialects and technology. Even so, our respondents are not a monolith in their opinions, and we can see meaningful differences between the relative popularity of different technologies.

We aimed to keep participation effort low and therefore limited the number of questions we included. We considered asking "Would you use $X$ if it existed?" in addition to "Would you find $X$ useful?" to explicitly disentangle the participants' own needs from what are possibly the perceived needs of the community. We decided against this in order to keep the questionnaire as short as possible and because we were unsure how accurate such assessments would be.

The scale of our answer possibilities uses "not useful" as the opposite of "useful." However, it would be interesting to instead use a scale from "harmful" to "useful" in future surveys, in order to get a better impression of whether respondents who deem an LT useless in our version of the survey find it actively harmful or merely uninteresting.

To minimize the total time needed to fill out the questionnaire and to guarantee the privacy of the respondents after asking them which specific dialect they speak (used later to identify the Low German speakers), we intentionally kept additional demographic questions to a minimum and did not ask about education, income, gender, or similar variables.

As this survey is based on self-reporting, we expect discrepancies between reported and actual opinions and behaviour. Since participation was anonymous and entirely voluntary with no external reward, we think it unlikely that participants lied about their opinions. It is likely, though, that (especially younger) participants overstate their dialect competence or the traditionality of their dialect, in line with overall dialect dynamics in German (Purschke, 2011).

# Acknowledgements

We thank everybody who participated in or shared the survey. We also thank Yves Scherrer and Frauke Kreuter for their advice, and the MaiNLP and CIS members as well as the anonymous reviewers for their feedback.

This research is supported by the ERC Consolidator Grant DIALECT 101043235. We also gratefully acknowledge partial funding by the European Research Council (ERC #740516).

# References

Khadige Abboud and Gokmen Oz. 2024. Towards equitable natural language understanding systems for dialectal cohorts: Debiasing training data. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 16487-16499, Torino, Italia. ELRA and ICCL.

Astrid Adler, Christiane Ehlers, Reinhard Goltz, Andrea Kleene, and Albrecht Plewnia. 2016. Status und Gebrauch des Niederdeutschen 2016. Erste Ergebnisse einer repräsentativen Erhebung. Technical report, Institut für Deutsche Sprache, Mannheim.

Noëmi Aepli, Chantal Amrhein, Florian Schottmann, and Rico Sennrich. 2023a. A benchmark for evaluating machine translation metrics on dialects without standard orthography. In Proceedings of the Eighth Conference on Machine Translation, pages 1045-1065, Singapore. Association for Computational Linguistics.

Noëmi Aepli, Çağrı Çöltekin, Rob van der Goot, Tommi Jauhiainen, Mourhaf Kazzaz, Nikola Ljubešić, Kai North, Barbara Plank, Yves Scherrer, and Marcos Zampieri. 2023b. Findings of the VarDial evaluation campaign 2023. In Tenth Workshop on NLP for Similar Languages, Varieties and Dialects (VarDial 2023), pages 251-261, Dubrovnik, Croatia. Association for Computational Linguistics.

Jannis Androutsopoulos. 2003. Online-Gemeinschaften und Sprachvariation. Soziolinguistische Perspektiven auf Sprache im Internet. Zeitschrift für germanistische Linguistik, 31(2).

Steven Bird. 2020. Decolonising speech and language technology. In Proceedings of the 28th International Conference on Computational Linguistics, pages 3504-3519, Barcelona, Spain (Online). International Committee on Computational Linguistics.

Steven Bird. 2022. Local languages, third spaces, and other high-resource scenarios. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7817-7829, Dublin, Ireland. Association for Computational Linguistics.

Verena Blaschke, Hinrich Schütze, and Barbara Plank. 2023. A survey of corpora for Germanic low-resource languages and dialects. In Proceedings of the 24th Nordic Conference on Computational Linguistics (NoDaLiDa), pages 392-414, Tórshavn, Faroe Islands. University of Tartu Library.

Grzegorz Chrupała. 2023. Putting natural in Natural Language Processing. In Findings of the Association for Computational Linguistics: ACL 2023, pages 7820-7827, Toronto, Canada. Association for Computational Linguistics.

Ned Cooper, Courtney Heldreth, and Ben Hutchinson. 2024. "It's how you do things that matters": Attending to process to better serve indigenous communities with language technologies. In Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2: Short Papers), pages 204-211, St. Julian's, Malta. Association for Computational Linguistics.

Joanna Dolinska, Shekhar Nayak, and Sumittra Suraratdecha. 2024. Akha, Dara-ang, Karen, Khamu, Mlabri and Urak Lawoi' language minorities' subjective perception of their languages and the outlook for development of digital tools. In Proceedings of the Seventh Workshop on the Use of Computational Methods in the Study of Endangered Languages, pages 94-99, St. Julians, Malta. Association for Computational Linguistics.

Andreas Ender and Irmgard Kaiser. 2009. Zum Stellenwert von Dialekt und Standard im österreichischen und Schweizer Alltag: Ergebnisse einer Umfrage. Zeitschrift für germanistische Linguistik, 37(2):266-295.

Anne-Kathrin Gärtig, Albrecht Plewnia, and Astrid Rothe. 2010. Wie Menschen in Deutschland über Sprache denken. Ergebnisse einer bundesweiten Repräsentativerhebung zu aktuellen Spracheinstellungen. Institut für Deutsche Sprache, Mannheim.

Johanna Gerlach, Jonathan Mutal, and Pierrette Bouillon. 2022. Producing Standard German subtitles for Swiss German TV content. In Ninth Workshop on Speech and Language Processing for Assistive Technologies (SLPAT-2022), pages 37-43, Dublin, Ireland. Association for Computational Linguistics.

Antton Gurrutxaga Hernaiz and Klara Ceberio Berger. 2017. Basque - a digital language? Technical report. Reports on Digital Language Diversity in Europe.

Lorenz Gutscher, Michael Pucher, and Víctor Garcia. 2023. Neural speech synthesis for Austrian dialects with standard German grapheme-to-phoneme conversion and dialect embeddings. In Proceedings of the 2nd Annual Meeting of the Special Interest Group on Under-resourced Languages (SIGUL 2023).

Barry Haddow, Adolfo Hernandez, Friedrich Neubarth, and Harald Trost. 2013. Corpus development for machine translation between standard and dialectal varieties. In Proceedings of the Workshop on Adaptation of Language Resources and Tools for Closely Related Languages and Language Variants, pages 7-14, Hissar, Bulgaria. INCOMA Ltd., Shoumen, Bulgaria.

Wan-Hua Her and Udo Kruschwitz. 2024. Investigating neural machine translation for low-resource languages: Using Bavarian as a case study. In Proceedings of the 3rd Annual Meeting of the Special Interest Group on Under-resourced Languages.

Robert Herms, Laura Seelig, Stefanie Münch, and Maximilian Eibl. 2016. A corpus of read and spontaneous Upper Saxon German speech for ASR evaluation. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 4648-4651, Portorož, Slovenia. European Language Resources Association (ELRA).

Davyth Hicks. 2017. Breton - a digital language? Technical report. Reports on Digital Language Diversity in Europe.

Pierre-Edouard Honnet, Andrei Popescu-Belis, Claudiu Musat, and Michael Baeriswyl. 2018. Machine translation of low-resource spoken dialects: Strategies for normalizing Swiss German. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).

Pratik Joshi, Sebastin Santy, Amar Budhiraja, Kalika Bali, and Monojit Choudhury. 2020. The state and fate of linguistic diversity and inclusion in the NLP world. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6282-6293, Online. Association for Computational Linguistics.

Roland Kehrein. 2019. Vertical language change in Germany: Dialects, regiolects, and standard German. In Stanley D. Brunn and Roland Kehrein, editors, Handbook of the Changing World Language Map. Springer International Publishing.

Louisa Lambrecht, Felix Schneider, and Alexander Waibel. 2022. Machine translation from standard German to Alemannic dialects. In Proceedings of the 1st Annual Meeting of the ELRA/ISCA Special Interest Group on Under-Resourced Languages, pages 129-136, Marseille, France. European Language Resources Association.

Heather Lent, Kelechi Ogueji, Miryam de Lhoneux, Orevaoghene Ahia, and Anders Søgaard. 2022. What a creole wants, what a creole needs. In Proceedings of the Thirteenth Language Resources and Evaluation Conference, pages 6439-6449, Marseille, France. European Language Resources Association.

Zoey Liu, Crystal Richardson, Richard Hatcher, and Emily Prud'hommeaux. 2022. Not always about you: Prioritizing community needs when developing endangered language technology. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3933-3944, Dublin, Ireland. Association for Computational Linguistics.

Manuel Mager, Elisabeth Mager, Katharina Kann, and Ngoc Thang Vu. 2023. Ethical considerations for machine translation of indigenous languages: Giving a voice to the speakers. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4871-4897, Toronto, Canada. Association for Computational Linguistics.

Alice Millour. 2019. Getting to know the speakers: a survey of a non-standardized language digital use. In 9th Language & Technology Conference: Human Language Technologies as a Challenge for Computer Science and Linguistics, Poznań, Poland.

Alice Millour. 2020. Myriadisation de ressources linguistiques pour le traitement automatique de langues non standardisées. Ph.D. thesis, Sorbonne Université.

Namrata Mukhija, Monojit Choudhury, and Kalika Bali. 2021. Designing language technologies for social good: The road not taken. Computing Research Repository, arXiv:2110.07444.

Iuliia Nigmatulina, Tannon Kew, and Tanja Samardžić. 2020. ASR for non-standardised languages with dialectal variation: the case of Swiss German. In Proceedings of the 7th Workshop on NLP for Similar Languages, Varieties and Dialects, pages 15-24, Barcelona, Spain (Online). International Committee on Computational Linguistics (ICCL).

Christoph Purschke. 2011. Regionalsprache und Hörerurteil. Grundzüge einer perzeptiven Variationslinguistik. Steiner, Stuttgart.

Irene Russo and Claudia Soria. 2017. Sardinian - a digital language? Technical report. Reports on Digital Language Diversity in Europe.

Tuomo Salonen. 2017. Karelian - a digital language? Technical report. Reports on Digital Language Diversity in Europe.

Jürgen Erich Schmidt, Joachim Herrgen, Roland Kehrein, Alfred Lameli, and Hanna Fischer. 2020–. Regionalsprache.de (REDE III). Forschungsplattform zu den modernen Regionalsprachen des Deutschen. Edited by Robert Engsterhold, Heiko Girnth, Simon Kasper, Juliane Limper, Georg Oberdorfer, Tillmann Pistor, Anna Wolanska. Assisted by Dennis Beitel, Milena Gropp, Maria Luisa Krapp, Vanessa Lang, Salome Lipfert, Jeffrey Pheiff, Bernd Vielsmeier.

Claudia Soria, Valeria Quochi, and Irene Russo. 2018. The DLDP survey on digital use and usability of EU regional and minority languages. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).

Atnafu Lambebo Tonja, Fazlourrahman Balouchzahi, Sabur Butt, Olga Kolesnikova, Hector Ceballos, Alexander Gelbukh, and Thamar Solorio. 2024. NLP progress in Indigenous Latin American languages.

Rob van der Goot, Ibrahim Sharaf, Aizhan Imankulova, Ahmet Üstün, Marija Stepanović, Alan Ramponi, Siti Oryza Khairunnisa, Mamoru Komachi, and Barbara Plank. 2021. From masked language modeling to translation: Non-English auxiliary tasks improve zero-shot spoken language understanding. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2479-2497, Online. Association for Computational Linguistics.

Andy Way, Georg Rehm, Jane Dunne, Jan Hajič, Teresa Lynn, Maria Giagkou, Natalia Resende, Tereza Vojtěchová, Stelios Piperidis, Andrejs Vasiljevs, Aivars Berzins, Gerhard Backfried, Marcin Skowron, Jose Manuel Gomez-Perez, Andres Garcia-Silva, Martin Kaltenböck, and Artem Revenko. 2022. Report on all external consultations and surveys. Technical report, European Language Equality.

Miriam Winkler, Virginija Juozapaityte, Rob van der Goot, and Barbara Plank. 2024. Slot and intent detection resources for Bavarian and Lithuanian: Assessing translations vs natural queries to digital assistants. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 14898-14915, Torino, Italy. ELRA and ICCL.

# A Questionnaire

In this section, we reproduce the questions and answers from our survey, in the original wording as well as in translation. Translations are in grey italics, remarks about the questionnaire are in black italics. Answer options that end with a colon (:) came with an optional text input field in the questionnaire. All questions except for the first two could be skipped without answering.

Herzlich willkommen, servus, gruezi & moin!

Sprachtechnologie ist momentan allgegenwärtig, ob bei Übersetzungsprogrammen, Chatbots oder anderen Anwendungen. Hauptsächlich unterstützen diese Anwendungen lediglich Standardsprachen – was nicht unbedingt dem entspricht, wie wir im Alltag Sprache verwenden.

Daher möchten wir herausfinden, wie Sie als Sprecher\*innen von Dialekten und Regionalsprachen möglicher Sprachtechnologie für Ihre Sprachform gegenüberstehen: Welche Anwendungen halten Sie für wünschenswert bzw. unnötig?

Welcome and hello [in different dialects]!

Language technology is currently omnipresent - be it in the context of translation software, chatbots or other applications. Such applications primarily support standard languages - which is not necessarily how we use language in our everyday lives.

Because of this, we would like to find out what you as speakers of dialects and regional languages think of potential technologies for your language variety: which applications do you find desirable or useless?

Das Ausfüllen des Fragebogens dauert etwa 10-15 Minuten.

Wir behandeln Ihre Antworten vertraulich und veröffentlichen diese nur in anonymisierter Form und ohne dass Rückschlüsse auf Ihre Person gezogen werden können.

Genauere Details:

Ziel der Befragung ist es zum einen herauszufinden, ob es Unterschiede zwischen den Arten von Sprachtechnologien gibt, die Dialektsprecher\*innen tendenziell als nützlich bzw. nutzlos bewerten. Zum anderen möchten wir herausfinden, ob ein statistischer Zusammenhang zwischen diesen Antworten und dem Dialekthintergrund und -gebrauch der Befragten besteht.

Die Antworten werden auf einem Server der LMU in München gespeichert. Wir speichern dazu nur Ihre Antworten und den Antwortzeitpunkt (um die typische Ausfülldauer besser einzuschätzen), nicht aber Ihre IP-Adresse. Wir geben die Daten nicht an Dritte weiter, sondern veröffentlichen lediglich Ergebnisse, die auf Aggregatdaten und statistischen Analysen beruhen. Zudem zitieren wir gegebenenfalls aus (optional gegebenen) Kommentarfeld-Antworten.

Kontaktmöglichkeit bei Fragen oder Kommentaren zu dieser Umfrage: [Contact data of first author].

Vielen Dank für Ihre Teilnahme!

This questionnaire takes about 10-15 minutes to fill out.

We treat your answers as confidential and only share them as anonymized data that do not allow drawing any inferences about your identity.

More detailed information:

The goal of this survey is firstly to determine whether there are differences between the types of language technologies that dialect speakers tend to find useful or useless. Additionally, we would like to find out whether there is a statistical correlation between the answers and the dialect background of the participants.

We store the answers on an LMU server in Munich. This only includes storing your answers and the time at which the questions are answered (to better estimate the typical response duration), but not your IP address. We do not share your data with third parties, but only share results based on aggregated data and statistical analyses. Additionally, we might cite (optional) write-in answers from comment fields.

Contact person in case of questions about or comments on this study: [Contact data of first author].

Thank you very much for participating!

Ich stimme zu, dass meine Antworten wie oben beschrieben zu Forschungszwecken gespeichert und ausgewertet werden. I consent to my answers being stored and analyzed for research purposes as outlined above.

The survey only progresses if this box is checked.

In dieser Umfrage untersuchen wir Sprachen und Sprachformen, die sich deutlich vom Hochdeutschen unterscheiden. Damit meinen wir mit dem Deutschen verwandte Regionalsprachen sowie Dialekte, Mundarten und Platt-Varianten, die meist für eine kleine Region typisch sind und von Außenstehenden nicht ohne Weiteres verstanden werden können. Ein paar Beispiele davon sind das Eifeler Platt, Allgäuerisch, Bairisch oder Nordfriesisch. Der Einfachheit halber verwenden wir im Folgenden „Dialekt“ als Sammelbegriff für all diese Sprachformen. In this survey, we focus on languages and language varieties that are clearly distinct from Standard German. To be precise, we are interested in regional languages related to German as well as dialects<sup>8</sup> that usually are typical for a small region and cannot easily be understood by outsiders. Some examples are Eifelplatt, Allgäu dialects, Bavarian and North Frisian. For the sake of simplicity, we will use "dialect" as umbrella term for all of these language varieties in the following.

This introduction is partially based on the one from the REDE project surveys (Schmidt et al., 2020–).

1. Können Sie einen deutschen Dialekt sprechen? Can you speak a German dialect?

The 16 respondents who answered 'no' are excluded from the analysis. The survey automatically ended for them, showing the message: "Alle weiteren Fragen richten sich nur an SprecherInnen eines deutschen Dialekts bzw. einer mit dem Deutschen nahe verwandten Regionalsprache. Vielen Dank für Ihre Teilnahme!" "All further questions are only for speakers of a German dialect or a regional language closely related to German. Thank you for participating!"

2. Um welchen Dialekt handelt es sich? Which dialect specifically?

327 write-in answers.

3. Wann haben Sie diesen Dialekt gelernt? When did you learn this dialect?

4. In welchem Land befindet sich der Ort, an dem Ihr Dialekt gesprochen wird (z.B. Ihr Heimatort)? In which country is the location where your dialect is spoken (e.g., your hometown)?

See Figure 1.

5. In welchem Bundesland befindet sich dieser Ort? In which German state is this location?

Only asked if the previous answer is 'Germany'. See Figure 1.

6. Wie sehr entspricht Ihr Dialekt dem traditionellen Dialekt des Ortes? How much does your dialect resemble the traditional dialect of this location?

*1 - Mein Dialekt ist sehr traditionell und für Außenstehende aus anderen Regionen sehr schwer zu verstehen. 1 - My dialect is very traditional and very hard to understand for outsiders from other regions.

**5 - Mein Dialekt ist mehr regional gefärbtes Deutsch, das auch von Außenstehenden recht einfach verstanden wird. 5 - My dialect is more like regionally marked German that is relatively easily understood by outsiders.

7. Wie häufig sprechen Sie Ihren Dialekt? How often do you speak your dialect?

The answer options are based on those in the surveys summarized by Soria et al. (2018).

8. Schreiben Sie manchmal Ihren Dialekt? Do you ever write your dialect?

This question and the next one are modelled after questions by Millour (2020, pp. 228, 237-238).

*No, I don't have any opportunity for this

**Nein, mein Dialekt ist eine gesprochene Sprachform und ich möchte ihn nicht schreiben. No, my dialect is a spoken form of language and I don't want to write it

9. Was schreiben Sie in Ihrem Dialekt? (Mehrfachantworten möglich) What do you write in your dialect? (Multiple answers possible)

Only asked if the previous answer is 'yes'. 217 participants responded:

*Nachrichten in Chatprogrammen, Messengern (wie WhatsApp), SMS Texts in messaging apps (like WhatsApp), text messages

**Sachtexte, z.B. als Blogposts oder auf Wikipedia Non-fiction texts, e.g., blog posts or Wikipedia articles

***Notes to myself, diary entries

10. Setzen Sie sich aktiv für den Erhalt Ihres Dialekts ein? (Mehrfachantworten möglich) Are you actively involved in preserving your dialect? (Multiple answers possible)

This question is based on questions in the surveys by Soria et al. (2018) and Millour (2020, pp. 227, 235). 323 respondents answered:

*Yes, in a dialect preservation society

11. Wie alt sind Sie? How old are you?

12. Weitere Kommentare zu Ihrem Dialekt oder zu den vorherigen Fragen: (Optional) Additional comments on your dialect or the preceding questions: (Optional)

61 write-in answers.

In diesem Abschnitt fragen wir Sie zu Ihrer Meinung zu verschiedenen dialektbezogenen Themen. Dabei gibt es keine richtigen/falschen oder erwünschten/unerwünschten Antworten, sondern wir sind an Ihrer persönlichen Meinung interessiert. In this section we ask you about your opinion on different dialect-related topics. There are no right/wrong or desired/undesired answers, we are simply interested in your personal opinion.

13. Stimmen Sie den folgenden Aussagen zu? Do you agree with the following statements?

Statements presented in a randomized order:

- Die Vielfalt der verschiedenen Ausprägungen meines Dialekts ist eine Stärke. The diversity of the different varieties of my dialect is a strength.
- Mein Dialekt ist in erster Linie eine gesprochene Sprachform und nicht für die schriftliche Kommunikation geeignet. My dialect is primarily a spoken form of language and not suited for written communication. This question is based on an answer option in the survey by Millour (2020, pp. 228, 237) (see also question 8 in this appendix).
- Ich möchte meinen Dialekt in allen Lebensbereichen verwenden. I'd like to be able to use my dialect in any aspect of life. This question is based on a question by Soria et al. (2018) and Millour (2020, pp. 229, 239).
- Wenn ich einen Text lese, den jemand anderes in meinem Dialekt verfasst hat, fällt es mir schwer, ihn zu verstehen. When I read a text that someone else wrote in my dialect, I have trouble understanding it.
- Es sollte eine standardisierte Rechtschreibung für unseren Dialekt geben. There should be a standardized orthography for my dialect.

Answer options:

- Ja, auf jeden Fall Yes, absolutely
- Eher ja Rather yes
- Weder noch Neither/nor
- Eher nein Rather no
- Nein, gar nicht Absolutely not
- Keine Angabe Prefer not to say

The answer distributions (in %) are as follows:

14. Weitere Kommentare zu diesem Abschnitt: (Optional) Additional comments on this section: (Optional)

48 write-in answers.

In diesem Abschnitt fragen wir Sie zu Ihrer Meinung zu verschiedenen Sprachtechnologien. Dabei gibt es keine richtigen/falschen oder erwünschten/unerwünschten Antworten, sondern wir sind an Ihrer persönlichen Meinung interessiert. In this section we ask you about your opinion on different language technologies. There are no right/wrong or desired/undesired answers, we are simply interested in your personal opinion.

Übersetzungsprogramme erstellen eine automatische Übersetzung von Text aus einer Sprache in eine andere Sprache. Beispiele davon sind DeepL oder Google Translate. Machine translation software automatically translates text from one language into another. Examples are DeepL or Google Translate.

15. Stimmen Sie den folgenden Aussagen zu? Es sollte Übersetzungsprogramme geben, ... Do you agree with the following statements? There should be translation software...

- ...die hochdeutsche Texte in unseren Dialekt übersetzen. ...that translates Standard German texts into my dialect.
- ...die Texte aus anderen Sprachen in unseren Dialekt übersetzen. ...that translates texts from other languages into my dialect.
- ...die Texte aus meinem Dialekt ins Hochdeutsche übersetzen. ...that translates texts from my dialect into Standard German.
- ...die Texte aus meinem Dialekt in andere Sprachen übersetzen. ...that translates texts from my dialect into other languages.

Answer options:

- Ja, unbedingt Yes, absolutely
- Eher ja Rather yes
- Weder noch Neither/nor
- Eher nein Rather no
- Nein, das halte ich nicht für sinnvoll No, I don't think this is useful
- Das kann ich nicht bewerten I cannot judge this

See Figure 2 for answer distributions.

16. Welcher Aussage stimmen Sie mehr zu? Wenn ich einen Text in unseren Dialekt übersetzen lasse, ... With which statement do you agree more? When a text is translated into my dialect, ...

<table><tr><td>Answer</td><td>Respondents</td></tr><tr><td>1 (*)</td><td>52</td></tr><tr><td>2</td><td>61</td></tr><tr><td>3</td><td>67</td></tr><tr><td>4</td><td>32</td></tr><tr><td>5 (**)</td><td></td></tr><tr><td>Ich möchte keinen geschriebenen Dialekt-Output. (***)</td><td>43</td></tr></table>

*1 - ...soll die Übersetzung sprachlich meiner (geschriebenen) Version des Dialekts voll und ganz entsprechen. 1 - ...the translation should fully correspond to my own (written) version of the dialect. **5 - ...ist es mir egal, welcher (geschriebenen) Form meines Dialekts die Übersetzung sprachlich entspricht. 5 - ...I do not care which (written) version of my dialect the translation corresponds to. ***I do not want any written dialect output.

17. Weitere Kommentare zu Übersetzungsprogrammen: (Optional) Additional comments on machine translation software: (Optional) 41 write-in answers.

Rechtschreib- und Grammatikkorrekturprogramme markieren oder korrigieren mögliche Fehler in Texten, zum Beispiel bei der Eingabe in Microsoft Word. Spell- and grammar checkers highlight or fix potential errors in texts, for instance when writing text in Microsoft Word.

18. Stimmen Sie der folgenden Aussage zu? Es sollte Rechtschreib- und Grammatikkorrekturprogramme für unseren Dialekt geben. Do you agree with the following statement? There should be spell- and grammar checkers for my dialect.

Same answer options as for question 15. See Figure 2 for the answer distribution.

19. Weitere Kommentare zu Rechtschreib- und Grammatikkorrekturprogrammen: (Optional) Additional comments on spell- and grammar checkers: (Optional)

51 write-in answers.

Transkriptionsprogramme verschriftlichen gesprochene Sprache. Sie finden beispielsweise bei automatisch erzeugten Untertiteln oder bei Diktiergeräten Einsatz. Speech-to-text systems transcribe spoken language. They are for instance used for automatically generating subtitles or in the context of dictation software.

20. Stimmen Sie den folgenden Aussagen zu? Es sollte Transkriptionsprogramme geben, ... Do you agree with the following statements? There should be speech-to-text software...

- ...die Audioaufnahmen in meinem Dialekt als geschriebenes Hochdeutsch wiedergeben. ...that transcribes audio recorded in my dialect as written Standard German.
- ...die Audioaufnahmen in meinem Dialekt als geschriebenen Dialekt wiedergeben. ...that transcribes audio recorded in my dialect as written dialect.

Same answer options as for question 15. See Figure 2 for answer distributions.

21. Weitere Kommentare zu Transkriptionsprogrammen: (Optional) Additional comments on speech-to-text software: (Optional)

33 write-in answers.

Text-to-Speech-Systeme funktionieren umgekehrt wie Transkriptionsprogramme: Sie erzeugen gesprochene Versionen von geschriebenem Text. Ein Beispiel dafür sind Bildschirmleseprogramme. Text-to-speech systems work in the opposite direction to speech-to-text systems: they generate spoken versions of written text. One example is screen readers.

22. Stimmen Sie der folgenden Aussage zu? Es sollte Text-to-Speech-Systeme geben, die unseren Dialekt von geschriebener Form in gesprochene Form umwandeln. Do you agree with the following statement? There should be text-to-speech systems that synthesize dialectal audio for text written in my dialect.

Same answer options as for question 15. See Figure 2 for the answer distribution.

23. Weitere Kommentare zu Text-to-Speech-Systemen: (Optional) Additional comments on text-to-speech systems: (Optional)

22 write-in answers.

Sprachassistenten sind Programme, die geschriebene oder gesprochene Fragen beantworten bzw. Befehle ausführen, zum Beispiel Siri oder Alexa.

Eng verwandt damit sind Chatbots: Programme, die textbasierte Dialoge ermöglichen, bei denen ein Programm Antworten auf Texteingaben von Nutzer\*innen erzeugt. Ein Beispiel dafür ist ChatGPT.

Digital assistants are programs that answer written or spoken questions and carry out commands, like Siri or Alexa.

Chatbots are closely related. They are programs that enable text-based dialogues, wherein a program generates answers to text input from users. An example is ChatGPT.

24. Stimmen Sie den folgenden Aussagen zu? Do you agree with the following statements?

- Es sollte Sprachassistenten geben, die man mit Fragen/Befehlen in meinem Dialekt bedienen kann. There should be digital assistants that you can query with questions/commands in my dialect.
- Es sollte Sprachassistenten geben, die in meinem Dialekt auf Fragen/Befehle antworten. There should be digital assistants that use my dialect when replying to questions/commands.
- Es sollte Chatbots geben, die auf Eingaben in meinem Dialekt antworten können. There should be chatbots that can respond to inputs written in my dialect.
- Es sollte Chatbots geben, deren Antworten in meinem Dialekt verfasst sind. There should be chatbots whose replies are written in my dialect.

Same answer options as for question 15. See Figure 2 for the answer distributions.

25. Welcher Aussage stimmen Sie mehr zu? Wenn ein Sprachassistent oder ein Chatbot Antworten in meinem Dialekt erzeugt, ... With which statement do you agree more? When a digital assistant or chatbot generates replies in my dialect, ...

<table><tr><td>Answer</td><td>Respondents</td></tr><tr><td>1 (*)</td><td>46</td></tr><tr><td>2</td><td>69</td></tr><tr><td>3</td><td>61</td></tr><tr><td>4</td><td>28</td></tr><tr><td>5 (**)</td><td>55</td></tr><tr><td>Ich möchte keinen geschriebenen Dialekt-Output. (***)</td><td>68</td></tr></table>

*1 - ...sollen die Antworten sprachlich meiner (geschriebenen oder gesprochenen) Version des Dialekts voll und ganz entsprechen. 1 - ...the replies should fully correspond to my own (written or spoken) version of the dialect.

**5 - ...ist es mir egal, welcher (geschriebenen oder gesprochenen) Form meines Dialekts die Antworten sprachlich entsprechen. 5 - ...I do not care which (written or spoken) version of my dialect the replies correspond to.

***I do not want any written dialect output.

26. Weitere Kommentare zu Sprachassistenten oder Chatbots: (Optional) Additional comments on digital assistants or chatbots: (Optional)

25 write-in answers.

Suchmaschinen sind Programme, die nach einer Suchanfrage Datenbanken oder das Internet nach relevanten Ergebnissen durchsuchen, wie zum Beispiel Google. Search engines are programs that search a database or the web based on a search query, like Google.

27. Stimmen Sie der folgenden Aussage zu? Es sollte Suchmaschinen geben, bei denen ich meinen Dialekt als Eingabesprache verwenden kann. Do you agree with the following statement? There should be search engines that support queries in my dialect.

Same answer options as for question 15. See Figure 2 for the answer distribution.

28. Weitere Kommentare zu Suchmaschinen: (Optional) Additional comments on search engines: (Optional)

15 write-in answers.

29. Sind Ihnen bereits Sprachtechnologien bekannt, die Ihren Dialekt unterstützen? Are you already aware of any language technologies for your dialect?

30. Stimmen Sie den folgenden Aussagen zu? Do you agree with the following statements?

Statements presented in a randomized order:

- Sprachtechnologie, die ich für sinnvoll halte, nutze ich auch selbst. If I find language technology useful, I also use it myself.
- Eine größere Unterstützung durch Sprachtechnologie würde meinen Dialekt attraktiver für jüngere Generationen machen. If my dialect were supported more by language technologies, the dialect would be more appealing for younger generations. This question is modelled after questions in the surveys by Soria et al. (2018) and Millour (2020, p. 229), asking about the hypothesized impact of a language's increased use online on the appeal for younger people.
- Wenn ich Sprachtechnologie für meinen Dialekt hätte, würde ich ihn häufiger verwenden. If I had language technology for my dialect, I would use my dialect more often.

See question 13 for the answer options. Answer distributions (in %) are as follows:

31. Weitere Kommentare zum Thema Sprachtechnologie oder allgemein zu dieser Umfrage: (Optional) Additional comments on language technology or generally regarding this survey: (Optional)

29 write-in answers.

32. Wie haben Sie von dieser Studie erfahren? How did you find out about this study?

*Durch Forschende am Centrum für Informations- und Sprachverarbeitung (LMU) Via researchers at the Center for Information and Language Processing (LMU)

**Via dialect preservation societies

***(Otherwise) via acquaintances

Vielen Dank für Ihre Teilnahme! Wir möchten uns ganz herzlich für Ihre Mithilfe bedanken. Ihre Antworten wurden gespeichert; Sie können das Browser-Fenster nun schließen. Thank you for participating! We would like to thank you very much for your help. Your answers have been saved; you can close the browser window now.

<table><tr><td rowspan="2">Rank</td><td colspan="2">All</td><td colspan="2">Non-activists only</td></tr><tr><td>LTs</td><td>Mean</td><td>LTs</td><td>Mean</td></tr><tr><td>1</td><td>Assistant in (24)</td><td>3.75</td><td>Assistant in (24)</td><td>3.80</td></tr><tr><td>2</td><td>STT deu (20)</td><td>3.46</td><td>STT deu (20)</td><td>3.48</td></tr><tr><td>3</td><td>STT dial (20)</td><td>3.38</td><td>Chatbot in (24)</td><td>3.25</td></tr><tr><td>4</td><td>Chatbot in (24)</td><td>3.29</td><td>STT dial (20)</td><td>3.24</td></tr><tr><td>5</td><td>MT dial→deu (15)</td><td>3.17</td><td>Assistant out (24)</td><td>3.01</td></tr><tr><td>6</td><td>Assistant out (24)</td><td>3.14</td><td>MT dial→deu (15)</td><td>3.00</td></tr><tr><td>7</td><td>TTS (22)</td><td>3.13</td><td>TTS (22)</td><td>2.99</td></tr><tr><td>8</td><td>Search engines (27)</td><td>2.94</td><td>Search engines (27)</td><td>2.69</td></tr><tr><td>9</td><td>Chatbot out (24)</td><td>2.76</td><td>Chatbot out (24)</td><td>2.59</td></tr><tr><td>10</td><td>MT dial→oth (15)</td><td>2.73</td><td>MT dial→oth (15)</td><td>2.59</td></tr><tr><td>11</td><td>MT deu→dial (15)</td><td>2.71</td><td>MT deu→dial (15)</td><td>2.53</td></tr><tr><td>12</td><td>MT oth→dial (15)</td><td>2.39</td><td>MT oth→dial (15)</td><td>2.17</td></tr><tr><td>13</td><td>Spellcheckers (18)</td><td>2.38</td><td>Spellcheckers (18)</td><td>2.08</td></tr></table>

Table 1: Language technologies sorted by mean score given by all respondents and non-activists only (participants who did not indicate involvement in language preservation, §4.4). 'Mean' refers to the mean Likert score (see text). Numbers behind the LT names refer to questions in §A.

# B Correlation Scores

Figure 3 shows the Spearman's rank correlation coefficients $(\rho)$ between the variables investigated in the questionnaire, with $\rho$ values ranging from $-0.50$ to $+0.77$.

For the correlation analysis and the subgroup comparisons (Appendix §D), the variable values are mapped so that higher values correspond to higher agreement with the statements in questions 13, 15, 18, 20, 22, 24, 27 and 30, and to higher dialect competence (question 1) and usage frequency (7), higher age (11) and age of dialect acquisition (3), more traditional dialects (6), and greater openness towards variation in the output of MT (16) and digital assistants/chatbots (25). The variable # writing contexts encodes the number of answer options selected in question 9. The variables writing (8) and activism (10) are binary such that 0 encodes the 'no' options and 1 stands for the 'yes' options.
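
To make this coding concrete, the following is a minimal sketch, not the authors' published code, of how such a filtered correlation matrix can be computed; the DataFrame and column names are hypothetical, with one numerically coded column per questionnaire variable.

```python
# Minimal sketch (not the authors' code) of the correlation analysis
# described above: pairwise Spearman's rho over numerically coded
# questionnaire variables, reporting only correlations with p < 0.05.
import pandas as pd
from scipy.stats import spearmanr

def significant_spearman(df: pd.DataFrame, alpha: float = 0.05) -> pd.DataFrame:
    """Pairwise Spearman's rho, keeping only correlations with p < alpha."""
    rows = []
    cols = list(df.columns)
    for i, a in enumerate(cols):
        for b in cols[i + 1:]:
            pair = df[[a, b]].dropna()  # drop skipped / 'prefer not to say' answers
            rho, p = spearmanr(pair[a], pair[b])
            if p < alpha:  # mirror Figure 3, which omits p >= 0.05
                rows.append({"var_a": a, "var_b": b, "rho": rho, "p": p})
    return pd.DataFrame(rows)

# Hypothetical usage, with columns coded as described in this appendix:
# df = pd.read_csv("responses_coded.csv")
# print(significant_spearman(df).sort_values("rho", ascending=False))
```
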

The beginning of the first row in the figure can thus be read as follows: Dialect competence self-ratings are

- negatively correlated with the age of acquisition (i.e., respondents whose dialect is their first language generally give higher competence ratings),
- slightly positively correlated with language activism (i.e., fluent dialect speakers are slightly more likely to be engaged in dialect preservation activities, and vice versa),
- positively correlated with traditionality (i.e., competent dialect speakers tend to rate their dialect as more distinct from Standard German, and vice versa),

and so on.

# C LT Ranking

Table 1 shows the order of preferred LTs. This ranking is based on the mean scores when coding the answers as follows: $1 =$ useless, $2 =$ rather useless, $3 =$ neither/nor, $4 =$ rather useful, $5 =$ useful. Non-answers ('cannot judge') are excluded.
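
As an illustration, here is a minimal sketch (an assumed workflow, not the authors' code) of this scoring scheme; the column names are hypothetical.

```python
# Minimal sketch of the Table 1 ranking: answers coded 1-5,
# non-answers ('cannot judge') excluded, technologies sorted by mean.
import pandas as pd

LIKERT = {"useless": 1, "rather useless": 2, "neither/nor": 3,
          "rather useful": 4, "useful": 5}  # 'cannot judge' maps to NaN

def lt_ranking(df: pd.DataFrame, lt_columns: list[str]) -> pd.Series:
    """Mean Likert score per language technology, best first."""
    coded = df[lt_columns].apply(lambda col: col.map(LIKERT))
    return coded.mean().sort_values(ascending=False)  # mean() skips NaN

# Hypothetical usage with columns mirroring the LT names in Table 1:
# print(lt_ranking(df, ["assistant_in", "stt_deu", "stt_dial", "chatbot_in"]))
```
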

If we remove the participants who indicated active engagement in dialect preservation (see §4.4 and question 10), the ranking only changes very slightly: chatbots with dialectal input and STT with dialectal output trade places (although they have nearly identical mean scores), and we observe the same for virtual assistants with dialectal output and machine translation from the dialect into Standard German.

# D Subgroup Comparisons

Tables 2 and 3 show how the responses by different subgroups of respondents differ for each variable.

We provide each subgroup's mean answers (using the same numeric coding as in the previous two appendix sections), as well as $t$ -test statistics (taking into account the scalar nature of the answer options) and $\chi^2$ test results.
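
The following is a minimal sketch of this comparison workflow, not the authors' code: a two-sample $t$-test on the numerically coded answers plus a $\chi^2$ test on the raw answer counts. The group and variable names are hypothetical, and the exact $t$-test variant used in the paper is not specified here, so a Welch-style test is assumed.

```python
# Minimal sketch (assumed workflow) of the subgroup comparisons in
# Tables 2 and 3: t-test on scalar-coded answers, chi-squared test on
# the contingency table of answer categories, plus subgroup means.
import pandas as pd
from scipy.stats import ttest_ind, chi2_contingency

def compare_groups(df: pd.DataFrame, variable: str, group_col: str,
                   group_a: str, group_b: str) -> dict:
    a = df.loc[df[group_col] == group_a, variable].dropna()
    b = df.loc[df[group_col] == group_b, variable].dropna()
    t_stat, t_p = ttest_ind(a, b, equal_var=False)  # Welch-style t-test (assumed)
    # Chi-squared test over the answer-category counts of both groups;
    # drop categories that neither group selected to keep the test valid.
    table = pd.crosstab(df[group_col], df[variable]).loc[[group_a, group_b]]
    table = table.loc[:, table.sum(axis=0) > 0]
    chi2, chi_p, dof, _ = chi2_contingency(table)
    return {"t": t_stat, "t_p": t_p, "chi2": chi2, "chi2_p": chi_p,
            "mean_a": a.mean(), "mean_b": b.mean()}

# Hypothetical usage:
# compare_groups(df, "spellcheckers_18", "group", "activist", "non_activist")
```
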

In addition to the analyses in §4.4, we provide two more subgroup comparisons, albeit with small effect sizes:

Traditionality We compare the responses of speakers who rate their dialect as traditional and distinct from Standard German (the first two answer options for question 6) to those who indicated speaking a variety more akin to a regiolect (the last two options). While these subgroups differ in their responses to the dialect-related questions, few of the differences regarding language technologies are statistically significant (Table 2).

Age Figure 3 shows that the variable age correlates with few other variables. With respect to the LTs, young participants tend to be somewhat more positive towards three of the overall most popular LTs: STT with Standard German output, and virtual assistants and chatbots with dialectal input.

Figure 3: Spearman's $\rho$ between variables. Blue dots show positively correlated variables (max.: +0.77), red dots negatively correlated ones (min.: -0.50). We only include correlations with $p$-values under 0.05. The larger the dot, the smaller the $p$-value. The numbers behind the variables refer to the questions in Appendix §A. For further explanations of how the variables are coded, see Appendix §B.

<table><tr><td colspan="2">Correlation (ρ)</td><td colspan="2">p-value</td></tr><tr><td>●</td><td>0.75</td><td>●</td><td><0.001</td></tr><tr><td>●</td><td>0.50</td><td>●</td><td><0.01</td></tr><tr><td>●</td><td>0.25</td><td>●</td><td><0.05</td></tr><tr><td>○</td><td>0.00</td><td></td><td>≥0.05</td></tr><tr><td>○</td><td>-0.25</td><td></td><td></td></tr><tr><td>●</td><td>-0.50</td><td></td><td></td></tr></table>
<table><tr><td rowspan="2">Variable</td><td colspan="4">Activists vs. non-activists</td><td colspan="4">Most vs. least trad. dialects</td></tr><tr><td>t-stat</td><td>\( \chi^2 \)</td><td>\( μ_{Act} \)</td><td>\( μ_{Non} \)</td><td>t-stat</td><td>\( \chi^2 \)</td><td>\( μ_{Most} \)</td><td>\( μ_{Least} \)</td></tr><tr><td>Dialect skills (1)</td><td>3.0 **</td><td>9.0 *</td><td>3.7</td><td>3.5</td><td>11.3 ***</td><td>83.2 ***</td><td>3.8</td><td>2.9</td></tr><tr><td>Age of acquisition (3)</td><td>1.3</td><td>5.4</td><td>1.4</td><td>1.3</td><td>-4.4 ***</td><td>29.9 ***</td><td>1.2</td><td>1.5</td></tr><tr><td>Age (11)</td><td>6.7 ***</td><td>43.5 ***</td><td>4.7</td><td>3.4</td><td>0.7</td><td>3.9</td><td>3.9</td><td>3.7</td></tr><tr><td>Activism (10)</td><td>—</td><td>—</td><td>1.0</td><td>0.0</td><td>5.7 ***</td><td>26.9 ***</td><td>0.5</td><td>0.1</td></tr><tr><td>Traditionality (6)</td><td>5.6 ***</td><td>31.6 ***</td><td>3.9</td><td>3.2</td><td>—</td><td>—</td><td>4.3</td><td>1.6</td></tr><tr><td>Frequency (7)</td><td>2.5 *</td><td>14.0 *</td><td>5.1</td><td>4.6</td><td>5.0 ***</td><td>25.9 ***</td><td>5.1</td><td>4.0</td></tr><tr><td>Writing (8)</td><td>3.9 ***</td><td>13.9 ***</td><td>0.8</td><td>0.6</td><td>5.0 ***</td><td>21.3 ***</td><td>0.8</td><td>0.4</td></tr><tr><td># Writing contexts (9)</td><td>4.9 ***</td><td>28.7 ***</td><td>3.3</td><td>2.2</td><td>3.4 ***</td><td>16.2 *</td><td>3.0</td><td>1.8</td></tr><tr><td>Any aspect (13)</td><td>5.6 ***</td><td>36.3 ***</td><td>3.7</td><td>2.8</td><td>6.2 ***</td><td>36.4 ***</td><td>3.5</td><td>2.2</td></tr><tr><td>Orthography (13)</td><td>6.0 ***</td><td>37.9 ***</td><td>2.8</td><td>2.0</td><td>2.8 **</td><td>12.6 *</td><td>2.5</td><td>1.9</td></tr><tr><td>Spoken only (13)</td><td>-4.2 ***</td><td>34.9 ***</td><td>2.9</td><td>3.5</td><td>-2.5 *</td><td>8.7</td><td>3.1</td><td>3.7</td></tr><tr><td>Diversity (13)</td><td>3.0 **</td><td>12.5 *</td><td>4.3</td><td>3.9</td><td>3.0 **</td><td>12.5 *</td><td>4.2</td><td>3.7</td></tr><tr><td>Reading is hard (13)</td><td>-1.6</td><td>5.5</td><td>2.2</td><td>2.4</td><td>-2.3 *</td><td>10.4 *</td><td>2.3</td><td>2.7</td></tr><tr><td>MT deu→dial (15)</td><td>3.0 **</td><td>14.7 **</td><td>3.0</td><td>2.5</td><td>0.9</td><td>2.0</td><td>2.7</td><td>2.5</td></tr><tr><td>MT oth→dial (15)</td><td>4.1 ***</td><td>21.8 ***</td><td>2.8</td><td>2.2</td><td>1.6</td><td>12.3 *</td><td>2.5</td><td>2.2</td></tr><tr><td>MT dial→deu (15)</td><td>3.1 **</td><td>13.1 *</td><td>3.5</td><td>3.0</td><td>1.5</td><td>7.9</td><td>3.2</td><td>2.9</td></tr><tr><td>MT dial→oth (15)</td><td>2.4 *</td><td>7.0</td><td>3.0</td><td>2.6</td><td>2.0 *</td><td>20.0 ***</td><td>2.9</td><td>2.4</td></tr><tr><td>MT variation (16)</td><td>-3.3 **</td><td>11.7 *</td><td>2.5</td><td>3.0</td><td>-4.2 ***</td><td>19.1 ***</td><td>2.6</td><td>3.6</td></tr><tr><td>Spellcheckers (18)</td><td>5.4 ***</td><td>34.2 ***</td><td>2.9</td><td>2.1</td><td>2.0 *</td><td>9.8 *</td><td>2.4</td><td>2.0</td></tr><tr><td>STT deu (20)</td><td>-0.3</td><td>3.1</td><td>3.4</td><td>3.5</td><td>1.0</td><td>8.9</td><td>3.5</td><td>3.3</td></tr><tr><td>STT dial (20)</td><td>2.4 *</td><td>8.7</td><td>3.6</td><td>3.2</td><td>2.6 **</td><td>10.1 *</td><td>3.5</td><td>2.9</td></tr><tr><td>TTS (22)</td><td>2.5 *</td><td>11.6 *</td><td>3.4</td><td>3.0</td><td>1.0</td><td>6.9</td><td>3.1</td><td>2.9</td></tr><tr><td>Assistant in (24)</td><td>-0.8</td><td>3.3</td><td>3.7</td><td>3.8</td><td>1.0</td><td>1.6</td><td>3.8</td><td>3.6</td></tr><tr><td>Assistant out 
(24)</td><td>2.3 *</td><td>5.6</td><td>3.4</td><td>3.0</td><td>-0.1</td><td>1.7</td><td>3.1</td><td>3.1</td></tr><tr><td>Chatbot in (24)</td><td>0.7</td><td>1.3</td><td>3.4</td><td>3.2</td><td>0.2</td><td>3.7</td><td>3.3</td><td>3.3</td></tr><tr><td>Chatbot out (24)</td><td>3.1 **</td><td>12.1 *</td><td>3.1</td><td>2.6</td><td>0.6</td><td>2.0</td><td>2.8</td><td>2.7</td></tr><tr><td>Assistant var. (25)</td><td>-3.5 ***</td><td>13.7 **</td><td>2.5</td><td>3.1</td><td>-4.9 ***</td><td>22.9 ***</td><td>2.7</td><td>3.7</td></tr><tr><td>Search engines (27)</td><td>4.2 ***</td><td>20.8 ***</td><td>3.4</td><td>2.7</td><td>0.9</td><td>4.2</td><td>2.9</td><td>2.7</td></tr><tr><td>Know existing (29)</td><td>0.6</td><td>0.2</td><td>0.2</td><td>0.1</td><td>2.0 *</td><td>3.1</td><td>0.2</td><td>0.1</td></tr><tr><td>Appeal (30)</td><td>5.4 ***</td><td>32.1 ***</td><td>4.0</td><td>3.2</td><td>1.5</td><td>8.0</td><td>3.6</td><td>3.3</td></tr><tr><td>Useful=use (30)</td><td>0.5</td><td>5.0</td><td>3.7</td><td>3.6</td><td>1.3</td><td>3.4</td><td>3.7</td><td>3.5</td></tr><tr><td>More often (30)</td><td>4.0 ***</td><td>16.5 **</td><td>3.1</td><td>2.5</td><td>0.1</td><td>1.0</td><td>2.6</td><td>2.6</td></tr></table>
Table 2: Differences between respondent subgroups. We show the results of $t$-tests and $\chi^2$ tests between pairs of respondent groups: those who indicated involvement in dialect preservation efforts ('activists', question 10) vs. those who did not, and respondents who rate their dialect as one of the two most vs. two least traditional options (question 6). Positive $t$-statistics indicate that the first group's values for the variable are higher than the second one's, and vice versa for negative values. Grey entries denote results with $p$-values $\geq 0.05$; asterisks represent smaller $p$-values: $* < 0.05$, $** < 0.01$, $*** < 0.001$. The columns with $\mu$ present the mean Likert scores of the subgroups' responses (e.g., $\mu_{\mathrm{Act}}$ contains the activists' mean answers). The numbers behind the variables refer to the questions in Appendix §A. For information on how the variables are encoded as numbers, see Appendix §B.

<table><tr><td rowspan="2"></td><td colspan="2">NDS vs. (rest of) D/AT</td><td colspan="5">CH vs. (non-NDS) D/AT</td></tr><tr><td>t-stat</td><td>x²</td><td>t-stat</td><td>x²</td><td>μNDS</td><td>μD/AT</td><td>μCH</td></tr><tr><td>Dialect skills (1)</td><td>-1.6</td><td>5.5</td><td>4.7 ***</td><td>23.4 ***</td><td>3.4</td><td>3.5</td><td>4.0</td></tr><tr><td>Age of acquisition (3)</td><td>5.2 ***</td><td>25.2 ***</td><td>-3.2 **</td><td>10.0 **</td><td>1.7</td><td>1.3</td><td>1.1</td></tr><tr><td>Age (11)</td><td>6.3 ***</td><td>38.2 ***</td><td>-1.5</td><td>15.1 *</td><td>5.3</td><td>3.6</td><td>3.2</td></tr><tr><td>Activism (10)</td><td>5.3 ***</td><td>23.9 ***</td><td>-1.8</td><td>2.5</td><td>0.7</td><td>0.3</td><td>0.2</td></tr><tr><td>Traditionality (6)</td><td>2.8 **</td><td>9.9 *</td><td>3.3 **</td><td>16.6 **</td><td>3.7</td><td>3.3</td><td>3.8</td></tr><tr><td>Frequency (7)</td><td>-1.1</td><td>6.4</td><td>4.6 ***</td><td>23.0 ***</td><td>4.4</td><td>4.7</td><td>5.7</td></tr><tr><td>Writing (8)</td><td>1.2</td><td>1.1</td><td>3.8 ***</td><td>12.5 ***</td><td>0.7</td><td>0.6</td><td>0.9</td></tr><tr><td># Writing contexts (9)</td><td>6.5 ***</td><td>45.9 ***</td><td>4.5 ***</td><td>25.5 ***</td><td>4.0</td><td>2.1</td><td>3.2</td></tr><tr><td>Any aspect (13)</td><td>5.3 ***</td><td>27.6 ***</td><td>3.9 ***</td><td>27.3 ***</td><td>3.9</td><td>2.8</td><td>3.7</td></tr><tr><td>Orthography (13)</td><td>8.4 ***</td><td>63.6 ***</td><td>-2.0 *</td><td>4.8</td><td>3.6</td><td>2.0</td><td>1.7</td></tr><tr><td>Spoken only (13)</td><td>-5.6 ***</td><td>30.6 ***</td><td>-3.0 **</td><td>19.7 ***</td><td>2.5</td><td>3.6</td><td>3.0</td></tr><tr><td>Diversity (13)</td><td>1.0</td><td>3.1</td><td>2.7 **</td><td>7.3</td><td>4.2</td><td>4.0</td><td>4.5</td></tr><tr><td>Reading is hard (13)</td><td>-4.1 ***</td><td>18.3 **</td><td>-1.8</td><td>3.2</td><td>1.8</td><td>2.6</td><td>2.2</td></tr><tr><td>MT deu→dial (15)</td><td>4.6 ***</td><td>21.3 ***</td><td>-0.5</td><td>1.6</td><td>3.5</td><td>2.5</td><td>2.4</td></tr><tr><td>MT oth→dial (15)</td><td>5.0 ***</td><td>27.8 ***</td><td>-0.2</td><td>5.0</td><td>3.2</td><td>2.2</td><td>2.2</td></tr><tr><td>MT dial→deu (15)</td><td>2.6 **</td><td>9.9 *</td><td>1.0</td><td>4.3</td><td>3.6</td><td>3.0</td><td>3.3</td></tr><tr><td>MT dial→oth (15)</td><td>3.3 **</td><td>13.6 **</td><td>1.3</td><td>4.4</td><td>3.2</td><td>2.5</td><td>2.8</td></tr><tr><td>MT variation (16)</td><td>-2.4 *</td><td>11.6 *</td><td>0.7</td><td>4.5</td><td>2.3</td><td>2.9</td><td>3.0</td></tr><tr><td>Spellcheckers (18)</td><td>8.2 ***</td><td>68.3 ***</td><td>-2.1 *</td><td>8.2</td><td>3.7</td><td>2.1</td><td>1.7</td></tr><tr><td>STT deu (20)</td><td>-1.5</td><td>9.5</td><td>3.2 **</td><td>10.5 *</td><td>3.1</td><td>3.4</td><td>4.1</td></tr><tr><td>STT dial (20)</td><td>4.0 ***</td><td>17.5 **</td><td>2.6 **</td><td>10.7 *</td><td>4.0</td><td>3.1</td><td>3.7</td></tr><tr><td>TTS (22)</td><td>4.0 ***</td><td>15.9 **</td><td>0.5</td><td>1.5</td><td>3.8</td><td>2.9</td><td>3.1</td></tr><tr><td>Assistant in (24)</td><td>-0.8</td><td>7.6</td><td>2.7 **</td><td>9.1</td><td>3.5</td><td>3.7</td><td>4.2</td></tr><tr><td>Assistant out (24)</td><td>1.7</td><td>3.0</td><td>0.8</td><td>2.6</td><td>3.4</td><td>3.0</td><td>3.2</td></tr><tr><td>Chatbot in (24)</td><td>0.8</td><td>2.0</td><td>-0.6</td><td>2.1</td><td>3.4</td><td>3.2</td><td>3.1</td></tr><tr><td>Chatbot out (24)</td><td>3.7 ***</td><td>13.4 **</td><td>-0.4</td><td>2.8</td><td>3.4</td><td>2.6</td><td>2.5</td></tr><tr><td>Assistant var. 
(25)</td><td>-2.3 *</td><td>9.2</td><td>1.1</td><td>2.1</td><td>2.4</td><td>2.9</td><td>3.2</td></tr><tr><td>Search engines (27)</td><td>4.8 ***</td><td>26.1 ***</td><td>-1.6</td><td>3.8</td><td>3.8</td><td>2.8</td><td>2.4</td></tr><tr><td>Know existing (29)</td><td>4.1 ***</td><td>13.6 ***</td><td>8.2 ***</td><td>49.5 ***</td><td>0.2</td><td>0.1</td><td>0.5</td></tr><tr><td>Appeal (30)</td><td>5.1 ***</td><td>35.3 ***</td><td>-3.1 **</td><td>14.0 **</td><td>4.3</td><td>3.3</td><td>2.7</td></tr><tr><td>Useful=use (30)</td><td>0.1</td><td>1.4</td><td>1.6</td><td>3.8</td><td>3.6</td><td>3.6</td><td>3.9</td></tr><tr><td>More often (30)</td><td>3.7 ***</td><td>14.8 **</td><td>-1.6</td><td>3.1</td><td>3.2</td><td>2.5</td><td>2.2</td></tr></table>
Table 3: Differences between region-based respondent subgroups. We show the results of $t$ -tests and $\chi^2$ tests between Low German (NDS) or Swiss (CH) respondents compared to (non-Low-German-speaking) German and Austrian respondents (D/AT). Positive $t$ -statistics indicate that the first group's values for the variable are higher than the second one's, and vice versa for negative values. Grey entries denote results with $p$ -values $\geq 0.05$ ; asterisks represent smaller $p$ -values: $* < 0.05$ , $** < 0.01$ , $*** < 0.001$ . The columns with $\mu$ present the mean Likert scores of the subgroups' responses (e.g., $\mu_{\mathrm{NDS}}$ contains the mean answers provided by our Low Saxon respondents). The numbers behind the variables refer to the questions in Appendix §A. For information on how the variables are encoded as numbers, see Appendix §B.
whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f783af06663cbdc9cb5da979f415eece77332d2f15e93b3e4bacf9b8b0cfa973
size 1094976
whatdodialectspeakerswantasurveyofattitudestowardslanguagetechnologyforgermandialects/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:30994f22c40b9776fadbc51c171247f7106353f1c4481dbdf335fc67187f97b6
size 603914
whatdoesparameterfreeprobingreallyuncover/4d7b0ef9-25d0-4003-b68a-b056ef9853ab_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:83b2c426bf045279e2f5cd2ce7145936f22c18f04cff51d83c7104f47b4d7089
size 70544
whatdoesparameterfreeprobingreallyuncover/4d7b0ef9-25d0-4003-b68a-b056ef9853ab_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3024e131ad8000322fb0caf66a0d92eaec80a63134e1580619806ca9f63a235
size 81052
whatdoesparameterfreeprobingreallyuncover/4d7b0ef9-25d0-4003-b68a-b056ef9853ab_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:415b01f7351940b8ad47430318d0ea41f0725f24a6defca7e9d7d970ddbfad7f
size 295566
whatdoesparameterfreeprobingreallyuncover/full.md
ADDED
@@ -0,0 +1,287 @@
# What does Parameter-free Probing Really Uncover?

Tommi Buder-Gröndahl

University of Helsinki / Yliopistonkatu 3, 00014 Helsinki, Finland

tommi.grondahl@helsinki.fi

# Abstract

Supervised approaches to probing large language models (LLMs) have been criticized for using pre-defined, theory-laden target labels. As an alternative, parameter-free probing constructs structural representations bottom-up via information derived from the LLM alone. This has been suggested to capture a genuine "LLM-internal grammar". However, its relation to familiar linguistic formalisms remains unclear. I extend prior work on a parameter-free probing technique called perturbed masking applied to BERT, by comparing its results to the Universal Dependencies (UD) formalism for English. The results highlight several major discrepancies between BERT and UD, which lack correlates in linguistic theory. This raises the question of whether human grammar is the correct analogy for interpreting BERT in the first place.

# 1 Introduction

Probing large language models (LLMs) consists in mapping their internal states to linguistic classes or relations (Rogers et al., 2020; Belinkov, 2022). Most methods use supervised learning to train a probe to predict pre-determined labels (Hewitt and Manning, 2019; Tenney et al., 2019; Kuznetsov and Gurevych, 2020; Manning et al., 2020; Lasri et al., 2022). However, critics have deemed this insufficient for determining whether LLMs actually represent linguistic structures (Kulmizev and Nivre, 2022; Buder-Gröndahl, 2023). For representation proper, the labels should not only be predictable from the LLM; they should somehow capture its internal architecture on a high level of abstraction.

A possible way forward is to use parameter-free probing, which shuns separate probing classifiers by extracting structural information directly from the LLM (Clark et al., 2019; Mareček and Rosa, 2019; Wu et al., 2020). As a bottom-up approach, this has been interpreted as uncovering the grammar intrinsic to the LLM without relying on a priori presumptions derived from linguistic theory.

In this paper, I focus on a parameter-free probe called perturbed masking, originally presented and applied to BERT by Wu et al. (2020). While it has received criticism for underwhelming results compared to gold-standard parses (Niu et al., 2022), this overlooks its main goal of uncovering BERT's inherent syntax – which may well deviate from linguistic theory (Wu et al., 2020, 4173). Such deviations do not call for discarding it; instead, they provide insight into how BERT's architecture can differ from common linguistic assumptions.

I compare dependency graphs derived from BERT to the Universal Dependencies (UD) annotation for English, and uncover major discrepancies related to verbal argument structure, noun phrase structure, modifiers, and prepositions. In particular, BERT treats the root (in UD's annotation) as a head far more often than UD. This effect of being "attracted by the root" is especially strong in recursive embeddings, but also extends beyond these.

Moreover, BERT's behavior tends to resist linguistic explanation. For example, despite major disagreements within linguistic theory, argument structure is ubiquitously treated as clause-bound: no feasible analysis assimilates embedded-clause arguments to main-clause arguments. Yet, the BERT-parse regularly does exactly this. Indeed, the only cases where BERT's deviations from UD have a salient linguistic interpretation concern prepositions and some possessive constructions, where dependent-head relations are flipped.

The results thus point in the same direction as critiques of supervised probing: the assumption that BERT represents grammar in line with familiar linguistic formalisms lacks proper support. When this is not built directly into the experiment design (via pre-determined target labels), probing reveals fundamental disparities between BERT and commonly accepted syntactic principles. We are thus prompted to question whether human grammar is an appropriate analogy for BERT after all.

# 2 Methodology

I describe the parameter-free probing technique investigated (Section 2.1), the dataset (Section 2.2), and the experiment pipeline (Section 2.3).

# 2.1 Perturbed masking

Parameter-free probing aims to construct linguistic information directly from the LLM without separate training. Wu et al. (2020) present a prominent technique called perturbed masking, with which they aim to find "the 'natural' syntax inherent in BERT" (p. 4173) by utilizing an independently motivated relation of impact between tokens. I replicated their original setup, which uses the bert-base-uncased model presented in Wolf et al. (2020).

As input, BERT takes a sequence of tokens $\mathbf{x} = [x_1,\dots,x_n]$ . It maps each token $x_{i}$ to a contextual representation $H_{\theta}(\mathbf{x})_i$ , where the influence of each token $x_{j}\in \mathbf{x}$ arises via Transformer attention (Vaswani et al., 2017) based on model parameters $\theta$ . For perturbed masking, Wu et al. (2020) first mask token $x_{i}$ , giving $\mathbf{x}\backslash \{x_i\}$ . They then also mask token $x_{j}$ , giving $\mathbf{x}\backslash \{x_i,x_j\}$ . The impact of $x_{j}$ on the representation of $x_{i}$ is then measured as follows, where $d$ is the Euclidean distance:

$$
f(x_i, x_j) = d\left(H_{\theta}(\mathbf{x} \backslash \{x_i\})_i,\; H_{\theta}(\mathbf{x} \backslash \{x_i, x_j\})_i\right)
$$

Impacts between all token pairs are collected into an impact matrix, which is given as input to the Eisner algorithm (Eisner, 1996) to construct a directed dependency graph. The intuitive idea is that heads have the highest impact on their dependents in the matrix.
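To make the procedure concrete, the impact computation can be sketched with the HuggingFace transformers library. This is a minimal sketch, not the original implementation of Wu et al. (2020): `impact_matrix` is an illustrative name, the final hidden layer is assumed, and subword-to-word alignment is ignored.

```python
import torch
from transformers import AutoModel, AutoTokenizer

# Setup mirrors the replicated configuration: bert-base-uncased via transformers.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")
model.eval()

def impact_matrix(sentence: str) -> torch.Tensor:
    """f(x_i, x_j): distance between the two masked representations of x_i."""
    ids = tokenizer(sentence, return_tensors="pt")["input_ids"][0]
    n = ids.shape[0]
    mask_id = tokenizer.mask_token_id
    f = torch.zeros(n, n)
    with torch.no_grad():
        for i in range(1, n - 1):                  # skip [CLS] and [SEP]
            once = ids.clone()
            once[i] = mask_id                      # x \ {x_i}
            h_i = model(once.unsqueeze(0)).last_hidden_state[0, i]
            for j in range(1, n - 1):
                if j == i:
                    continue
                twice = once.clone()
                twice[j] = mask_id                 # x \ {x_i, x_j}
                h_ij = model(twice.unsqueeze(0)).last_hidden_state[0, i]
                f[i, j] = torch.dist(h_i, h_ij)    # d = Euclidean distance
    return f
```

The resulting matrix is then decoded with the Eisner algorithm into the directed dependency graph analyzed in the rest of the paper.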
# 2.2 Data

Following Wu et al. (2020), I used the English Parallel Universal Dependencies (PUD) dataset (Zeman et al., 2017). Consisting of 1000 sentences, of which I discarded seven (see Appendix A), it covers 21047 UD-annotated tokens.

# 2.3 Experiments

UD assigns each word a head and a dependency relation type (deprel), as exemplified below:<sup>4</sup>



The arrow is read as marking a head-dependent relation (in this direction). The root is its own head, and is typically the main verb. The BERT-parse of the same sentence maps all tokens to the root (ends):



Here, UD and BERT differ in which head they assign to the determiner the. I denote this by marking the UD-assigned head-dependent relation above and the BERT-assigned relation below:



The challenge in interpreting BERT-parses is that they only give head-dependent relations, not deprels. We thus need external deprels as the theoretical basis for comparing BERT and UD. For this, I use UD-annotations as follows:

$Dep(x)$ : deprel assigned to $x$ by UD

$Head_{UD}(x)$ : head assigned to $x$ by UD

$Head_{BERT}(x)$ : head assigned to $x$ by BERT

$$
H_U(x) = Dep\left(Head_{UD}(x)\right)
$$

$$
H_B(x) = Dep\left(Head_{BERT}(x)\right)
$$

That is, I compare UD- and BERT-assigned heads in terms of their UD-deprels. These values for the determiner in the example above are:

$$
Dep(\text{the}) = \text{det}
$$

$$
Head_{UD}(\text{the}) = \text{commercial}
$$

$$
Head_{BERT}(\text{the}) = \text{ends}
$$

$$
H_U(\text{the}) = Dep(\text{commercial}) = \text{nsubj}
$$

$$
H_B(\text{the}) = Dep(\text{ends}) = \text{root}
$$

Note that, since $Dep$ is derived from UD, $H_{B}$ should not be read as directly describing how BERT treats the head. Instead, it describes how UD would treat the head assigned by BERT.

By classifying discrepancies between BERT and UD, I assess their prevalence and nature in the PUD data. I focus on four phenomena: argument structure, noun phrase (NP) structure, adjective/adverb modifiers, and prepositional phrases (PPs). Source code for the experiments is openly available.<sup>5</sup>
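The comparison scheme reduces to two dictionary lookups per token. The sketch below assumes illustrative data structures (a UD sentence as (id, head, deprel) triples, e.g. parsed from CoNLL-U, and BERT heads as a dependent-to-head mapping); it is not the original evaluation code.

```python
def h_labels(ud_tokens, bert_heads):
    """Compute (Dep, H_U, H_B) per token.

    ud_tokens:  list of (id, head, deprel) triples from the UD parse.
    bert_heads: {dependent_id: head_id} from the BERT-parse.
    """
    dep = {tid: rel for tid, _, rel in ud_tokens}
    for tid, head, rel in ud_tokens:
        # In CoNLL-U the root's head is ID 0, which has no deprel entry;
        # since the root is its own head in UD, the lookup defaults to "root".
        h_u = dep.get(head, "root")               # Dep(Head_UD(x))
        h_b = dep.get(bert_heads[tid], "root")    # Dep(Head_BERT(x))
        yield tid, rel, h_u, h_b, head != bert_heads[tid]
```

For the determiner in the example above, this yields Dep = det, H_U = nsubj, H_B = root, and a shift flag of True.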
# 3 Results

I replicated the original results of Wu et al. (2020) with their best setup on the PUD data, and investigated shifts between BERT and UD in terms of $Dep$ , $H_{U}$ , and $H_{B}$ . Section 3.1 presents general findings, Sections 3.2-3.5 cover linguistic details, and Appendix B provides the raw data.

# 3.1 General findings

Of all 21047 tokens, $58\%$ were subject to a head-dependent shift between UD and BERT. Nearly all Dep-types were involved, and a clear majority $(74\%)$ had a shift ratio over $50\%$ . By far the most common $H_{B}$ was root; i.e., shifts typically involved BERT assigning a head which was the root in the UD-parse. This covered $35\%$ of all shifts.

Wu et al. (2020, 4169) suggest that BERT mostly learns local dependencies. To assess this, I calculated dependent-head distances from both parses, and obtained contrasting results: the average is higher in BERT (3.66) than in UD (3.52). Locality thus does not explain the discrepancies. A likely explanation for the increased average dependent-head distance in BERT is its tendency to over-assign the root as a head. As covered in the upcoming sections, this can lead to longer dependent-head distances in cases like embedded clauses, where the original UD-head is closer to its dependent than the root.
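Given the per-token labels, these aggregate statistics (shift ratios per Dep and mean dependent-head distances) reduce to counting. A minimal sketch, reusing the hypothetical h_labels helper above and assuming token IDs double as linear positions:

```python
from collections import Counter

def shift_stats(sentences):
    """sentences: iterable of (ud_tokens, bert_heads) pairs."""
    shifts, totals = Counter(), Counter()
    ud_dists, bert_dists = [], []
    for ud_tokens, bert_heads in sentences:
        ud_head = {tid: head for tid, head, _ in ud_tokens}
        for tid, rel, h_u, h_b, shifted in h_labels(ud_tokens, bert_heads):
            totals[rel] += 1
            shifts[rel] += shifted                 # bool counts as 0/1
            # A faithful replication may treat the root (head ID 0) separately.
            ud_dists.append(abs(tid - ud_head[tid]))
            bert_dists.append(abs(tid - bert_heads[tid]))
    mean = lambda xs: sum(xs) / len(xs)
    return ({rel: shifts[rel] / totals[rel] for rel in totals},
            mean(ud_dists), mean(bert_dists))
```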
# 3.2 Argument structure

Table 1 collects shifts per $Dep$ – $H_{U}$ pair for active and passive clause subjects (nsubj, nsubj:pass) and direct objects (obj).<sup>7</sup>

In arguments of the root, BERT and UD mostly overlap, with shift ratios of $15\% - 29\%$ . However, with embedded clauses (ccomp, xcomp, conj, acl:relcl), BERT regularly continues to assign arguments to the root, with far higher shift ratios $(64\% - 94\%)$ and root as the most common $H_{B}$ . An example is shown below, where BERT assigns the main verb as the head of an embedded subject:



The BERT-parse thus seems to shun recursion, preferring the root even for embedded arguments.

<table><tr><td>Dep</td><td>HU</td><td>Ratio</td><td>Count</td></tr><tr><td rowspan="6">nsubj</td><td>root</td><td>0.24</td><td>198</td></tr><tr><td>acl:relcl</td><td>0.81</td><td>140</td></tr><tr><td>ccomp</td><td>0.92</td><td>101</td></tr><tr><td>advcl</td><td>0.79</td><td>80</td></tr><tr><td>conj</td><td>0.83</td><td>68</td></tr><tr><td>parataxis</td><td>0.64</td><td>46</td></tr><tr><td rowspan="3">nsubj:pass</td><td>root</td><td>0.29</td><td>38</td></tr><tr><td>acl:relcl</td><td>0.94</td><td>32</td></tr><tr><td>advcl</td><td>0.91</td><td>21</td></tr><tr><td rowspan="7">obj</td><td>advcl</td><td>0.66</td><td>86</td></tr><tr><td>xcomp</td><td>0.75</td><td>82</td></tr><tr><td>acl:relcl</td><td>0.78</td><td>58</td></tr><tr><td>conj</td><td>0.66</td><td>58</td></tr><tr><td>acl</td><td>0.73</td><td>52</td></tr><tr><td>root</td><td>0.15</td><td>47</td></tr><tr><td>ccomp</td><td>0.73</td><td>29</td></tr></table>

Table 1: Verbal argument structure: subjects and objects.

The same pattern also repeats for objects:



While the explanation of this behavior is not fully clear, it shows that the root in general has an especially high impact in determining the contextual embeddings of other words. One salient possibility is that this arises because the root is usually a main clause verb, which has central influence on both grammatical matters (such as inflection or valency) and semantic matters (such as the possible semantic classes of arguments). Hence, when BERT is pre-trained via masked-token prediction (Devlin et al., 2019), attending to the main clause verb is likely to give useful information pertaining to many masked tokens. A generally high impact for the root would follow, in line with these findings.

# 3.3 Noun phrase structure

Table 2 lists NP-related shifts for three variants of Dep: determiners (det), possessors (nmod:poss), and numerals (nummod). Some of these shifts are grammatically salient: for instance, UD treats the possessor as headed by the possessed noun, but BERT often takes it to be headed by the clitic 's:



<table><tr><td>Dep</td><td>HU</td><td>Ratio</td><td>Count</td></tr><tr><td rowspan="8">det</td><td>obl</td><td>0.52</td><td>261</td></tr><tr><td>obj</td><td>0.67</td><td>253</td></tr><tr><td>nsubj</td><td>0.54</td><td>208</td></tr><tr><td>nmod</td><td>0.49</td><td>191</td></tr><tr><td>conj</td><td>0.57</td><td>44</td></tr><tr><td>nsubj:pass</td><td>0.54</td><td>43</td></tr><tr><td>nmod:poss</td><td>0.64</td><td>23</td></tr><tr><td>appos</td><td>0.68</td><td>21</td></tr><tr><td rowspan="4">nmod:poss</td><td>obj</td><td>0.70</td><td>56</td></tr><tr><td>nmod</td><td>0.72</td><td>55</td></tr><tr><td>obl</td><td>0.58</td><td>54</td></tr><tr><td>nsubj</td><td>0.70</td><td>53</td></tr><tr><td rowspan="2">nummod</td><td>obl</td><td>0.69</td><td>55</td></tr><tr><td>nmod</td><td>0.71</td><td>25</td></tr></table>

Table 2: Determiners, possessors, and numerals.

However, many cases are linguistically incoherent. For example, BERT sometimes takes possessors to modify a preposition rather than a noun:



As usual, BERT also regularly assigns the root as the head, as for the determiner $(a)$ shown here:



In principle, the DP-analysis in formal linguistics treats determiners as noun phrase heads (Abney, 1987), and might initially justify taking the determiner to head the object (a time). However, this would require the noun (time) to be headed by the determiner; instead, it is headed by the root as well. BERT thus does not implement the DP-analysis; the determiner is simply attracted by the root. The same occurs for numeral modifiers:



Since possessors, determiners, and numerals are the sine qua non of NP-arguments/modifiers, these results illustrate a drastic shift between BERT and widely shared syntactic assumptions about NPs.
# 3.4 Adjective and adverb modifiers

Table 3 shows shifts related to adjectives (amod), adverbs (advmod), and nominal modifiers (nmod).

<table><tr><td>Dep</td><td>HU</td><td>Ratio</td><td>Count</td></tr><tr><td rowspan="7">amod</td><td>obj</td><td>0.62</td><td>151</td></tr><tr><td>obl</td><td>0.52</td><td>151</td></tr><tr><td>nmod</td><td>0.53</td><td>132</td></tr><tr><td>nsubj</td><td>0.53</td><td>118</td></tr><tr><td>conj</td><td>0.63</td><td>56</td></tr><tr><td>nsubj:pass</td><td>0.52</td><td>29</td></tr><tr><td>compound</td><td>0.57</td><td>21</td></tr><tr><td rowspan="10">advmod</td><td>root</td><td>0.18</td><td>57</td></tr><tr><td>conj</td><td>0.62</td><td>53</td></tr><tr><td>advcl</td><td>0.72</td><td>51</td></tr><tr><td>acl:relcl</td><td>0.73</td><td>40</td></tr><tr><td>amod</td><td>0.73</td><td>36</td></tr><tr><td>advmod</td><td>0.71</td><td>32</td></tr><tr><td>nummod</td><td>0.75</td><td>27</td></tr><tr><td>ccomp</td><td>0.68</td><td>27</td></tr><tr><td>obl</td><td>0.72</td><td>21</td></tr><tr><td>xcomp</td><td>0.72</td><td>21</td></tr><tr><td rowspan="8">nmod</td><td>obl</td><td>0.88</td><td>243</td></tr><tr><td>obj</td><td>0.89</td><td>202</td></tr><tr><td>nsubj</td><td>0.87</td><td>163</td></tr><tr><td>nmod</td><td>0.84</td><td>127</td></tr><tr><td>conj</td><td>0.88</td><td>59</td></tr><tr><td>nsubj:pass</td><td>0.83</td><td>34</td></tr><tr><td>appos</td><td>0.85</td><td>23</td></tr><tr><td>root</td><td>0.38</td><td>20</td></tr></table>

Table 3: Adjectival, adverbial, and nominal modifiers.

The root is a prominent $H_{B}$ in embedded clauses as well as nested modifiers, indicating that BERT does not reliably treat modifiers recursively. For example, embedded wh-adverbs such as why are often assigned as dependents of the main verb:



However, the lack of recursion is insufficient to explain all modifier-related shifts. In particular, adjectives of even non-embedded noun phrases are regularly treated as dependents of the root:



This behavior resists interpretation in all prominent syntactic frameworks on adjectives, which ubiquitously treat them as modifiers of nouns or NPs (cf. Baker 2003; Dixon 2004; Cabredo Hofherr and Matushansky 2010).

<table><tr><td>Dep</td><td>HU</td><td>Ratio</td><td>Count</td></tr><tr><td rowspan="3">case</td><td>obl</td><td>0.72</td><td>877</td></tr><tr><td>nmod</td><td>0.73</td><td>783</td></tr><tr><td>nmod:poss</td><td>0.83</td><td>85</td></tr><tr><td rowspan="8">obl</td><td>root</td><td>0.47</td><td>283</td></tr><tr><td>acl:relcl</td><td>0.97</td><td>117</td></tr><tr><td>advcl</td><td>0.95</td><td>92</td></tr><tr><td>conj</td><td>0.91</td><td>90</td></tr><tr><td>xcomp</td><td>0.95</td><td>89</td></tr><tr><td>acl</td><td>0.93</td><td>88</td></tr><tr><td>ccomp</td><td>0.96</td><td>50</td></tr><tr><td>parataxis</td><td>0.96</td><td>25</td></tr></table>

Table 4: Prepositional phrases.

# 3.5 Prepositional phrases

Table 4 collects shifts related to prepositions or clitics (case) and their complements (obl). BERT regularly treats prepositions as dependents of the token modified by the PP, while UD takes them to be headed by the complement noun:



BERT also regularly treats the complement as the preposition's dependent, in contrast to UD, which links it directly to the token modified by the PP:



This is especially interesting since here it is UD, not BERT, that prefers the root as a head, unlike in our other findings. It thus looks like a genuine syntactic difference. However, the pattern is no longer reliable when the PP modifies a non-root, as shown by the high shift ratios with embedded clauses as $H_{U}$ . The most prominent $H_{B}$ here was again root.

# 3.6 Summary

I draw four take-home messages:

1. The root is treated as a head far more by BERT than by UD, even across phrase boundaries.
2. BERT's overlap with UD drastically decreases in embeddings, displaying a lack of recursion.
3. Headedness in PPs is systematically flipped between UD and BERT.
4. Overall, BERT-parses commonly lack a coherent linguistic interpretation.
# 4 Discussion

The results are not easily explained by some trivial non-linguistic property. Locality does not account for BERT's deviations from UD, since the average head-dependent distance is actually higher in BERT-parses (Section 3.1). Another initial possibility could be that BERT mimics naive right-chain performance.<sup>8</sup> However, most examples in Sections 3.2-3.5 involve BERT assigning the head leftward (i.e., the dependent rightward). Sometimes this even goes directly against right-chain-like annotation in UD, as in example (11) (Section 3.4).

It is also worth raising the controversial status of the UD format itself (cf. Rehbein et al. 2017; Osborne and Gerdes 2019). The central issue here concerns function words, which UD treats as dependents of content words – going against alternative formats such as Surface-syntactic Universal Dependencies (SUD) (Gerdes et al., 2018), where these relations are reversed. The corresponding distinction appears in our results as well, with respect to prepositions and NPs (Section 3.5). BERT's performance might thus accord better with alternative formats to UD, such as SUD.

That said, most discrepancies discussed in Section 3 are not specific only to UD. All mainstream syntactic frameworks distinguish between arguments/modifiers of main and embedded clauses (Sections 3.2, 3.4), and treat possessors, determiners, numerals, and adjectives as modifying nouns rather than verbs (Sections 3.3, 3.4). With the possible exception of (root-modifying) PPs (Section 3.5), the shifts are not made linguistically coherent by minor changes to the syntactic formalism.

# 5 Conclusions and future work

This study uncovered several discrepancies between BERT and UD. While some were syntactically interpretable, BERT's prevailing tendency to treat the root as a head across phrase boundaries lacks a clear linguistic analogy. This calls into question the idea that BERT should be interpreted in line with traditional grammatical formalisms. Instead, it highlights the need to explain LLMs in their own terms – avoiding reliance on a priori linguistic assumptions not motivated by LLMs themselves.

# Limitations

This short paper focused on one model architecture (BERT), one parameter-free probing technique (perturbed masking), and one English dataset (PUD). Extending the work to cover multiple variants of each is an important future prospect. I would especially highlight the importance of inter-lingual comparison, as well as more careful attention to the assumptions behind the linguistic formalism.

Methodologically, this study combined quantitative and qualitative analysis, both of which have limitations. Numerical information alone (in Tables 1-4) is insufficient for yielding thorough syntactic details on dependent-head shifts. Obtaining such further analyses requires assessing specific parse-pairs between BERT and UD, which is how the example cases were attained. But – as manual work – this is bound to have smaller coverage. Without seeing any easy way out of this trade-off, I emphasize the need for further work extending both the quantitative and qualitative coverage of related phenomena. I hope to have provided a fruitful starting point for this line of research.

# Ethics Statement

Prior source code and data used in the experiments are available as open source, with links given in the paper (Section 2.1). No privacy-sensitive or otherwise harmful data was used, and no experiments on humans or non-human animals were conducted. The source code of the experiments is likewise made available as open source (Section 2.3).

# Acknowledgements

I thank Jörg Tiedemann and Timothee Mickus for helpful discussions related to the paper. This project was funded by the Academy of Finland (decision number 350775).
# References

Steven Abney. 1987. The English Noun Phrase in its Sentential Aspect. PhD thesis, Massachusetts Institute of Technology.
Mark Baker. 2003. Lexical Categories. Cambridge University Press, Cambridge.
Yonatan Belinkov. 2022. Probing classifiers: Promises, shortcomings, and advances. Computational Linguistics, 48(1):207-219.
Tommi Buder-Gröndahl. 2023. The ambiguity of BERTology: what do large language models represent? Synthese, 203:15.
Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does BERT look at? An analysis of BERT's attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286.
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics, pages 4171-4186.
Robert M. W. Dixon. 2004. Adjective classes in typological perspective. In Robert M. W. Dixon and Alexandra Y. Aikhenvald, editors, Explorations in Linguistic Typology 1, pages 1-49. Oxford University Press, New York.
Jason M. Eisner. 1996. Three new probabilistic models for dependency parsing: An exploration. In Proceedings of the 16th Conference on Computational Linguistics: Volume 1, pages 340-345.
Kim Gerdes, Bruno Guillaume, Sylvain Kahane, and Guy Perrier. 2018. SUD or surface-syntactic Universal Dependencies: An annotation scheme near-isomorphic to UD. In Proceedings of the Second Workshop on Universal Dependencies (UDW 2018), pages 66-74.
John Hewitt and Christopher D. Manning. 2019. A structural probe for finding syntax in word representations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4129-4138.
Patricia Cabredo Hofherr and Ora Matushansky, editors. 2010. Adjectives: Formal Analyses in Syntax and Semantics. John Benjamins, Amsterdam.
Artur Kulmizev and Joakim Nivre. 2022. Schrödinger's tree – on syntax and neural language models. Frontiers in Artificial Intelligence, 5.
Ilia Kuznetsov and Iryna Gurevych. 2020. A matter of framing: The impact of linguistic formalism on probing results. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, pages 171-182.
Karim Lasri, Tiago Pimentel, Alessandro Lenci, Thierry Poibeau, and Ryan Cotterell. 2022. Probing for the usage of grammatical number. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics, Volume 1: Long Papers, pages 8818-8831.
Christopher D. Manning, Kevin Clark, and John Hewitt. 2020. Emergent linguistic structure in artificial neural networks trained by self-supervision. PNAS, 117(48):30046-30054.
David Mareček and Rudolf Rosa. 2019. From balustrades to Pierre Vinken: Looking for syntax in transformer self-attentions. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 263-275.
Jingcheng Niu, Wenjie Lu, Eric Corlett, and Gerald Penn. 2022. Using Roark-Hollingshead distance to probe BERT's syntactic competence. In Proceedings of the Fifth BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP, pages 325-334.
Timothy Osborne and Kim Gerdes. 2019. The status of function words in dependency grammar: A critique of Universal Dependencies (UD). Glossa, 4(1):17.
Ines Rehbein, Julius Steen, Bich-Ngoc Do, and Anette Frank. 2017. Universal Dependencies are hard to parse – or are they? In Proceedings of the Fourth International Conference on Dependency Linguistics, pages 218-228.
Anna Rogers, Olga Kovaleva, and Anna Rumshisky. 2020. A primer in BERTology: What we know about how BERT works. Transactions of the Association for Computational Linguistics, 8:842-866.
Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019. BERT rediscovers the classical NLP pipeline. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4593-4601.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 6000-6010.
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45.
Zhiyong Wu, Yun Chen, Ben Kao, and Qun Liu. 2020. Perturbed masking: Parameter-free probing for analyzing and interpreting BERT. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4166-4176.
Daniel Zeman, Martin Popel, Milan Straka, Jan Hajič, Joakim Nivre, Filip Ginter, Juhani Luotolahti, Sampo Pyysalo, Slav Petrov, Martin Potthast, Francis Tyers, Elena Badmaeva, Memduh Gokirmak, Anna Nedoluzhko, Silvie Cinková, Jan Hajič jr., Jaroslava Hlaváčová, Václava Kettnerová, Zdeňka Urešová, Jenna Kanerva, Stina Ojala, Anna Missilä, Christopher D. Manning, Sebastian Schuster, Siva Reddy, Dima Taji, Nizar Habash, Herman Leung, Marie-Catherine de Marneffe, Manuela Sanguinetti, Maria Simi, Hiroshi Kanayama, Valeria de Paiva, Kira Droganova, Héctor Martínez Alonso, Çağrı Çöltekin, Umut Sulubacak, Hans Uszkoreit, Vivien Macketanz, Aljoscha Burchardt, Kim Harris, Katrin Marheinecke, Georg Rehm, Tolga Kayadelen, Mohammed Attia, Ali Elkahky, Zhuoran Yu, Emily Pitler, Saran Lertpradit, Michael Mandl, Jesse Kirchner, Hector Fernandez Alcalde, Jana Strnadová, Esha Banerjee, Ruli Manurung, Antonio Stella, Atsuko Shimada, Sookyoung Kwak, Gustavo Mendonça, Tatiana Lando, Rattima Nitisaroj, and Josie Li. 2017. CoNLL 2017 shared task: Multilingual parsing from raw text to Universal Dependencies. In CoNLL 2017 Shared Task, pages 1-19.
# A Appendix: Discarded data

The algorithm for generating a dependency graph – obtained from Wu et al. (2020) – assumes that token IDs are unique and match positions in the sentence. However, in some coordinated sentences, the UD parse has the same ID appearing on two consecutive tokens. The BERT-parse, in turn, treats the repeated tokens as having separate IDs, which creates a disparity. Table 5 shows an example:

<table><tr><td>Token</td><td>Dep</td><td>ID (UD)</td><td>ID (BERT)</td></tr><tr><td>Durán</td><td>nsubj</td><td>1</td><td>1</td></tr><tr><td>acts</td><td>root</td><td>2</td><td>2</td></tr><tr><td>acts</td><td>conj</td><td>2</td><td>3</td></tr><tr><td>as</td><td>case</td><td>3</td><td>4</td></tr><tr><td>spokesman</td><td>obl</td><td>4</td><td>5</td></tr><tr><td>and</td><td>cc</td><td>5</td><td>6</td></tr><tr><td>Ángel</td><td>conj</td><td>6</td><td>7</td></tr><tr><td>Pintado</td><td>flat</td><td>7</td><td>8</td></tr><tr><td>as</td><td>case</td><td>8</td><td>9</td></tr><tr><td>treasurer</td><td>obl</td><td>9</td><td>10</td></tr></table>

Table 5: Mismatch between UD and BERT in token IDs.

Here, the verb (acts) is repeated since it serves a double role as the root and a conjunct. UD assigns the same ID (2) to both instances, but BERT uses an increasing counter of IDs. Hence, after the repetition, the respective token IDs between UD and BERT no longer match. Since dependent-head pairs are encoded in terms of IDs, this results in artificial disparities between the parses.

Because the number of such sentences in the PUD data was marginal (7), I discarded them in the experiments to avoid this problem. However, the original UAS, UUAS, and NED scores – obtained by replicating Wu et al. (2020) – are calculated from the full PUD data containing these sentences (see Footnote 6).
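The filtering step itself is a simple uniqueness check over token IDs; a sketch under the same assumed (id, head, deprel) representation as in the earlier sketches:

```python
def filter_sentences(sentences):
    """Drop sentence pairs whose UD parse repeats a token ID (7 cases in PUD)."""
    def has_repeated_ids(ud_tokens):
        ids = [tid for tid, _, _ in ud_tokens]
        return len(ids) != len(set(ids))
    return [(ud, bert) for ud, bert in sentences if not has_repeated_ids(ud)]
```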
# B Appendix: Complete results

Table 6 displays each $Dep$ that was subject to a dependent-head shift between BERT and UD. Tables 7-8 show the same per $H_{U}$ and $H_{B}$ , respectively. Table 9 lists all shifts that appeared at least 20 times in the format $Dep$ – $H_{U}$ – $H_{B}$ . This comprises the data discussed in the main paper, from which Tables 1-4 are derived.

<table><tr><td>Dep</td><td>Ratio</td><td>Count</td></tr><tr><td>case</td><td>0.7251</td><td>1799</td></tr><tr><td>punct</td><td>0.5135</td><td>1252</td></tr><tr><td>det</td><td>0.5433</td><td>1105</td></tr><tr><td>nmod</td><td>0.8500</td><td>912</td></tr><tr><td>obl</td><td>0.7082</td><td>869</td></tr><tr><td>amod</td><td>0.5402</td><td>719</td></tr><tr><td>nsubj</td><td>0.4683</td><td>650</td></tr><tr><td>compound</td><td>0.6675</td><td>538</td></tr><tr><td>conj</td><td>0.8176</td><td>511</td></tr><tr><td>mark</td><td>0.7964</td><td>442</td></tr><tr><td>obj</td><td>0.5011</td><td>438</td></tr><tr><td>cc</td><td>0.7615</td><td>431</td></tr><tr><td>advmod</td><td>0.5035</td><td>426</td></tr><tr><td>nmod:poss</td><td>0.6703</td><td>244</td></tr><tr><td>advcl</td><td>0.7158</td><td>209</td></tr><tr><td>aux</td><td>0.4474</td><td>183</td></tr><tr><td>acl:relcl</td><td>0.8483</td><td>179</td></tr><tr><td>xcomp</td><td>0.5815</td><td>157</td></tr><tr><td>nummod</td><td>0.6071</td><td>153</td></tr><tr><td>nsubj:pass</td><td>0.5720</td><td>135</td></tr><tr><td>acl</td><td>0.6895</td><td>131</td></tr><tr><td>appos</td><td>0.8310</td><td>118</td></tr><tr><td>flat</td><td>0.4978</td><td>114</td></tr><tr><td>cop</td><td>0.3270</td><td>103</td></tr><tr><td>ccomp</td><td>0.7259</td><td>98</td></tr><tr><td>aux:pass</td><td>0.2915</td><td>79</td></tr><tr><td>parataxis</td><td>0.5979</td><td>58</td></tr><tr><td>fixed</td><td>0.5243</td><td>54</td></tr><tr><td>root</td><td>0.0363</td><td>36</td></tr><tr><td>compound:prt</td><td>0.4714</td><td>33</td></tr><tr><td>nmod:tmod</td><td>0.6667</td><td>26</td></tr><tr><td>csubj</td><td>0.5926</td><td>16</td></tr><tr><td>expl</td><td>0.2459</td><td>15</td></tr><tr><td>obl:npmod</td><td>0.7000</td><td>14</td></tr><tr><td>obl:tmod</td><td>0.6111</td><td>11</td></tr><tr><td>nmod:npmod</td><td>0.5263</td><td>10</td></tr><tr><td>det:predet</td><td>0.8889</td><td>8</td></tr><tr><td>cc:preconj</td><td>0.5455</td><td>6</td></tr><tr><td>csubj:pass</td><td>1.0000</td><td>3</td></tr><tr><td>dislocated</td><td>1.0000</td><td>2</td></tr><tr><td>reparandum</td><td>1.0000</td><td>1</td></tr><tr><td>discourse</td><td>1.0000</td><td>1</td></tr><tr><td>iobj</td><td>0.1000</td><td>1</td></tr></table>

Table 6: All dependent-head shifts ordered by $Dep$ ("Ratio": ratio of shifts among all tokens with the $Dep$ ).

<table><tr><td>HU</td><td>Ratio</td><td>Count</td></tr><tr><td>obl</td><td>0.6802</td><td>2048</td></tr><tr><td>root</td><td>0.2664</td><td>1694</td></tr><tr><td>nmod</td><td>0.6788</td><td>1655</td></tr><tr><td>conj</td><td>0.7654</td><td>1292</td></tr><tr><td>obj</td><td>0.7283</td><td>946</td></tr><tr><td>nsubj</td><td>0.6651</td><td>872</td></tr><tr><td>advcl</td><td>0.7791</td><td>663</td></tr><tr><td>acl:relcl</td><td>0.8109</td><td>579</td></tr><tr><td>xcomp</td><td>0.8168</td><td>495</td></tr><tr><td>ccomp</td><td>0.8327</td><td>458</td></tr><tr><td>acl</td><td>0.7762</td><td>281</td></tr><tr><td>appos</td><td>0.7301</td><td>238</td></tr><tr><td>parataxis</td><td>0.7409</td><td>223</td></tr><tr><td>nsubj:pass</td><td>0.6494</td><td>176</td></tr><tr><td>amod</td><td>0.7368</td><td>140</td></tr><tr><td>nmod:poss</td><td>0.7707</td><td>121</td></tr><tr><td>compound</td><td>0.6289</td><td>100</td></tr><tr><td>advmod</td><td>0.7810</td><td>82</td></tr><tr><td>csubj</td><td>0.7703</td><td>57</td></tr><tr><td>nummod</td><td>0.8036</td><td>45</td></tr><tr><td>flat</td><td>0.8276</td><td>24</td></tr><tr><td>cc</td><td>0.8750</td><td>14</td></tr><tr><td>obl:npmod</td><td>0.6667</td><td>14</td></tr><tr><td>obl:tmod</td><td>0.5833</td><td>14</td></tr><tr><td>csubj:pass</td><td>0.8667</td><td>13</td></tr><tr><td>mark</td><td>0.6000</td><td>9</td></tr><tr><td>nmod:tmod</td><td>0.2857</td><td>8</td></tr><tr><td>case</td><td>0.1591</td><td>7</td></tr><tr><td>dislocated</td><td>1.0000</td><td>6</td></tr><tr><td>nmod:npmod</td><td>0.8571</td><td>6</td></tr><tr><td>iobj</td><td>0.8333</td><td>5</td></tr><tr><td>dep</td><td>1.0000</td><td>2</td></tr><tr><td>det</td><td>0.6667</td><td>2</td></tr><tr><td>cc:preconj</td><td>1.0000</td><td>1</td></tr></table>

Table 7: All dependent-head shifts ordered by $H_{U}$ ("Ratio": ratio of shifts among all tokens with the $H_{U}$ ).

<table><tr><td>HB</td><td>Ratio</td><td>Count</td></tr><tr><td>root</td><td>0.4763</td><td>4244</td></tr><tr><td>case</td><td>0.9684</td><td>1135</td></tr><tr><td>amod</td><td>0.9386</td><td>764</td></tr><tr><td>compound</td><td>0.9107</td><td>602</td></tr><tr><td>nsubj</td><td>0.5525</td><td>542</td></tr><tr><td>obl</td><td>0.3431</td><td>503</td></tr><tr><td>nmod</td><td>0.3771</td><td>474</td></tr><tr><td>det</td><td>0.9978</td><td>453</td></tr><tr><td>punct</td><td>1.0000</td><td>404</td></tr><tr><td>obj</td><td>0.5306</td><td>399</td></tr><tr><td>advmod</td><td>0.9425</td><td>377</td></tr><tr><td>cc</td><td>0.9936</td><td>310</td></tr><tr><td>conj</td><td>0.4107</td><td>276</td></tr><tr><td>mark</td><td>0.9636</td><td>159</td></tr><tr><td>nummod</td><td>0.9341</td><td>156</td></tr><tr><td>advcl</td><td>0.4519</td><td>155</td></tr><tr><td>cop</td><td>1.0000</td><td>122</td></tr><tr><td>nsubj:pass</td><td>0.5622</td><td>122</td></tr><tr><td>nmod:poss</td><td>0.7707</td><td>121</td></tr><tr><td>aux</td><td>1.0000</td><td>119</td></tr><tr><td>xcomp</td><td>0.5174</td><td>119</td></tr><tr><td>acl</td><td>0.5622</td><td>104</td></tr><tr><td>flat</td><td>0.9533</td><td>102</td></tr><tr><td>aux:pass</td><td>1.0000</td><td>92</td></tr><tr><td>acl:relcl</td><td>0.3571</td><td>75</td></tr><tr><td>parataxis</td><td>0.4621</td><td>67</td></tr><tr><td>ccomp</td><td>0.3907</td><td>59</td></tr><tr><td>appos</td><td>0.3931</td><td>57</td></tr><tr><td>fixed</td><td>1.0000</td><td>55</td></tr><tr><td>compound:prt</td><td>1.0000</td><td>33</td></tr><tr><td>nmod:tmod</td><td>0.5455</td><td>24</td></tr><tr><td>expl</td><td>1.0000</td><td>14</td></tr><tr><td>obl:npmod</td><td>0.6316</td><td>12</td></tr><tr><td>det:predet</td><td>1.0000</td><td>9</td></tr><tr><td>nmod:npmod</td><td>0.9000</td><td>9</td></tr><tr><td>csubj</td><td>0.3462</td><td>9</td></tr><tr><td>cc:preconj</td><td>1.0000</td><td>4</td></tr><tr><td>obl:tmod</td><td>0.2308</td><td>3</td></tr><tr><td>reparandum</td><td>0.6667</td><td>2</td></tr><tr><td>dislocated</td><td>1.0000</td><td>1</td></tr><tr><td>discourse</td><td>1.0000</td><td>1</td></tr><tr><td>vocative</td><td>1.0000</td><td>1</td></tr><tr><td>csubj:pass</td><td>0.3333</td><td>1</td></tr></table>

Table 8: All dependent-head shifts ordered by $H_B$ ("Ratio": ratio of shifts among all tokens with the $H_B$ ).

<table><tr><td colspan="3">Dep-HU-HB shift (count)</td></tr><tr><td>case-obl-root (521)</td><td>case-nmod-root (231)</td><td>cc-conj-root (191)</td></tr><tr><td>det-obj-root (141)</td><td>det-nsubj-root (134)</td><td>case-nmod-obl (122)</td></tr><tr><td>punct-root-obl (117)</td><td>nmod-obl-root (107)</td><td>det-obl-case (101)</td></tr><tr><td>det-nmod-case (100)</td><td>case-nmod-obj (99)</td><td>obl-root-case (97)</td></tr><tr><td>mark-xcomp-root (87)</td><td>nmod-nsubj-root (85)</td><td>mark-advcl-root (84)</td></tr><tr><td>nmod-obj-root (83)</td><td>punct-root-nsubj (79)</td><td>case-nmod-nsubj (79)</td></tr><tr><td>case-nmod-nmod (73)</td><td>det-obl-amod (66)</td><td>nsubj-ccomp-root (66)</td></tr><tr><td>amod-obj-root (64)</td><td>det-obl-root (62)</td><td>amod-obl-root (61)</td></tr><tr><td>case-nmod:poss-root (56)</td><td>nmod-nmod-root (54)</td><td>punct-root-advmod (53)</td></tr><tr><td>case-obl-acl (52)</td><td>nsubj-acl:relcl-root (52)</td><td>amod-nsubj-root (49)</td></tr><tr><td>punct-root-punct (45)</td><td>compound-nsubj-root (45)</td><td>mark-ccomp-root (44)</td></tr><tr><td>compound-obl-root (44)</td><td>compound-nmod-root (43)</td><td>obl-xcomp-root (43)</td></tr><tr><td>obl-acl-root (43)</td><td>obl-acl:relcl-root (43)</td><td>punct-conj-cc (41)</td></tr><tr><td>obl-conj-root (41)</td><td>amod-obj-det (40)</td><td>obl-root-amod (40)</td></tr><tr><td>punct-root-nmod (38)</td><td>amod-nmod-root (38)</td><td>obl-advcl-root (38)</td></tr><tr><td>obl-root-compound (38)</td><td>nsubj-advcl-root (37)</td><td>obj-advcl-root (36)</td></tr><tr><td>nummod-obl-root (36)</td><td>punct-root-parataxis (35)</td><td>nsubj-root-amod (35)</td></tr><tr><td>obj-xcomp-root (35)</td><td>punct-conj-conj (35)</td><td>nmod-obl-case (34)</td></tr><tr><td>case-obl-advcl (33)</td><td>case-obl-conj (33)</td><td>punct-conj-root (32)</td></tr><tr><td>nmod-obj-case (32)</td><td>det-nmod-amod (31)</td><td>amod-nmod-case (31)</td></tr><tr><td>nmod-nmod-case (31)</td><td>nsubj-root-compound (31)</td><td>nmod:poss-obl-case (31)</td></tr><tr><td>punct-appos-root (30)</td><td>case-obl-acl:relcl (30)</td><td>conj-nmod-root (30)</td></tr><tr><td>case-nmod-det (29)</td><td>det-nsubj-amod (28)</td><td>nmod-obj-amod (28)</td></tr><tr><td>cc-conj-obl (27)</td><td>punct-conj-nmod (26)</td><td>case-nmod-conj (26)</td></tr><tr><td>det-nmod-root (26)</td><td>det-obj-advcl (26)</td><td>nmod-obl-compound (26)</td></tr><tr><td>det-nmod-compound (25)</td><td>nmod-conj-root (25)</td><td>compound-obj-root (25)</td></tr><tr><td>nsubj-conj-root (25)</td><td>obj-acl-root (25)</td><td>det-nsubj:pass-root (24)</td></tr><tr><td>obl-root-nmod (24)</td><td>conj-nsubj-root (24)</td><td>amod-obl-det (23)</td></tr><tr><td>nmod:poss-nmod-case (23)</td><td>nmod:poss-nsubj-root (23)</td><td>punct-conj-obl (22)</td></tr><tr><td>det-obj-amod (22)</td><td>obl-acl:relcl-case (22)</td><td>nsubj-root-case (22)</td></tr><tr><td>cc-conj-nmod (22)</td><td>advmod-advcl-root (22)</td><td>conj-nmod-cc (22)</td></tr><tr><td>nmod-nsubj-case (21)</td><td>obl-root-nummod (21)</td><td>flat-nsubj-root (21)</td></tr><tr><td>obj-acl:relcl-root (21)</td><td>acl-obj-root (21)</td><td>punct-root-det (20)</td></tr><tr><td>case-obl-xcomp (20)</td><td>nmod-obl-amod (20)</td><td>compound-obl-det (20)</td></tr><tr><td>compound-nmod-case (20)</td><td>obl-ccomp-root (20)</td><td></td></tr></table>

Table 9: $Dep$ – $H_{U}$ – $H_{B}$ shifts and their counts (minimum count: 20).
whatdoesparameterfreeprobingreallyuncover/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:767f7a0ad65acaacc19ae6ddb97625f832bb3135b8a85a600c5124de92b23560
size 904610
whatdoesparameterfreeprobingreallyuncover/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebc9b92d406c338c2e04948abd5682911934d54e24b92185eb7d6e0eed54f5d8
size 301736
zeroshotcrosslingualrerankingwithlargelanguagemodelsforlowresourcelanguages/3b48e9e0-529b-4b40-a25d-9321ae86f716_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:715cb3307829f6891e7252a07391d56248352040ba523c6437e3978b7f11fa88
size 50894
zeroshotcrosslingualrerankingwithlargelanguagemodelsforlowresourcelanguages/3b48e9e0-529b-4b40-a25d-9321ae86f716_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:14812a53388d62943b86f7339ea3402377632b29fd042cede186e37e8087ae18
size 62138