bibtex_url
stringlengths
41
53
acl_proceedings
stringlengths
38
50
bibtext
stringlengths
528
3.02k
abstract
stringlengths
17
2.35k
authors
listlengths
1
44
title
stringlengths
18
190
id
stringlengths
7
19
arxiv_id
stringlengths
10
10
GitHub
listlengths
1
1
paper_page
stringclasses
528 values
n_linked_authors
int64
-1
15
upvotes
int64
-1
77
num_comments
int64
-1
10
n_authors
int64
-1
52
Models
listlengths
0
100
Datasets
listlengths
0
15
Spaces
listlengths
0
46
paper_page_exists_pre_conf
int64
0
1
type
stringclasses
2 values
https://aclanthology.org/2023.conll-babylm.8.bib
https://aclanthology.org/2023.conll-babylm.8/
@inproceedings{edman-bylinina-2023-much, title = "Too Much Information: Keeping Training Simple for {B}aby{LM}s", author = "Edman, Lukas and Bylinina, Lisa", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ci...
No abstract found
[ "Edman, Lukas", "Bylinina, Lisa" ]
Too Much Information: Keeping Training Simple for BabyLMs
conll-babylm.8
2311.01955
[ "" ]
https://huggingface.co/papers/2311.01955
1
0
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.9.bib
https://aclanthology.org/2023.conll-babylm.9/
@inproceedings{chobey-etal-2023-training, title = "Can training neural language models on a curriculum with developmentally plausible data improve alignment with human reading behavior?", author = "Chobey, Aryaman and Smith, Oliver and Wang, Anzi and Prasad, Grusha", editor = "Warstadt,...
No abstract found
[ "Chobey, Aryaman", "Smith, Oliver", "Wang, Anzi", "Prasad, Grusha" ]
Can training neural language models on a curriculum with developmentally plausible data improve alignment with human reading behavior?
conll-babylm.9
2311.18761
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.10.bib
https://aclanthology.org/2023.conll-babylm.10/
@inproceedings{martinez-etal-2023-climb, title = "{CLIMB} {--} Curriculum Learning for Infant-inspired Model Building", author = "Martinez, Richard Diehl and McGovern, Hope and Goriely, Zebulon and Davis, Christopher and Caines, Andrew and Buttery, Paula and Beinborn, L...
No abstract found
[ "Martinez, Richard Diehl", "McGovern, Hope", "Goriely, Zebulon", "Davis, Christopher", "Caines, Andrew", "Buttery, Paula", "Beinborn, Lisa" ]
CLIMB – Curriculum Learning for Infant-inspired Model Building
conll-babylm.10
2311.08886
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.11.bib
https://aclanthology.org/2023.conll-babylm.11/
@inproceedings{amariucai-warstadt-2023-acquiring, title = "Acquiring Linguistic Knowledge from Multimodal Input", author = "Amariucai, Theodor and Warstadt, Alexander Scott", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chen...
No abstract found
[ "Amariucai, Theodor", "Warstadt, Alexander Scott" ]
Acquiring Linguistic Knowledge from Multimodal Input
conll-babylm.11
2402.17936
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.12.bib
https://aclanthology.org/2023.conll-babylm.12/
@inproceedings{steuer-etal-2023-large, title = "Large {GPT}-like Models are Bad Babies: A Closer Look at the Relationship between Linguistic Competence and Psycholinguistic Measures", author = "Steuer, Julius and Mosbach, Marius and Klakow, Dietrich", editor = "Warstadt, Alex and Muelle...
No abstract found
[ "Steuer, Julius", "Mosbach, Marius", "Klakow, Dietrich" ]
Large GPT-like Models are Bad Babies: A Closer Look at the Relationship between Linguistic Competence and Psycholinguistic Measures
conll-babylm.12
2311.04547
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.13.bib
https://aclanthology.org/2023.conll-babylm.13/
@inproceedings{zhang-etal-2023-babys, title = "Baby{'}s {C}o{T}hought: Leveraging Large Language Models for Enhanced Reasoning in Compact Models", author = {Zhang, Zheyu and Yang, Han and Ma, Bolei and R{\"u}gamer, David and Nie, Ercong}, editor = "Warstadt, Alex and Muell...
No abstract found
[ "Zhang, Zheyu", "Yang, Han", "Ma, Bolei", "R{\\\"u}gamer, David", "Nie, Ercong" ]
Baby's CoThought: Leveraging Large Language Models for Enhanced Reasoning in Compact Models
conll-babylm.13
2308.01684
[ "https://github.com/oooranz/baby-cothought" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.14.bib
https://aclanthology.org/2023.conll-babylm.14/
@inproceedings{veysel-cagatan-2023-toddlerberta, title = "{T}oddler{BERT}a: Exploiting {B}aby{BERT}a for Grammar Learning and Language Understanding", author = {Veysel {\c{C}}a{\u{g}}atan, {\"O}mer}, editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and ...
No abstract found
[ "Veysel {\\c{C}}a{\\u{g}}atan, {\\\"O}mer" ]
ToddlerBERTa: Exploiting BabyBERTa for Grammar Learning and Language Understanding
conll-babylm.14
2308.16336
[ "" ]
https://huggingface.co/papers/2308.16336
0
0
2
1
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.15.bib
https://aclanthology.org/2023.conll-babylm.15/
@inproceedings{thoma-etal-2023-cogmemlm, title = "{C}og{M}em{LM}: Human-Like Memory Mechanisms Improve Performance and Cognitive Plausibility of {LLM}s", author = "Thoma, Lukas and Weyers, Ivonne and {\c{C}}ano, Erion and Schweter, Stefan and Mueller, Jutta L and Roth, Benjami...
No abstract found
[ "Thoma, Lukas", "Weyers, Ivonne", "{\\c{C}}ano, Erion", "Schweter, Stefan", "Mueller, Jutta L", "Roth, Benjamin" ]
CogMemLM: Human-Like Memory Mechanisms Improve Performance and Cognitive Plausibility of LLMs
conll-babylm.15
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.16.bib
https://aclanthology.org/2023.conll-babylm.16/
@inproceedings{zhao-etal-2023-babystories, title = "{B}aby{S}tories: Can Reinforcement Learning Teach Baby Language Models to Write Better Stories?", author = "Zhao, Xingmeng and Wang, Tongnian and Osborn, Sheri and Rios, Anthony", editor = "Warstadt, Alex and Mueller, Aaron and...
No abstract found
[ "Zhao, Xingmeng", "Wang, Tongnian", "Osborn, Sheri", "Rios, Anthony" ]
BabyStories: Can Reinforcement Learning Teach Baby Language Models to Write Better Stories?
conll-babylm.16
2310.16681
[ "https://github.com/zephyr1022/babystories-utsa" ]
https://huggingface.co/papers/2310.16681
0
0
0
4
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.17.bib
https://aclanthology.org/2023.conll-babylm.17/
@inproceedings{debenedetto-2023-byte, title = "Byte-ranked Curriculum Learning for {B}aby{LM} Strict-small Shared Task 2023", author = "DeBenedetto, Justin", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Jua...
No abstract found
[ "DeBenedetto, Justin" ]
Byte-ranked Curriculum Learning for BabyLM Strict-small Shared Task 2023
conll-babylm.17
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.18.bib
https://aclanthology.org/2023.conll-babylm.18/
@inproceedings{cheng-etal-2023-mcgill, title = "{M}c{G}ill {B}aby{LM} Shared Task Submission: The Effects of Data Formatting and Structural Biases", author = "Cheng, Ziling and Aralikatte, Rahul and Porada, Ian and Spinoso-Di Piano, Cesare and Cheung, Jackie CK", editor = "Warsta...
No abstract found
[ "Cheng, Ziling", "Aralikatte, Rahul", "Porada, Ian", "Spinoso-Di Piano, Cesare", "Cheung, Jackie CK" ]
McGill BabyLM Shared Task Submission: The Effects of Data Formatting and Structural Biases
conll-babylm.18
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.19.bib
https://aclanthology.org/2023.conll-babylm.19/
@inproceedings{samuel-2023-mean, title = "Mean {BERT}s make erratic language teachers: the effectiveness of latent bootstrapping in low-resource settings", author = "Samuel, David", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chen...
No abstract found
[ "Samuel, David" ]
Mean BERTs make erratic language teachers: the effectiveness of latent bootstrapping in low-resource settings
conll-babylm.19
2310.19420
[ "https://github.com/ltgoslo/boot-bert" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.20.bib
https://aclanthology.org/2023.conll-babylm.20/
@inproceedings{georges-gabriel-charpentier-samuel-2023-layers, title = "Not all layers are equally as important: Every Layer Counts {BERT}", author = "Georges Gabriel Charpentier, Lucas and Samuel, David", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox...
No abstract found
[ "Georges Gabriel Charpentier, Lucas", "Samuel, David" ]
Not all layers are equally as important: Every Layer Counts BERT
conll-babylm.20
2311.02265
[ "" ]
https://huggingface.co/papers/2311.02265
0
1
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.21.bib
https://aclanthology.org/2023.conll-babylm.21/
@inproceedings{wolf-etal-2023-whisbert, title = "{W}his{BERT}: Multimodal Text-Audio Language Modeling on 100{M} Words", author = "Wolf, Lukas and Kotar, Klemen and Tuckute, Greta and Hosseini, Eghbal and I. Regev, Tamar and Gotlieb Wilcox, Ethan and Warstadt, Alexander...
No abstract found
[ "Wolf, Lukas", "Kotar, Klemen", "Tuckute, Greta", "Hosseini, Eghbal", "I. Regev, Tamar", "Gotlieb Wilcox, Ethan", "Warstadt, Alexander Scott" ]
WhisBERT: Multimodal Text-Audio Language Modeling on 100M Words
conll-babylm.21
2312.02931
[ "https://github.com/lu-wo/whisbert" ]
https://huggingface.co/papers/2312.02931
5
6
1
7
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.22.bib
https://aclanthology.org/2023.conll-babylm.22/
@inproceedings{hong-etal-2023-surprisal, title = "A surprisal oracle for active curriculum language modeling", author = "Hong, Xudong and Lo{\'a}iciga, Sharid and Sayeed, Asad", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zh...
No abstract found
[ "Hong, Xudong", "Lo{\\'a}iciga, Sharid", "Sayeed, Asad" ]
A surprisal oracle for active curriculum language modeling
conll-babylm.22
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.23.bib
https://aclanthology.org/2023.conll-babylm.23/
@inproceedings{mi-2023-mmi01, title = "Mmi01 at The {B}aby{LM} Challenge: Linguistically Motivated Curriculum Learning for Pretraining in Low-Resource Settings", author = "Mi, Maggie", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, C...
No abstract found
[ "Mi, Maggie" ]
Mmi01 at The BabyLM Challenge: Linguistically Motivated Curriculum Learning for Pretraining in Low-Resource Settings
conll-babylm.23
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.24.bib
https://aclanthology.org/2023.conll-babylm.24/
@inproceedings{timiryasov-tastet-2023-baby, title = "Baby Llama: knowledge distillation from an ensemble of teachers trained on a small dataset with no performance penalty", author = "Timiryasov, Inar and Tastet, Jean-Loup", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshe...
No abstract found
[ "Timiryasov, Inar", "Tastet, Jean-Loup" ]
Baby Llama: knowledge distillation from an ensemble of teachers trained on a small dataset with no performance penalty
conll-babylm.24
2308.02019
[ "https://github.com/timinar/babyllama" ]
https://huggingface.co/papers/2308.02019
0
0
0
2
[ "timinar/baby-llama-58m", "andrijdavid/baby-llama-58m-GGUF", "RichardErkhov/timinar_-_baby-llama-58m-4bits", "RichardErkhov/timinar_-_baby-llama-58m-8bits" ]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.25.bib
https://aclanthology.org/2023.conll-babylm.25/
@inproceedings{oba-etal-2023-babylm, title = "{B}aby{LM} Challenge: Curriculum learning based on sentence complexity approximating language acquisition", author = "Oba, Miyu and Haga, Akari and Fukatsu, Akiyo and Oseki, Yohei", editor = "Warstadt, Alex and Mueller, Aaron and ...
No abstract found
[ "Oba, Miyu", "Haga, Akari", "Fukatsu, Akiyo", "Oseki, Yohei" ]
BabyLM Challenge: Curriculum learning based on sentence complexity approximating language acquisition
conll-babylm.25
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.26.bib
https://aclanthology.org/2023.conll-babylm.26/
@inproceedings{berend-2023-better, title = "Better Together: Jointly Using Masked Latent Semantic Modeling and Masked Language Modeling for Sample Efficient Pre-training", author = "Berend, G{\'a}bor", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan an...
No abstract found
[ "Berend, G{\\'a}bor" ]
Better Together: Jointly Using Masked Latent Semantic Modeling and Masked Language Modeling for Sample Efficient Pre-training
conll-babylm.26
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.27.bib
https://aclanthology.org/2023.conll-babylm.27/
@inproceedings{govindarajan-etal-2023-lil, title = "Lil-Bevo: Explorations of Strategies for Training Language Models in More Humanlike Ways", author = "Govindarajan, Venkata S and Rodriguez, Juan Diego and Bostrom, Kaj and Mahowald, Kyle", editor = "Warstadt, Alex and Mueller, A...
No abstract found
[ "Govindarajan, Venkata S", "Rodriguez, Juan Diego", "Bostrom, Kaj", "Mahowald, Kyle" ]
Lil-Bevo: Explorations of Strategies for Training Language Models in More Humanlike Ways
conll-babylm.27
2310.17591
[ "https://github.com/venkatasg/lil-bevo" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.28.bib
https://aclanthology.org/2023.conll-babylm.28/
@inproceedings{xiao-etal-2023-towards, title = "Towards more Human-like Language Models based on Contextualizer Pretraining Strategy", author = "Xiao, Chenghao and Hudson, G Thomas and Al Moubayed, Noura", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and ...
No abstract found
[ "Xiao, Chenghao", "Hudson, G Thomas", "Al Moubayed, Noura" ]
Towards more Human-like Language Models based on Contextualizer Pretraining Strategy
conll-babylm.28
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.29.bib
https://aclanthology.org/2023.conll-babylm.29/
@inproceedings{momen-etal-2023-increasing, title = "Increasing The Performance of Cognitively Inspired Data-Efficient Language Models via Implicit Structure Building", author = "Momen, Omar and Arps, David and Kallmeyer, Laura", editor = "Warstadt, Alex and Mueller, Aaron and Cho...
No abstract found
[ "Momen, Omar", "Arps, David", "Kallmeyer, Laura" ]
Increasing The Performance of Cognitively Inspired Data-Efficient Language Models via Implicit Structure Building
conll-babylm.29
2310.20589
[ "https://github.com/omarmomen14/structformer-babylm" ]
https://huggingface.co/papers/2310.20589
1
0
0
3
[ "omarmomen/structroberta_sx_final", "omarmomen/babylm_tokenizer_32k", "omarmomen/structroberta_s1_final", "omarmomen/structroberta_s2_final", "omarmomen/structroberta_sx2_final", "omarmomen/transformer_base_final_2", "omarmomen/structformer_s1_final_with_pos", "omarmomen/structformer_s2_final_with_pos...
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.30.bib
https://aclanthology.org/2023.conll-babylm.30/
@inproceedings{bhardwaj-etal-2023-pre, title = "Pre-training {LLM}s using human-like development data corpus", author = "Bhardwaj, Khushi and Shah, Raj Sanjay and Varma, Sashank", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and ...
No abstract found
[ "Bhardwaj, Khushi", "Shah, Raj Sanjay", "Varma, Sashank" ]
Pre-training LLMs using human-like development data corpus
conll-babylm.30
2311.04666
[ "" ]
https://huggingface.co/papers/2311.04666
1
0
0
3
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.31.bib
https://aclanthology.org/2023.conll-babylm.31/
@inproceedings{opper-etal-2023-effect, title = "On the effect of curriculum learning with developmental data for grammar acquisition", author = "Opper, Mattia and Morrison, J. and Siddharth, N.", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, E...
No abstract found
[ "Opper, Mattia", "Morrison, J.", "Siddharth, N." ]
On the effect of curriculum learning with developmental data for grammar acquisition
conll-babylm.31
2311.00128
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.32.bib
https://aclanthology.org/2023.conll-babylm.32/
@inproceedings{borazjanizadeh-2023-optimizing, title = "Optimizing {GPT}-2 Pretraining on {B}aby{LM} Corpus with Difficulty-based Sentence Reordering", author = "Borazjanizadeh, Nasim", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, ...
No abstract found
[ "Borazjanizadeh, Nasim" ]
Optimizing GPT-2 Pretraining on BabyLM Corpus with Difficulty-based Sentence Reordering
conll-babylm.32
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.1.bib
https://aclanthology.org/2023.crac-main.1/
@inproceedings{de-langhe-etal-2023-filling, title = "Filling in the Gaps: Efficient Event Coreference Resolution using Graph Autoencoder Networks", author = "De Langhe, Loic and De Clercq, Orphee and Hoste, Veronique", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sa...
No abstract found
[ "De Langhe, Loic", "De Clercq, Orphee", "Hoste, Veronique" ]
Filling in the Gaps: Efficient Event Coreference Resolution using Graph Autoencoder Networks
crac-main.1
2310.11965
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.2.bib
https://aclanthology.org/2023.crac-main.2/
@inproceedings{doosterlinck-etal-2023-caw, title = "{CAW}-coref: Conjunction-Aware Word-level Coreference Resolution", author = "D{'}Oosterlinck, Karel and Bitew, Semere Kiros and Papineau, Brandon and Potts, Christopher and Demeester, Thomas and Develder, Chris", editor =...
No abstract found
[ "D{'}Oosterlinck, Karel", "Bitew, Semere Kiros", "Papineau, Brandon", "Potts, Christopher", "Demeester, Thomas", "Develder, Chris" ]
CAW-coref: Conjunction-Aware Word-level Coreference Resolution
crac-main.2
2310.06165
[ "https://github.com/kareldo/wl-coref" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.3.bib
https://aclanthology.org/2023.crac-main.3/
@inproceedings{wazni-sadrzadeh-2023-towards, title = "Towards Transparency in Coreference Resolution: A Quantum-Inspired Approach", author = "Wazni, Hadi and Sadrzadeh, Mehrnoosh", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktit...
No abstract found
[ "Wazni, Hadi", "Sadrzadeh, Mehrnoosh" ]
Towards Transparency in Coreference Resolution: A Quantum-Inspired Approach
crac-main.3
2312.00688
[ "https://github.com/hwazni/qcoref" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.4.bib
https://aclanthology.org/2023.crac-main.4/
@inproceedings{ye-etal-2023-scalar, title = "Scalar Anaphora: Annotating Degrees of Coreference in Text", author = "Ye, Bingyang and Tu, Jingxuan and Pustejovsky, James", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle ...
No abstract found
[ "Ye, Bingyang", "Tu, Jingxuan", "Pustejovsky, James" ]
Scalar Anaphora: Annotating Degrees of Coreference in Text
crac-main.4
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.5.bib
https://aclanthology.org/2023.crac-main.5/
@inproceedings{mullick-etal-2023-better, title = "Better Handling Coreference Resolution in Aspect Level Sentiment Classification by Fine-Tuning Language Models", author = "Mullick, Dhruv and Ghanem, Bilal and Fyshe, Alona", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradh...
No abstract found
[ "Mullick, Dhruv", "Ghanem, Bilal", "Fyshe, Alona" ]
Better Handling Coreference Resolution in Aspect Level Sentiment Classification by Fine-Tuning Language Models
crac-main.5
2307.05646
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.6.bib
https://aclanthology.org/2023.crac-main.6/
@inproceedings{simovic-chambers-2023-pragmatics, title = "The pragmatics of characters{'} mental perspectives in pronominal reference resolution", author = "Simovic, Tiana and Chambers, Craig", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo"...
No abstract found
[ "Simovic, Tiana", "Chambers, Craig" ]
The pragmatics of characters' mental perspectives in pronominal reference resolution
crac-main.6
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.7.bib
https://aclanthology.org/2023.crac-main.7/
@inproceedings{ates-etal-2023-marrs, title = "{MARRS}: Multimodal Reference Resolution System", author = "Ates, Halim Cagri and Bhargava, Shruti and Li, Site and Lu, Jiarui and Maddula, Siddhardha and Moniz, Joel Ruben Antony and Nalamalapu, Anil Kumar and Nguyen...
No abstract found
[ "Ates, Halim Cagri", "Bhargava, Shruti", "Li, Site", "Lu, Jiarui", "Maddula, Siddhardha", "Moniz, Joel Ruben Antony", "Nalamalapu, Anil Kumar", "Nguyen, Roman Hoang", "Ozyildirim, Melis", "Patel, Alkesh", "Piraviperumal, Dhivya", "Renkens, Vincent", "Samal, Ankit", "Tran, Thy", "Tseng, B...
MARRS: Multimodal Reference Resolution System
crac-main.7
2311.01650
[ "" ]
https://huggingface.co/papers/2311.01650
0
2
0
18
[]
[]
[]
1
Poster
https://aclanthology.org/2023.crac-main.8.bib
https://aclanthology.org/2023.crac-main.8/
@inproceedings{okulska-wisnios-2023-towards, title = "Towards Harmful Erotic Content Detection through Coreference-Driven Contextual Analysis", author = "Okulska, Inez and Wisnios, Emilia", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", ...
No abstract found
[ "Okulska, Inez", "Wisnios, Emilia" ]
Towards Harmful Erotic Content Detection through Coreference-Driven Contextual Analysis
crac-main.8
2310.14325
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.9.bib
https://aclanthology.org/2023.crac-main.9/
@inproceedings{rim-pustejovsky-2023-integrated, title = "Integrated Annotation of Event Structure, Object States, and Entity Coreference", author = "Rim, Kyeongmin and Pustejovsky, James", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", ...
No abstract found
[ "Rim, Kyeongmin", "Pustejovsky, James" ]
Integrated Annotation of Event Structure, Object States, and Entity Coreference
crac-main.9
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.1.bib
https://aclanthology.org/2023.crac-sharedtask.1/
@inproceedings{zabokrtsky-etal-2023-findings, title = "Findings of the Second Shared Task on Multilingual Coreference Resolution", author = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Konopik, Miloslav and Nedoluzhko, Anna and Nov{\'a}k, Michal and Ogrodniczuk, Maciej and Popel, M...
This paper summarizes the second edition of the shared task on multilingual coreference resolution, held with the CRAC 2023 workshop. Just like last year, participants of the shared task were to create trainable systems that detect mentions and group them based on identity coreference; however, this year{'}s edition us...
[ "{\\v{Z}}abokrtsk{\\'y}, Zden{\\v{e}}k", "Konopik, Miloslav", "Nedoluzhko, Anna", "Nov{\\'a}k, Michal", "Ogrodniczuk, Maciej", "Popel, Martin", "Prazak, Ondrej", "Sido, Jakub", "Zeman, Daniel" ]
Findings of the Second Shared Task on Multilingual Coreference Resolution
crac-sharedtask.1
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.2.bib
https://aclanthology.org/2023.crac-sharedtask.2/
@inproceedings{skachkova-etal-2023-multilingual, title = "Multilingual coreference resolution: Adapt and Generate", author = "Skachkova, Natalia and Anikina, Tatiana and Mokhova, Anna", editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Maciej", booktitle = "Proceedings o...
The paper presents two multilingual coreference resolution systems submitted for the CRAC Shared Task 2023. The DFKI-Adapt system achieves 61.86 F1 score on the shared task test data, outperforming the official baseline by 4.9 F1 points. This system uses a combination of different features and training settings, includ...
[ "Skachkova, Natalia", "Anikina, Tatiana", "Mokhova, Anna" ]
Multilingual coreference resolution: Adapt and Generate
crac-sharedtask.2
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.3.bib
https://aclanthology.org/2023.crac-sharedtask.3/
@inproceedings{pamay-arslan-etal-2023-neural, title = "Neural End-to-End Coreference Resolution using Morphological Information", author = {Pamay Arslan, Tu{\u{g}}ba and Acar, Kutay and Eryi{\u{g}}it, G{\"u}l{\c{s}}en}, editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Macie...
In morphologically rich languages, words consist of morphemes containing deeper information in morphology, and thus such languages may necessitate the use of morpheme-level representations as well as word representations. This study introduces a neural multilingual end-to-end coreference resolution system by incorporat...
[ "Pamay Arslan, Tu{\\u{g}}ba", "Acar, Kutay", "Eryi{\\u{g}}it, G{\\\"u}l{\\c{s}}en" ]
Neural End-to-End Coreference Resolution using Morphological Information
crac-sharedtask.3
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.4.bib
https://aclanthology.org/2023.crac-sharedtask.4/
@inproceedings{straka-2023-ufal, title = "{{\'U}FAL} {C}or{P}ipe at {CRAC} 2023: Larger Context Improves Multilingual Coreference Resolution", author = "Straka, Milan", editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Maciej", booktitle = "Proceedings of the CRAC 2023 Shared Task on ...
We present CorPipe, the winning entry to the CRAC 2023 Shared Task on Multilingual Coreference Resolution. Our system is an improved version of our earlier multilingual coreference pipeline, and it surpasses other participants by a large margin of 4.5 percent points. CorPipe first performs mention detection, followed b...
[ "Straka, Milan" ]
ÚFAL CorPipe at CRAC 2023: Larger Context Improves Multilingual Coreference Resolution
crac-sharedtask.4
2311.14391
[ "https://github.com/ufal/crac2023-corpipe" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.5.bib
https://aclanthology.org/2023.crac-sharedtask.5/
@inproceedings{porada-cheung-2023-mcgill, title = "{M}c{G}ill at {CRAC} 2023: Multilingual Generalization of Entity-Ranking Coreference Resolution Models", author = "Porada, Ian and Cheung, Jackie Chi Kit", editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Maciej", booktitle = ...
Our submission to the CRAC 2023 shared task, described herein, is an adapted entity-ranking model jointly trained on all 17 datasets spanning 12 languages. Our model outperforms the shared task baselines by a difference in F1 score of +8.47, achieving an ultimate F1 score of 65.43 and fourth place in the shared task. W...
[ "Porada, Ian", "Cheung, Jackie Chi Kit" ]
McGill at CRAC 2023: Multilingual Generalization of Entity-Ranking Coreference Resolution Models
crac-sharedtask.5
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.1.bib
https://aclanthology.org/2023.gem-1.1/
@inproceedings{theron-2023-contextualizing, title = "Contextualizing the Limits of Model {\&} Evaluation Dataset Curation on Semantic Similarity Classification Tasks", author = "Theron, Daniel", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and ...
This paper demonstrates how the limitations of pre-trained models and open evaluation datasets factor into assessing the performance of binary semantic similarity classification tasks. As (1) end-user-facing documentation around the curation of these datasets and pre-trained model training regimes is often not easily a...
[ "Theron, Daniel" ]
Contextualizing the Limits of Model & Evaluation Dataset Curation on Semantic Similarity Classification Tasks
gem-1.1
2311.04927
[ "" ]
https://huggingface.co/papers/2311.04927
1
0
0
1
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.2.bib
https://aclanthology.org/2023.gem-1.2/
@inproceedings{mendonca-etal-2023-dialogue, title = "Dialogue Quality and Emotion Annotations for Customer Support Conversations", author = "Mendonca, John and Pereira, Patr{\'\i}cia and Menezes, Miguel and Cabarr{\~a}o, Vera and Farinha, Ana C and Moniz, Helena and Lav...
Task-oriented conversational datasets often lack topic variability and linguistic diversity. However, with the advent of Large Language Models (LLMs) pretrained on extensive, multilingual and diverse text data, these limitations seem overcome. Nevertheless, their generalisability to different languages and domains in d...
[ "Mendonca, John", "Pereira, Patr{\\'\\i}cia", "Menezes, Miguel", "Cabarr{\\~a}o, Vera", "Farinha, Ana C", "Moniz, Helena", "Lavie, Alon", "Trancoso, Isabel" ]
Dialogue Quality and Emotion Annotations for Customer Support Conversations
gem-1.2
2311.13910
[ "https://github.com/johndmendonca/maia-dqe" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.3.bib
https://aclanthology.org/2023.gem-1.3/
@inproceedings{jensen-hojmark-2023-formalizing, title = "Formalizing content creation and evaluation methods for {AI}-generated social media content", author = "Jensen, Christian and H{\o}jmark, Axel", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, El...
This study explores the use of large language models (LLMs), such as ChatGPT and GPT-4, in creating high-quality text-based social media content for businesses on LinkedIn. We introduce a novel architecture incorporating external knowledge bases and a multi-step writing approach, which extracts facts from company websi...
[ "Jensen, Christian", "H{\\o}jmark, Axel" ]
Formalizing content creation and evaluation methods for AI-generated social media content
gem-1.3
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.4.bib
https://aclanthology.org/2023.gem-1.4/
@inproceedings{mehri-shwartz-2023-automatic, title = "Automatic Evaluation of Generative Models with Instruction Tuning", author = "Mehri, Shuhaib and Shwartz, Vered", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh...
Automatic evaluation of natural language generation has long been an elusive goal in NLP. A recent paradigm fine-tunes pre-trained language models to emulate human judgements for a particular task and evaluation criterion. Inspired by the generalization ability of instruction-tuned models, we propose a learned metric b...
[ "Mehri, Shuhaib", "Shwartz, Vered" ]
Automatic Evaluation of Generative Models with Instruction Tuning
gem-1.4
2310.20072
[ "https://github.com/shuhaibm/heap" ]
https://huggingface.co/papers/2310.20072
0
0
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.5.bib
https://aclanthology.org/2023.gem-1.5/
@inproceedings{du-etal-2023-effective, title = "Effective Proxy for Human Labeling: Ensemble Disagreement Scores in Large Language Models for Industrial {NLP}", author = "Du, Wei and Advani, Laksh and Gambhir, Yashmeet and Perry, Daniel and Shiralkar, Prashant and Xing, Zhengz...
Large language models (LLMs) have demonstrated significant capability to generalize across a large number of NLP tasks. For industry applications, it is imperative to assess the performance of the LLM on unlabeled production data from time to time to validate for a real-world setting. Human labeling to assess model err...
[ "Du, Wei", "Advani, Laksh", "Gambhir, Yashmeet", "Perry, Daniel", "Shiralkar, Prashant", "Xing, Zhengzheng", "Colak, Aaron" ]
Effective Proxy for Human Labeling: Ensemble Disagreement Scores in Large Language Models for Industrial NLP
gem-1.5
2309.05619
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.6.bib
https://aclanthology.org/2023.gem-1.6/
@inproceedings{oneil-etal-2023-automatic, title = "Automatic Reflection Generation for Peer-to-Peer Counseling", author = "O{'}neil, Emma and Sedoc, Jo{\~a}o and Yang, Diyi and Zhu, Haiyi and Ungar, Lyle", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\...
Online peer counseling platforms enable conversations between millions of people seeking and offering mental health support. Among counseling skills, reflective listening, i.e., capturing and returning to the client something the client has said, is important for positive therapeutic outcomes. We introduce a reflection...
[ "O{'}neil, Emma", "Sedoc, Jo{\\~a}o", "Yang, Diyi", "Zhu, Haiyi", "Ungar, Lyle" ]
Automatic Reflection Generation for Peer-to-Peer Counseling
gem-1.6
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.7.bib
https://aclanthology.org/2023.gem-1.7/
@inproceedings{harvill-etal-2023-one-shot, title = "One-Shot and Few-Shot Exemplification Modeling", author = "Harvill, John and Yoon, Hee Suk and Yoon, Eunseop and Hasegawa-Johnson, Mark and Yoo, Chang", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~...
Exemplification modeling is a task where the goal is to produce a viable example sentence that uses a target word with a target definition. The task is non-trivial for polysemous words, and previous works have only explored settings where ample labeled training data is available. In this paper, we demonstrate that exem...
[ "Harvill, John", "Yoon, Hee Suk", "Yoon, Eunseop", "Hasegawa-Johnson, Mark", "Yoo, Chang" ]
One-Shot and Few-Shot Exemplification Modeling
gem-1.7
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.8.bib
https://aclanthology.org/2023.gem-1.8/
@inproceedings{zhou-etal-2023-leveraging, title = "Leveraging Large Language Models for Enhanced Product Descriptions in e{C}ommerce", author = "Zhou, Jianghong and Liu, Bo and Acharya, Jhalak and Hong, Yao and Lee, Kuang-Chih and Wen, Musen", editor = "Gehrmann, Sebastian...
In the dynamic field of eCommerce, the quality and comprehensiveness of product descriptions are pivotal for enhancing search visibility and customer engagement. Effective product descriptions can address the {`}cold start{'} problem, align with market trends, and ultimately lead to increased click-through rates. Tradi...
[ "Zhou, Jianghong", "Liu, Bo", "Acharya, Jhalak", "Hong, Yao", "Lee, Kuang-Chih", "Wen, Musen" ]
Leveraging Large Language Models for Enhanced Product Descriptions in eCommerce
gem-1.8
2310.18357
[ "" ]
https://huggingface.co/papers/2310.18357
0
0
0
5
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.9.bib
https://aclanthology.org/2023.gem-1.9/
@inproceedings{amouyal-etal-2023-qampari, title = "{QAMPARI}: A Benchmark for Open-domain Questions with Many Answers", author = "Amouyal, Samuel and Wolfson, Tomer and Rubin, Ohad and Yoran, Ori and Herzig, Jonathan and Berant, Jonathan", editor = "Gehrmann, Sebastian an...
Existing benchmarks for open-domain question answering (ODQA) typically focus on questions whose answers are all in a single paragraph. By contrast, many natural questions, such as {``}What players were drafted by the Brooklyn Nets?{''} have a long list of answers extracted from multiple paragraphs. Answering such ques...
[ "Amouyal, Samuel", "Wolfson, Tomer", "Rubin, Ohad", "Yoran, Ori", "Herzig, Jonathan", "Berant, Jonathan" ]
QAMPARI: A Benchmark for Open-domain Questions with Many Answers
gem-1.9
2205.12665
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.10.bib
https://aclanthology.org/2023.gem-1.10/
@inproceedings{kour-etal-2023-unveiling, title = "Unveiling Safety Vulnerabilities of Large Language Models", author = "Kour, George and Zalmanovici, Marcel and Zwerdling, Naama and Goldbraich, Esther and Fandina, Ora and Anaby Tavor, Ateret and Raz, Orna and Far...
As large language models become more prevalent, their possible harmful or inappropriate responses are a cause for concern. This paper introduces a unique dataset containing adversarial examples in the form of questions, we call AttaQ, designed to provoke such harmful or inappropriate responses. We assess the efficacy o...
[ "Kour, George", "Zalmanovici, Marcel", "Zwerdling, Naama", "Goldbraich, Esther", "F", "ina, Ora", "Anaby Tavor, Ateret", "Raz, Orna", "Farchi, Eitan" ]
Unveiling Safety Vulnerabilities of Large Language Models
gem-1.10
2311.04124
[ "" ]
https://huggingface.co/papers/2311.04124
4
6
0
8
[]
[ "ibm/AttaQ" ]
[]
1
Poster
https://aclanthology.org/2023.gem-1.11.bib
https://aclanthology.org/2023.gem-1.11/
@inproceedings{mallick-etal-2023-adapting, title = "Adapting Pre-trained Generative Models for Extractive Question Answering", author = "Mallick, Prabir and Nayak, Tapas and Bhattacharya, Indrajit", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark...
Pre-trained Generative models such as BART, T5, etc. have gained prominence as a preferred method for text generation in various natural language processing tasks, including abstractive long-form question answering (QA) and summarization. However, the potential of generative models in extractive QA tasks, where discrim...
[ "Mallick, Prabir", "Nayak, Tapas", "Bhattacharya, Indrajit" ]
Adapting Pre-trained Generative Models for Extractive Question Answering
gem-1.11
2311.02961
[ "" ]
https://huggingface.co/papers/2311.02961
0
1
0
3
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.12.bib
https://aclanthology.org/2023.gem-1.12/
@inproceedings{rabinovich-etal-2023-predicting, title = "Predicting Question-Answering Performance of Large Language Models through Semantic Consistency", author = "Rabinovich, Ella and Ackerman, Samuel and Raz, Orna and Farchi, Eitan and Anaby Tavor, Ateret", editor = "Gehrmann,...
Semantic consistency of a language model is broadly defined as the model{'}s ability to produce semantically-equivalent outputs, given semantically-equivalent inputs. We address the task of assessing question-answering (QA) semantic consistency of contemporary large language models (LLMs) by manually creating a benchma...
[ "Rabinovich, Ella", "Ackerman, Samuel", "Raz, Orna", "Farchi, Eitan", "Anaby Tavor, Ateret" ]
Predicting Question-Answering Performance of Large Language Models through Semantic Consistency
gem-1.12
2311.01152
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.13.bib
https://aclanthology.org/2023.gem-1.13/
@inproceedings{yu-etal-2023-towards, title = "Towards Effective Long-Form {QA} with Evidence Augmentation", author = "Yu, Mengxia and Rosenthal, Sara and Bornea, Mihaela and Sil, Avi", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Eliza...
In this study, we focus on the challenge of improving Long-form Question Answering (LFQA) by extracting and effectively utilizing knowledge from a large set of retrieved passages. We first demonstrate the importance of accurate evidence retrieval for LFQA, showing that optimal extracted knowledge from passages signific...
[ "Yu, Mengxia", "Rosenthal, Sara", "Bornea, Mihaela", "Sil, Avi" ]
Towards Effective Long-Form QA with Evidence Augmentation
gem-1.13
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.14.bib
https://aclanthology.org/2023.gem-1.14/
@inproceedings{wang-sha-2023-harnessing, title = "Harnessing the Plug-and-Play Controller by Prompting", author = "Wang, Hao and Sha, Lei", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi ...
Controllable text generation is a growing field within natural language generation (NLG) that focuses on producing text that meets specific constraints in real-world applications. Previous approaches, such as plug-and-play controllers (PPCs), aimed to steer the properties of generated text in a flexible manner. However...
[ "Wang, Hao", "Sha, Lei" ]
Harnessing the Plug-and-Play Controller by Prompting
gem-1.14
2402.04160
[ "" ]
https://huggingface.co/papers/2402.04160
0
1
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.15.bib
https://aclanthology.org/2023.gem-1.15/
@inproceedings{kwak-etal-2023-context, title = "Context and Literacy Aware Learnable Metric for Text Simplification", author = "Kwak, Jeongwon and Park, Hyeryun and Kim, Kyungmo and Choi, Jinwook", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and ...
Automatic evaluation of text simplification is important; but assessing its transformation into simpler sentences can be challenging for various reasons. However, the most commonly used metric in text simplification, SARI, fails to capture the difficulty of generating words that are not present in the references, regar...
[ "Kwak, Jeongwon", "Park, Hyeryun", "Kim, Kyungmo", "Choi, Jinwook" ]
Context and Literacy Aware Learnable Metric for Text Simplification
gem-1.15
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.16.bib
https://aclanthology.org/2023.gem-1.16/
@inproceedings{abdullin-etal-2023-synthetic, title = "Synthetic Dialogue Dataset Generation using {LLM} Agents", author = "Abdullin, Yelaman and Molla, Diego and Ofoghi, Bahadorreza and Yearwood, John and Li, Qingyang", editor = "Gehrmann, Sebastian and Wang, Alex and ...
Linear programming (LP) problems are pervasive in real-life applications. However, despite their apparent simplicity, an untrained user may find it difficult to determine the linear model of their specific problem. We envisage the creation of a goal-oriented conversational agent that will engage in conversation with th...
[ "Abdullin, Yelaman", "Molla, Diego", "Ofoghi, Bahadorreza", "Yearwood, John", "Li, Qingyang" ]
Synthetic Dialogue Dataset Generation using LLM Agents
gem-1.16
2401.17461
[ "https://github.com/eabdullin/optimouse-quest" ]
https://huggingface.co/papers/2401.17461
1
1
0
5
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.17.bib
https://aclanthology.org/2023.gem-1.17/
@inproceedings{lee-etal-2023-empirical, title = "An Empirical {B}ayes Framework for Open-Domain Dialogue Generation", author = "Lee, Jing Yang and Lee, Kong Aik and Gan, Woon Seng", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and ...
To engage human users in meaningful conversation, open-domain dialogue agents are required to generate diverse and contextually coherent dialogue. Despite recent advancements, which can be attributed to the usage of pretrained language models, the generation of diverse and coherent dialogue remains an open research pro...
[ "Lee, Jing Yang", "Lee, Kong Aik", "Gan, Woon Seng" ]
An Empirical Bayes Framework for Open-Domain Dialogue Generation
gem-1.17
2311.10945
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.18.bib
https://aclanthology.org/2023.gem-1.18/
@inproceedings{imperial-tayyar-madabushi-2023-flesch, title = "Flesch or Fumble? Evaluating Readability Standard Alignment of Instruction-Tuned Language Models", author = "Imperial, Joseph Marvin and Tayyar Madabushi, Harish", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\...
Readability metrics and standards such as Flesch Kincaid Grade Level (FKGL) and the Common European Framework of Reference for Languages (CEFR) exist to guide teachers and educators to properly assess the complexity of educational materials before administering them for classroom use. In this study, we select a diverse...
[ "Imperial, Joseph Marvin", "Tayyar Madabushi, Harish" ]
Flesch or Fumble? Evaluating Readability Standard Alignment of Instruction-Tuned Language Models
gem-1.18
2309.05454
[ "https://github.com/imperialite/readability-standard-alignment" ]
https://huggingface.co/papers/2309.05454
1
0
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.19.bib
https://aclanthology.org/2023.gem-1.19/
@inproceedings{mcdanel-liu-2023-chatgpt, title = "{C}hat{GPT} as a {J}ava Decompiler", author = "Mcdanel, Bradley and Liu, Zhanhao", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi...
We propose a novel approach using instruction-tuned large language models (LLMs), such as ChatGPT, to automatically decompile entire Java classes. Our method relies only on a textual representation of the Java bytecode and corresponding unit tests generated from the bytecode. While no additional domain knowledge or fin...
[ "Mcdanel, Bradley", "Liu, Zhanhao" ]
ChatGPT as a Java Decompiler
gem-1.19
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.20.bib
https://aclanthology.org/2023.gem-1.20/
@inproceedings{demeter-etal-2023-multi, title = "Multi-domain Summarization from Leaderboards to Practice: Re-examining Automatic and Human Evaluation", author = "Demeter, David and Agarwal, Oshin and Ben Igeri, Simon and Sterbentz, Marko and Molino, Neil and Conroy, John and...
Existing literature does not give much guidance on how to build the best possible multi-domain summarization model from existing components. We present an extensive evaluation of popular pre-trained models on a wide range of datasets to inform the selection of both the model and the training data for robust summarizati...
[ "Demeter, David", "Agarwal, Oshin", "Ben Igeri, Simon", "Sterbentz, Marko", "Molino, Neil", "Conroy, John", "Nenkova, Ani" ]
Multi-domain Summarization from Leaderboards to Practice: Re-examining Automatic and Human Evaluation
gem-1.20
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.21.bib
https://aclanthology.org/2023.gem-1.21/
@inproceedings{barriere-etal-2023-targeted, title = "Targeted Image Data Augmentation Increases Basic Skills Captioning Robustness", author = "Barriere, Valentin and Del Rio, Felipe and Carvallo, Andres and Aspillaga, Carlos and Herrera-Berg, Eugenio and Buc, Cristian", ed...
Artificial neural networks typically struggle in generalizing to out-of-context examples. One reason for this limitation is caused by having datasets that incorporate only partial information regarding the potential correlational structure of the world. In this work, we propose TIDA (Targeted Image-editing Data Augment...
[ "Barriere, Valentin", "Del Rio, Felipe", "Carvallo, Andres", "Aspillaga, Carlos", "Herrera-Berg, Eugenio", "Buc, Cristian" ]
Targeted Image Data Augmentation Increases Basic Skills Captioning Robustness
gem-1.21
2309.15991
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.22.bib
https://aclanthology.org/2023.gem-1.22/
@inproceedings{ohmer-etal-2023-separating, title = "Separating form and meaning: Using self-consistency to quantify task understanding across multiple senses", author = "Ohmer, Xenia and Bruni, Elia and Hupkes, Dieuwke", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{...
At the staggering pace with which the capabilities of large language models (LLMs) are increasing, creating future-proof evaluation sets to assess their understanding becomes more and more challenging. In this paper, we propose a novel paradigm for evaluating LLMs which leverages the idea that correct world understandi...
[ "Ohmer, Xenia", "Bruni, Elia", "Hupkes, Dieuwke" ]
Separating form and meaning: Using self-consistency to quantify task understanding across multiple senses
gem-1.22
2305.11662
[ "https://github.com/xeniaohmer/multisense_consistency" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.23.bib
https://aclanthology.org/2023.gem-1.23/
@inproceedings{gatto-etal-2023-text, title = "Text Encoders Lack Knowledge: Leveraging Generative {LLM}s for Domain-Specific Semantic Textual Similarity", author = "Gatto, Joseph and Sharif, Omar and Seegmiller, Parker and Bohlman, Philip and Preum, Sarah", editor = "Gehrmann, Se...
Amidst the sharp rise in the evaluation of large language models (LLMs) on various tasks, we find that semantic textual similarity (STS) has been under-explored. In this study, we show that STS can be cast as a text generation problem while maintaining strong performance on multiple STS benchmarks. Additionally, we sho...
[ "Gatto, Joseph", "Sharif, Omar", "Seegmiller, Parker", "Bohlman, Philip", "Preum, Sarah" ]
Text Encoders Lack Knowledge: Leveraging Generative LLMs for Domain-Specific Semantic Textual Similarity
gem-1.23
2309.06541
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.24.bib
https://aclanthology.org/2023.gem-1.24/
@inproceedings{sasse-etal-2023-burst, title = "To Burst or Not to Burst: Generating and Quantifying Improbable Text", author = "Sasse, Kuleen and Sarioglu Kayi, Efsun and Barham, Samuel and Staley, Edward", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o ...
While large language models (LLMs) are extremely capable at text generation, their outputs are still distinguishable from human-authored text. We explore this separation across many metrics over text, many sampling techniques, many types of text data, and across two popular LLMs, LLaMA and Vicuna. Along the way, we int...
[ "Sasse, Kuleen", "Sarioglu Kayi, Efsun", "Barham, Samuel", "Staley, Edward" ]
To Burst or Not to Burst: Generating and Quantifying Improbable Text
gem-1.24
2401.15476
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.25.bib
https://aclanthology.org/2023.gem-1.25/
@inproceedings{fu-etal-2023-large, title = "Are Large Language Models Reliable Judges? A Study on the Factuality Evaluation Capabilities of {LLM}s", author = "Fu, Xue-Yong and Laskar, Md Tahmid Rahman and Chen, Cheng and Tn, Shashi Bhushan", editor = "Gehrmann, Sebastian and Wang...
In recent years, large language models (LLMs) have drawn significant attention due to their impressive emergent capabilities that were not observed in earlier language models. One emerging area where LLMs have been widely used in recent times is the utilization of LLMs as the evaluator of the texts generated by various...
[ "Fu, Xue-Yong", "Laskar, Md Tahmid Rahman", "Chen, Cheng", "Tn, Shashi Bhushan" ]
Are Large Language Models Reliable Judges? A Study on the Factuality Evaluation Capabilities of LLMs
gem-1.25
2311.00681
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.26.bib
https://aclanthology.org/2023.gem-1.26/
@inproceedings{roy-basu-2023-rankaug, title = "{R}ank{A}ug: Augmented data ranking for text classification", author = "Roy, Tiasa and Basu, Priyam", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu,...
Research on data generation and augmentation has been focused majorly around enhancing generation models, leaving a notable gap in the exploration and refinement of methods for evaluating synthetic data. There are several text similarity metrics within the context of generated data filtering which can impact the perfor...
[ "Roy, Tiasa", "Basu, Priyam" ]
RankAug: Augmented data ranking for text classification
gem-1.26
2311.04535
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.27.bib
https://aclanthology.org/2023.gem-1.27/
@inproceedings{caswell-etal-2023-separating, title = "Separating the Wheat from the Chaff with {BREAD}: An open-source benchmark and metrics to detect redundancy in text", author = "Caswell, Isaac and Wang, Lisa and Papadimitriou, Isabel", editor = "Gehrmann, Sebastian and Wang, Alex a...
Data quality is a problem that perpetually resurfaces throughout the field of NLP, regardless of task, domain, or architecture, and remains especially severe for lower-resource languages. A typical and insidious issue, affecting both training data and model output, is data that is repetitive and dominated by linguistic...
[ "Caswell, Isaac", "Wang, Lisa", "Papadimitriou, Isabel" ]
Separating the Wheat from the Chaff with BREAD: An open-source benchmark and metrics to detect redundancy in text
gem-1.27
2311.06440
[ "https://github.com/toizzy/bread" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.28.bib
https://aclanthology.org/2023.gem-1.28/
@inproceedings{boubdir-etal-2023-elo, title = "Elo Uncovered: Robustness and Best Practices in Language Model Evaluation", author = "Boubdir, Meriem and Kim, Edward and Ermis, Beyza and Hooker, Sara and Fadaee, Marzieh", editor = "Gehrmann, Sebastian and Wang, Alex and ...
In Natural Language Processing (NLP), the Elo rating system, well-established for ranking dynamic competitors in games like chess, has seen increasing adoption for evaluating Large Language Models (LLMs) through {``}A vs B{''} paired comparisons. However, while popular, the system{'}s suitability for assessing entities...
[ "Boubdir, Meriem", "Kim, Edward", "Ermis, Beyza", "Hooker, Sara", "Fadaee, Marzieh" ]
Elo Uncovered: Robustness and Best Practices in Language Model Evaluation
gem-1.28
2311.17295
[ "" ]
https://huggingface.co/papers/2311.17295
4
0
0
5
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.29.bib
https://aclanthology.org/2023.gem-1.29/
@inproceedings{lotfi-etal-2023-personalitychat, title = "{P}ersonality{C}hat: Conversation Distillation for Personalized Dialog Modeling with Facts and Traits", author = "Lotfi, Ehsan and De Bruyn, Maxime and Buhmann, Jeska and Daelemans, Walter", editor = "Gehrmann, Sebastian and ...
The new wave of Large Language Models (LLM) has offered an efficient tool to curate sizeable conversational datasets. So far studies have mainly focused on task-oriented or generic open-domain dialogs, and have not fully explored the ability of LLMs in following complicated prompts. In this work, we focus on personaliz...
[ "Lotfi, Ehsan", "De Bruyn, Maxime", "Buhmann, Jeska", "Daelemans, Walter" ]
PersonalityChat: Conversation Distillation for Personalized Dialog Modeling with Facts and Traits
gem-1.29
2401.07363
[ "https://github.com/elotfi/personalitychat" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.30.bib
https://aclanthology.org/2023.gem-1.30/
@inproceedings{chanthran-etal-2023-well, title = "How well {C}hat{GPT} understand {M}alaysian {E}nglish? An Evaluation on Named Entity Recognition and Relation Extraction", author = "Chanthran, Mohanraj and Soon, Lay-Ki and Fang, Ong Huey and Selvaretnam, Bhawani", editor = "Gehrmann, S...
Recently, ChatGPT has attracted a lot of interest from both researchers and the general public. While the performance of ChatGPT in Named Entity Recognition and Relation Extraction from Standard English texts is satisfactory, it remains to be seen if it can perform similarly for Malaysian English. Malaysian English is ...
[ "Chanthran, Mohanraj", "Soon, Lay-Ki", "Fang, Ong Huey", "Selvaretnam, Bhawani" ]
How well ChatGPT understand Malaysian English? An Evaluation on Named Entity Recognition and Relation Extraction
gem-1.30
2311.11583
[ "https://github.com/mohanraj-nlp/chatgpt-malaysian-english" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.31.bib
https://aclanthology.org/2023.gem-1.31/
@inproceedings{tikhonov-yamshchikov-2023-post, title = "Post {T}uring: Mapping the landscape of {LLM} Evaluation", author = "Tikhonov, Alexey and Yamshchikov, Ivan P.", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustub...
In the rapidly evolving landscape of Large Language Models (LLMs), introduction of well-defined and standardized evaluation methodologies remains a crucial challenge. This paper traces the historical trajectory of LLM evaluations, from the foundational questions posed by Alan Turing to the modern era of AI research. We...
[ "Tikhonov, Alexey", "Yamshchikov, Ivan P." ]
Post Turing: Mapping the landscape of LLM Evaluation
gem-1.31
2311.02049
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.32.bib
https://aclanthology.org/2023.gem-1.32/
@inproceedings{abburi-etal-2023-simple, title = "A Simple yet Efficient Ensemble Approach for {AI}-generated Text Detection", author = "Abburi, Harika and Roy, Kalyani and Suesserman, Michael and Pudota, Nirmala and Veeramani, Balaji and Bowen, Edward and Bhattacharya, ...
Recent Large Language Models (LLMs) have demonstrated remarkable capabilities in generating text that closely resembles human writing across wide range of styles and genres. However, such capabilities are prone to potential abuse, such as fake news generation, spam email creation, and misuse in academic assignments. He...
[ "Abburi, Harika", "Roy, Kalyani", "Suesserman, Michael", "Pudota, Nirmala", "Veeramani, Balaji", "Bowen, Edward", "Bhattacharya, Sanmitra" ]
A Simple yet Efficient Ensemble Approach for AI-generated Text Detection
gem-1.32
2311.03084
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.1.bib
https://aclanthology.org/2023.genbench-1.1/
@inproceedings{saini-etal-2023-90, title = "90{\%} F1 Score in Relation Triple Extraction: Is it Real?", author = "Saini, Pratik and Pal, Samiran and Nayak, Tapas and Bhattacharya, Indrajit", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and ...
Extracting relational triples from text is a crucial task for constructing knowledge bases. Recent advancements in joint entity and relation extraction models have demonstrated remarkable F1 scores ({\mbox{$\geq$}} 90{\%}) in accurately extracting relational triples from free text. However, these models have been evalu...
[ "Saini, Pratik", "Pal, Samiran", "Nayak, Tapas", "Bhattacharya, Indrajit" ]
90% F1 Score in Relation Triple Extraction: Is it Real?
genbench-1.1
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.2.bib
https://aclanthology.org/2023.genbench-1.2/
@inproceedings{diera-etal-2023-gencodesearchnet, title = "{G}en{C}ode{S}earch{N}et: A Benchmark Test Suite for Evaluating Generalization in Programming Language Understanding", author = "Diera, Andor and Dahou, Abdelhalim and Galke, Lukas and Karl, Fabian and Sihler, Florian and ...
Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. These capabilities are heavily influen...
[ "Diera, Andor", "Dahou, Abdelhalim", "Galke, Lukas", "Karl, Fabian", "Sihler, Florian", "Scherp, Ansgar" ]
GenCodeSearchNet: A Benchmark Test Suite for Evaluating Generalization in Programming Language Understanding
genbench-1.2
2311.09707
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.3.bib
https://aclanthology.org/2023.genbench-1.3/
@inproceedings{arora-etal-2023-adapt, title = "Adapt and Decompose: Efficient Generalization of Text-to-{SQL} via Domain Adapted Least-To-Most Prompting", author = "Arora, Aseem and Bhaisaheb, Shabbirhussain and Nigam, Harshit and Patwardhan, Manasi and Vig, Lovekesh and Shrof...
Cross-domain and cross-compositional generalization of Text-to-SQL semantic parsing is a challenging task. Existing Large Language Model (LLM) based solutions rely on inference-time retrieval of few-shot exemplars from the training set to synthesize a run-time prompt for each Natural Language (NL) test query. In contra...
[ "Arora, Aseem", "Bhaisaheb, Shabbirhussain", "Nigam, Harshit", "Patwardhan, Manasi", "Vig, Lovekesh", "Shroff, Gautam" ]
Adapt and Decompose: Efficient Generalization of Text-to-SQL via Domain Adapted Least-To-Most Prompting
genbench-1.3
2308.02582
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.4.bib
https://aclanthology.org/2023.genbench-1.4/
@inproceedings{javier-vazquez-martinez-etal-2023-evaluating, title = "Evaluating Neural Language Models as Cognitive Models of Language Acquisition", author = "V{\'a}zquez Mart{\'\i}nez, H{\'e}ctor and Lea Heuser, Annika and Yang, Charles and Kodner, Jordan", editor = "Hupkes, Dieuwke ...
The success of neural language models (LMs) on many technological tasks has brought about their potential relevance as scientific theories of language despite some clear differences between LM training and child language acquisition. In this paper we argue that some of the most prominent benchmarks for evaluating the s...
[ "V{\\'a}zquez Mart{\\'\\i}nez, H{\\'e}ctor", "Lea Heuser, Annika", "Yang, Charles", "Kodner, Jordan" ]
Evaluating Neural Language Models as Cognitive Models of Language Acquisition
genbench-1.4
2310.20093
[ "https://github.com/hjvm/benchmarking_acquisition" ]
https://huggingface.co/papers/2310.20093
0
0
0
4
[]
[]
[]
1
Poster
https://aclanthology.org/2023.genbench-1.5.bib
https://aclanthology.org/2023.genbench-1.5/
@inproceedings{mondal-etal-2023-robust, title = "Robust Code Summarization", author = "Mondal, Debanjan and Lodha, Abhilasha and Sahoo, Ankita and Kumari, Beena", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and K...
This paper delves into the intricacies of code summarization using advanced transformer-based language models. Through empirical studies, we evaluate the efficacy of code summarization by altering function and variable names to explore whether models truly understand code semantics or merely rely on textual cues. We ha...
[ "Mondal, Debanjan", "Lodha, Abhilasha", "Sahoo, Ankita", "Kumari, Beena" ]
Robust Code Summarization
genbench-1.5
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.6.bib
https://aclanthology.org/2023.genbench-1.6/
@inproceedings{stepanova-ross-2023-temporal, title = "Temporal Generalizability in Multimodal Misinformation Detection", author = {Stepanova, Nataliya and Ross, Bj{\"o}rn}, editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazem...
Misinformation detection models degrade in performance over time, but the precise causes of this remain under-researched, in particular for multimodal models. We present experiments investigating the impact of temporal shift on performance of multimodal automatic misinformation detection classifiers. Working with the r...
[ "Stepanova, Nataliya", "Ross, Bj{\\\"o}rn" ]
Temporal Generalizability in Multimodal Misinformation Detection
genbench-1.6
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.7.bib
https://aclanthology.org/2023.genbench-1.7/
@inproceedings{ginn-palmer-2023-robust, title = "Robust Generalization Strategies for Morpheme Glossing in an Endangered Language Documentation Context", author = "Ginn, Michael and Palmer, Alexis", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha...
Generalization is of particular importance in resource-constrained settings, where the available training data may represent only a small fraction of the distribution of possible texts. We investigate the ability of morpheme labeling models to generalize by evaluating their performance on unseen genres of text, and we ...
[ "Ginn, Michael", "Palmer, Alexis" ]
Robust Generalization Strategies for Morpheme Glossing in an Endangered Language Documentation Context
genbench-1.7
2311.02777
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.8.bib
https://aclanthology.org/2023.genbench-1.8/
@inproceedings{hung-etal-2023-walking, title = "Walking a Tightrope {--} Evaluating Large Language Models in High-Risk Domains", author = "Hung, Chia-Chien and Ben Rim, Wiem and Frost, Lindsay and Bruckner, Lars and Lawrence, Carolin", editor = "Hupkes, Dieuwke and Dankers...
High-risk domains pose unique challenges that require language models to provide accurate and safe responses. Despite the great success of large language models (LLMs), such as ChatGPT and its variants, their performance in high-risk domains remains unclear. Our study delves into an in-depth analysis of the performance...
[ "Hung, Chia-Chien", "Ben Rim, Wiem", "Frost, Lindsay", "Bruckner, Lars", "Lawrence, Carolin" ]
Walking a Tightrope – Evaluating Large Language Models in High-Risk Domains
genbench-1.8
2311.14966
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.9.bib
https://aclanthology.org/2023.genbench-1.9/
@inproceedings{zufle-etal-2023-latent, title = "Latent Feature-based Data Splits to Improve Generalisation Evaluation: A Hate Speech Detection Case Study", author = {Z{\"u}fle, Maike and Dankers, Verna and Titov, Ivan}, editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, K...
With the ever-growing presence of social media platforms comes the increased spread of harmful content and the need for robust hate speech detection systems. Such systems easily overfit to specific targets and keywords, and evaluating them without considering distribution shifts that might occur between train and test ...
[ "Z{\\\"u}fle, Maike", "Dankers, Verna", "Titov, Ivan" ]
Latent Feature-based Data Splits to Improve Generalisation Evaluation: A Hate Speech Detection Case Study
genbench-1.9
2311.10236
[ "https://github.com/maikezuefle/latent-feature-splits" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.10.bib
https://aclanthology.org/2023.genbench-1.10/
@inproceedings{kamali-kordjamshidi-2023-syntax, title = "Syntax-Guided Transformers: Elevating Compositional Generalization and Grounding in Multimodal Environments", author = "Kamali, Danial and Kordjamshidi, Parisa", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaa...
Compositional generalization, the ability of intelligent models to extrapolate understanding of components to novel compositions, is a fundamental yet challenging facet in AI research, especially within multimodal environments. In this work, we address this challenge by exploiting the syntactic structure of language to...
[ "Kamali, Danial", "Kordjamshidi, Parisa" ]
Syntax-Guided Transformers: Elevating Compositional Generalization and Grounding in Multimodal Environments
genbench-1.10
2311.04364
[ "" ]
https://huggingface.co/papers/2311.04364
1
0
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.genbench-1.11.bib
https://aclanthology.org/2023.genbench-1.11/
@inproceedings{reymond-steinert-threlkeld-2023-mscan, title = "m{SCAN}: A Dataset for Multilingual Compositional Generalisation Evaluation", author = "Reymond, Am{\'e}lie and Steinert-Threlkeld, Shane", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and S...
Language models achieve remarkable results on a variety of tasks, yet still struggle on compositional generalisation benchmarks. The majority of these benchmarks evaluate performance in English only, leaving us with the question of whether these results generalise to other languages. As an initial step to answering thi...
[ "Reymond, Am{\\'e}lie", "Steinert-Threlkeld, Shane" ]
mSCAN: A Dataset for Multilingual Compositional Generalisation Evaluation
genbench-1.11
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.12.bib
https://aclanthology.org/2023.genbench-1.12/
@inproceedings{wilson-frank-2023-inductive, title = "Inductive Bias Is in the Eye of the Beholder", author = "Wilson, Michael and Frank, Robert", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and ...
Due to the finite nature of any evidence used in learning, systematic generalization is crucially reliant on the presence of inductive bias (Mitchell, 1980). We examine inductive biases in different types of sequence-to-sequence neural network models, including CNNs, LSTMs (with and without attention), and transformers...
[ "Wilson, Michael", "Frank, Robert" ]
Inductive Bias Is in the Eye of the Beholder
genbench-1.12
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.13.bib
https://aclanthology.org/2023.genbench-1.13/
@inproceedings{merlo-etal-2023-blackbird, title = "Blackbird Language Matrices Tasks for Generalization", author = "Merlo, Paola and Jiang, Chunyang and Samo, Giuseppe and Nastase, Vivi", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Si...
To develop a system with near-human language capabilities, we need to understand current systems{'} generalisation and compositional abilities. We approach this by generating compositional, structured data, inspired from visual intelligence tests, that depend on the problem-solvers being able to disentangle objects and...
[ "Merlo, Paola", "Jiang, Chunyang", "Samo, Giuseppe", "Nastase, Vivi" ]
Blackbird Language Matrices Tasks for Generalization
genbench-1.13
2306.11444
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.14.bib
https://aclanthology.org/2023.genbench-1.14/
@inproceedings{milios-etal-2023-context, title = "In-Context Learning for Text Classification with Many Labels", author = "Milios, Aristides and Reddy, Siva and Bahdanau, Dzmitry", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv ...
In-context learning (ICL) using large language models for tasks with many labels is challenging due to the limited context window, which makes it difficult to fit a sufficient number of examples in the prompt. In this paper, we use a pre-trained dense retrieval model to bypass this limitation, giving the model only a p...
[ "Milios, Aristides", "Reddy, Siva", "Bahdanau, Dzmitry" ]
In-Context Learning for Text Classification with Many Labels
genbench-1.14
2309.10954
[ "" ]
https://huggingface.co/papers/2309.10954
0
0
0
3
[]
[]
[]
1
Poster
https://aclanthology.org/2023.genbench-1.15.bib
https://aclanthology.org/2023.genbench-1.15/
@inproceedings{zhifei-wang-steinert-threlkeld-2023-gqg, title = "{GQG}: Generalized Quantifier Generalization - A Dataset for Evaluating Quantifier Semantics Understanding in Language Models", author = "Zhifei Wang, Leroy and Steinert-Threlkeld, Shane", editor = "Hupkes, Dieuwke and Dankers, V...
We present a new dataset consisting of various quantifier expressions to evaluate the generalization abilities of language models. The dataset contains 18,360 prompts encompassing diverse quantifiers, forming the basis of a new framework for assessing semantic understanding in this domain. We test the effectiveness of ...
[ "Zhifei Wang, Leroy", "Steinert-Threlkeld, Shane" ]
GQG: Generalized Quantifier Generalization - A Dataset for Evaluating Quantifier Semantics Understanding in Language Models
genbench-1.15
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.16.bib
https://aclanthology.org/2023.genbench-1.16/
@inproceedings{pengpun-etal-2023-cross, title = "Cross-Lingual Data Augmentation For {T}hai Question-Answering", author = "Pengpun, Parinthapat and Udomcharoenchaikit, Can and Buaphet, Weerayut and Limkonchotiwat, Peerat", editor = "Hupkes, Dieuwke and Dankers, Verna and B...
This paper presents an innovative data augmentation framework with data quality control designed to enhance the robustness of Question Answering (QA) models in low-resource languages, particularly Thai. Recognizing the challenges posed by the scarcity and quality of training data, we leverage data augmentation techniqu...
[ "Pengpun, Parinthapat", "Udomcharoenchaikit, Can", "Buaphet, Weerayut", "Limkonchotiwat, Peerat" ]
Cross-Lingual Data Augmentation For Thai Question-Answering
genbench-1.16
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.17.bib
https://aclanthology.org/2023.genbench-1.17/
@inproceedings{moisio-etal-2023-using, title = "On using distribution-based compositionality assessment to evaluate compositional generalisation in machine translation", author = "Moisio, Anssi and Creutz, Mathias and Kurimo, Mikko", editor = "Hupkes, Dieuwke and Dankers, Verna and ...
Compositional generalisation (CG), in NLP and in machine learning more generally, has been assessed mostly using artificial datasets. It is important to develop benchmarks to assess CG also in real-world natural language tasks in order to understand the abilities and limitations of systems deployed in the wild. To this...
[ "Moisio, Anssi", "Creutz, Mathias", "Kurimo, Mikko" ]
On using distribution-based compositionality assessment to evaluate compositional generalisation in machine translation
genbench-1.17
2311.08249
[ "https://github.com/aalto-speech/dbca" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.18.bib
https://aclanthology.org/2023.genbench-1.18/
@inproceedings{somov-tutubalina-2023-shifted, title = "Shifted {PAUQ}: Distribution shift in text-to-{SQL}", author = "Somov, Oleg and Tutubalina, Elena", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein...
Semantic parsing plays a pivotal role in advancing the accessibility of human-computer interaction on a large scale. Spider, a widely recognized dataset for text2SQL, contains a wide range of natural language (NL) questions in English and corresponding SQL queries. Original splits of Spider and its adapted to Russian l...
[ "Somov, Oleg", "Tutubalina, Elena" ]
Shifted PAUQ: Distribution shift in text-to-SQL
genbench-1.18
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.1.bib
https://aclanthology.org/2023.lchange-1.1/
@inproceedings{ehrenworth-keith-2023-literary-intertextual, title = "Literary Intertextual Semantic Change Detection: Application and Motivation for Evaluating Models on Small Corpora", author = "Ehrenworth, Jackson and Keith, Katherine", editor = "Tahmasebi, Nina and Montariol, Syrielle and ...
Lexical semantic change detection is the study of how words change meaning between corpora. While Schlechtweg et al. (2020) standardized both datasets and evaluation metrics for this shared task, for those interested in applying semantic change detection models to small corpora{---}e.g., in the digital humanities{---}t...
[ "Ehrenworth, Jackson", "Keith, Katherine" ]
Literary Intertextual Semantic Change Detection: Application and Motivation for Evaluating Models on Small Corpora
lchange-1.1
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.2.bib
https://aclanthology.org/2023.lchange-1.2/
@inproceedings{lendvai-etal-2023-domain-adapting, title = "Domain-Adapting {BERT} for Attributing Manuscript, Century and Region in Pre-{M}odern {S}lavic Texts", author = "Lendvai, Piroska and Reichel, Uwe and Jouravel, Anna and Rabus, Achim and Renje, Elena", editor = "Tahmasebi...
Our study presents a stratified dataset compiled from six different Slavic bodies of text, for cross-linguistic and diachronic analyses of Slavic Pre-Modern language variants. We demonstrate unsupervised domain adaptation and supervised finetuning of BERT on these low-resource, historical Slavic variants, for the purpo...
[ "Lendvai, Piroska", "Reichel, Uwe", "Jouravel, Anna", "Rabus, Achim", "Renje, Elena" ]
Domain-Adapting BERT for Attributing Manuscript, Century and Region in Pre-Modern Slavic Texts
lchange-1.2
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.3.bib
https://aclanthology.org/2023.lchange-1.3/
@inproceedings{list-etal-2023-representing-computing, title = "Representing and Computing Uncertainty in Phonological Reconstruction", author = "List, Johann-Mattis and Hill, Nathan and Forkel, Robert and Blum, Frederic", editor = "Tahmasebi, Nina and Montariol, Syrielle and ...
Despite the inherently fuzzy nature of reconstructions in historical linguistics, most scholars do not represent their uncertainty when proposing proto-forms. With the increasing success of recently proposed approaches to automating certain aspects of the traditional comparative method, the formal representation of pro...
[ "List, Johann-Mattis", "Hill, Nathan", "Forkel, Robert", "Blum, Frederic" ]
Representing and Computing Uncertainty in Phonological Reconstruction
lchange-1.3
2310.12727
[ "https://github.com/lingpy/fuzzy" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.4.bib
https://aclanthology.org/2023.lchange-1.4/
@inproceedings{beck-kollner-2023-ghisbert-training, title = "{GH}is{BERT} {--} Training {BERT} from scratch for lexical semantic investigations across historical {G}erman language stages", author = {Beck, Christin and K{\"o}llner, Marisa}, editor = "Tahmasebi, Nina and Montariol, Syrielle and...
While static embeddings have dominated computational approaches to lexical semantic change for quite some time, recent approaches try to leverage the contextualized embeddings generated by the language model BERT for identifying semantic shifts in historical texts. However, despite their usability for detecting changes...
[ "Beck, Christin", "K{\\\"o}llner, Marisa" ]
GHisBERT – Training BERT from scratch for lexical semantic investigations across historical German language stages
lchange-1.4
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.5.bib
https://aclanthology.org/2023.lchange-1.5/
@inproceedings{jafari-etal-2023-longitudinal-study, title = "A longitudinal study about gradual changes in the {I}ranian Online Public Sphere pre and post of {`}Mahsa Moment{'}: Focusing on {T}witter", author = "Jafari, Sadegh and Fathi, Amin and Hajizadegan, Abolfazl and Kazemeini, Amirmoh...
Mahsa Amini{'}s death shocked Iranian society. The effects of this event and the subsequent tragedies in Iran not only in realspace but also in cyberspace, including Twitter, were tremendous and unimaginable. We explore how Twitter has changed after Mahsa Amini{'}s death by analyzing the sentiments of Iranian users in ...
[ "Jafari, Sadegh", "Fathi, Amin", "Hajizadegan, Abolfazl", "Kazemeini, Amirmohammad", "Eetemadi, Sauleh" ]
A longitudinal study about gradual changes in the Iranian Online Public Sphere pre and post of `Mahsa Moment': Focusing on Twitter
lchange-1.5
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.6.bib
https://aclanthology.org/2023.lchange-1.6/
@inproceedings{boholm-sayeed-2023-political-dogwhistles, title = "Political dogwhistles and community divergence in semantic change", author = "Boholm, Max and Sayeed, Asad", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and He...
We test whether the development of political dogwhistles can be observed using language change measures; specifically, does the development of a {``}hidden{''} message in a dogwhistle show up as differences in semantic change between communities over time? We take Swedish-language dogwhistles related to the on-going im...
[ "Boholm, Max", "Sayeed, Asad" ]
Political dogwhistles and community divergence in semantic change
lchange-1.6
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.7.bib
https://aclanthology.org/2023.lchange-1.7/
@inproceedings{dehouck-etal-2023-evosem-database, title = "{E}vo{S}em: A database of polysemous cognate sets", author = "Dehouck, Mathieu and Fran{\c{c}}ois, Alex and Kalyan, Siva and Pastor, Martial and Kletz, David", editor = "Tahmasebi, Nina and Montariol, Syrielle and...
Polysemies, or {``}colexifications{''}, are of great interest in cognitive and historical linguistics, since meanings that are frequently expressed by the same lexeme are likely to be conceptually similar, and lie along a common pathway of semantic change. We argue that these types of inferences can be more reliably dr...
[ "Dehouck, Mathieu", "Fran{\\c{c}}ois, Alex", "Kalyan, Siva", "Pastor, Martial", "Kletz, David" ]
EvoSem: A database of polysemous cognate sets
lchange-1.7
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.8.bib
https://aclanthology.org/2023.lchange-1.8/
@inproceedings{afanasev-2023-multi-lect, title = "Multi-lect automatic detection of {S}wadesh list items from raw corpus data in {E}ast {S}lavic languages", author = "Afanasev, Ilia", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and ...
The article introduces a novel task of multi-lect automatic detection of Swadesh list items from raw corpora. The task aids the early stageof historical linguistics study by helping the researcher compile word lists for further analysis.In this paper, I test multi-lect automatic detection on the East Slavic lects{'} da...
[ "Afanasev, Ilia" ]
Multi-lect automatic detection of Swadesh list items from raw corpus data in East Slavic languages
lchange-1.8
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.9.bib
https://aclanthology.org/2023.lchange-1.9/
@inproceedings{adams-etal-2023-anchors-embedding, title = "Anchors in Embedding Space: A Simple Concept Tracking Approach to Support Conceptual History Research", author = "Adams, Jetske and Larson, Martha and Verheul, Jaap and Boyden, Michael", editor = "Tahmasebi, Nina and Mont...
We introduce a simple concept tracking approach to support conceptual history research. Building on the existing practices of conceptual historians, we use dictionaries to identify {``}anchors{''}, which represent primary dimensions of meaning of a concept. Then, we create a plot showing how a key concept has evolved o...
[ "Adams, Jetske", "Larson, Martha", "Verheul, Jaap", "Boyden, Michael" ]
Anchors in Embedding Space: A Simple Concept Tracking Approach to Support Conceptual History Research
lchange-1.9
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.10.bib
https://aclanthology.org/2023.lchange-1.10/
@inproceedings{chen-etal-2023-chiwug-graph, title = "{C}hi{WUG}: A Graph-based Evaluation Dataset for {C}hinese Lexical Semantic Change Detection", author = "Chen, Jing and Chersoni, Emmanuele and Schlechtweg, Dominik and Prokic, Jelena and Huang, Chu-Ren", editor = "Tahmasebi, N...
Recent studies suggested that language models are efficient tools for measuring lexical semantic change. In our paper, we present the compilation of the first graph-based evaluation dataset for lexical semantic change in the context of the Chinese language, specifically covering the periods of pre- and post- Reform and...
[ "Chen, Jing", "Chersoni, Emmanuele", "Schlechtweg, Dominik", "Prokic, Jelena", "Huang, Chu-Ren" ]
ChiWUG: A Graph-based Evaluation Dataset for Chinese Lexical Semantic Change Detection
lchange-1.10
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.11.bib
https://aclanthology.org/2023.lchange-1.11/
@inproceedings{hoeken-etal-2023-towards-detecting, title = "Towards Detecting Lexical Change of Hate Speech in Historical Data", author = {Hoeken, Sanne and Spliethoff, Sophie and Schwandt, Silke and Zarrie{\ss}, Sina and Alacam, {\"O}zge}, editor = "Tahmasebi, Nina and Mo...
The investigation of lexical change has predominantly focused on generic language evolution, not suited for detecting shifts in a particular domain, such as hate speech. Our study introduces the task of identifying changes in lexical semantics related to hate speech within historical texts. We present an interdisciplin...
[ "Hoeken, Sanne", "Spliethoff, Sophie", "Schw", "t, Silke", "Zarrie{\\ss}, Sina", "Alacam, {\\\"O}zge" ]
Towards Detecting Lexical Change of Hate Speech in Historical Data
lchange-1.11
null
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster