| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:13:37.697136Z" |
| }, |
| "title": "Organizing Committee", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Talmor", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Akari", |
| "middle": [], |
| "last": "Asai", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Danish", |
| "middle": [], |
| "last": "Contractor", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Wallace", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Gautier", |
| "middle": [], |
| "last": "Izacard", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Huan", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jifan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Herzig", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Lamm", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Bartolo", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Mor", |
| "middle": [], |
| "last": "Geva", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Panupong", |
| "middle": [], |
| "last": "Pasupat", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Pedro", |
| "middle": [], |
| "last": "Rodriguez", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Rajarshi", |
| "middle": [], |
| "last": "Das", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Rodrigo", |
| "middle": [], |
| "last": "Nogueira", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Shayne", |
| "middle": [], |
| "last": "Longpre", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Shuohang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Xinya", |
| "middle": [], |
| "last": "Du", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Yichen", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Yizhong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Yuxiang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Cui", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [ |
| "Tulio" |
| ], |
| "last": "Ribeiro", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [], |
| "body_text": [ |
| { |
| "text": "Message from the Organizers Our workshop brings together researchers studying machine reading for question answering (MRQA). MRQA has emerged as an important testbed for evaluating how computer systems understand natural language, as well as a crucial technology for applications such as search engines and dialog systems. In recent years, MRQA systems have become much more accurate, and are even capable of retrieving evidence documents on the fly or answering without retrieved documents. Datasets and models have been developed to target many different aspects of the problem, including multi-hop reasoning, numerical reasoning, or commonsense reasoning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Despite this progress, there are still many important desiderata that most MRQA systems do not adequately consider: multilinguality and interpretability. In the 3rd MRQA workshop, we therefore focus on these two emerging and crucial aspects of question answering models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Systems today are predominantly evaluated by measuring accuracy on English benchmarks, yet an ideal question answering system would support a diverse range of languages. With recent developments of multilingual question answering datasets, it is timely to study how MRQA models can be designed to support typologically diverse languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Many systems produce correct answers for the wrong reason and are unable to explain their predictions. Given the opaque nature of modern large-scale pre-trained neural models, it is important to study how MRQA systems can offer users a way to trust (or not trust) an otherwise black-box model's predictions, as well as offer practitioners ways to diagnose critical modeling issues or dataset biases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "As in past years, we sought paper submissions of previously unpublished work. To reflect our focus on our two themes, we had separate tracks for multilinguality and interpretability-related papers, as well as a general research track. Across these three tracks, we received 21 total paper submissions after withdrawals -14 for the general research track, 5 for the multilingual track, and 2 for the interpretability track. While the submission counts have decreased from last year, we found the average quality of submitted papers to be higher than previous years. After discussion among the organizers, we have accepted a total of 16 papers and awarded one best paper and two honorable mention papers. We also have accepted 23 non-archival submissions that were accepted at other related conferences (such as papers accepted at the main conference/findings of ACL, EMNLP, SIGIR) to be presented at our workshop. Our final program therefore includes 39 papers, of which 16 papers are included in these proceedings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We are excited to host six stellar invited speakers. In the morning session, Reut Tsarfaty, Jon Clark, and Yiming Cui will give talks on multilinguality in question answering; in the afternoon session, Jonathan Berant, Marco Tulio Ribeiro, and Hannaneh Hajishirzi will give talks on interpretability in question answering. We thank these speakers, our program committee, the EMNLP workshop chairs, and our sponsors, Baidu and Facebook, for helping to make this workshop possible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "of Contents MFAQ: a Multilingual FAQ Dataset Maxime De Bruyn", |
| "authors": [ |
| { |
| "first": "Ehsan", |
| "middle": [], |
| "last": "Lotfi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeska", |
| "middle": [], |
| "last": "Buhmann", |
| "suffix": "" |
| }, |
| { |
| "first": ".", |
| "middle": [ |
| "." |
| ], |
| "last": "Walter Daelemans", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "of Contents MFAQ: a Multilingual FAQ Dataset Maxime De Bruyn, Ehsan Lotfi, Jeska Buhmann and Walter Daelemans . . . . . . . . . . . . . . . . . . . . . . . 1", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Rethinking the Objectives of Extractive Question Answering Martin Fajcik", |
| "authors": [ |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Jon", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Smrz", |
| "suffix": "" |
| }, |
| { |
| "first": ".", |
| "middle": [ |
| "." |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rethinking the Objectives of Extractive Question Answering Martin Fajcik, Josef Jon and Pavel Smrz . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 14", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "What Would it Take to get Biomedical QA Systems into Practice?", |
| "authors": [ |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Kell", |
| "suffix": "" |
| }, |
| { |
| "first": "Iain", |
| "middle": [], |
| "last": "Marshall", |
| "suffix": "" |
| }, |
| { |
| "first": "Byron", |
| "middle": [], |
| "last": "Wallace", |
| "suffix": "" |
| }, |
| { |
| "first": "Andre", |
| "middle": [], |
| "last": "Jaun", |
| "suffix": "" |
| }, |
| { |
| "first": ".", |
| "middle": [ |
| "." |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "What Would it Take to get Biomedical QA Systems into Practice? Gregory Kell, Iain Marshall, Byron Wallace and Andre Jaun . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 28", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Improving Non-English Question Answering and Passage Retrieval Timo M\u00f6ller", |
| "authors": [ |
| { |
| "first": "Germandpr", |
| "middle": [], |
| "last": "Germanquad", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "GermanQuAD and GermanDPR: Improving Non-English Question Answering and Passage Retrieval Timo M\u00f6ller, Julian Risch and Malte Pietsch . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 42", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Zero-Shot Clinical Questionnaire Filling From Human-Machine Interactions", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zero-Shot Clinical Questionnaire Filling From Human-Machine Interactions Farnaz Ghassemi Toudeshki, Philippe Jolivet, Alexandre Durand-Salmon and Anna Liednikova 51", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Can Question Generation Debias Question Answering Models? A Case Study on Question-Context Lexical Overlap Kazutoshi Shinoda, Saku Sugawara and Akiko Aizawa", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Can Question Generation Debias Question Answering Models? A Case Study on Question-Context Lexical Overlap Kazutoshi Shinoda, Saku Sugawara and Akiko Aizawa . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 63", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "What Can a Generative Language Model Answer About a Passage? Douglas Summers-Stay, Claire Bonial and", |
| "authors": [ |
| { |
| "first": "Clare", |
| "middle": [], |
| "last": "Voss", |
| "suffix": "" |
| }, |
| { |
| "first": ".", |
| "middle": [ |
| "." |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "What Can a Generative Language Model Answer About a Passage? Douglas Summers-Stay, Claire Bonial and Clare Voss . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 73", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Multi-modal Retrieval of Tables and Texts Using Tri-encoder Models Bogdan Kosti\u0107", |
| "authors": [ |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Risch", |
| "suffix": "" |
| }, |
| { |
| "first": ".", |
| "middle": [ |
| "." |
| ], |
| "last": "Timo M\u00f6ller", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Multi-modal Retrieval of Tables and Texts Using Tri-encoder Models Bogdan Kosti\u0107, Julian Risch and Timo M\u00f6ller . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 82", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Eliciting Bias in Question Answering Models through Ambiguity Andrew Mao", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eliciting Bias in Question Answering Models through Ambiguity Andrew Mao, Naveen Raman, Matthew Shu, Eric Li, Franklin Yang and Jordan Boyd-Graber . . 92", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "113 GANDALF: a General Character Name Description Dataset for Long Fiction Fredrik Carlsson, Magnus Sahlgren, Fredrik Olsson and Amaru Cuba Gyllensten", |
| "authors": [ |
| { |
| "first": ".", |
| "middle": [ |
| "." |
| ], |
| "last": "Hung-Yi Lee", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Unsupervised Multiple Choices Question Answering: Start Learning from Basic Knowledge Chi-Liang Liu and", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Unsupervised Multiple Choices Question Answering: Start Learning from Basic Knowledge Chi-Liang Liu and Hung-yi Lee . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 113 GANDALF: a General Character Name Description Dataset for Long Fiction Fredrik Carlsson, Magnus Sahlgren, Fredrik Olsson and Amaru Cuba Gyllensten . . . . . . . . . . . . 119", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Investigating Post-pretraining Representation Alignment for Cross-Lingual Question Answering Fahim Faisal and Antonios Anastasopoulos", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Investigating Post-pretraining Representation Alignment for Cross-Lingual Question Answering Fahim Faisal and Antonios Anastasopoulos . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 133", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Semantic Answer Similarity for Evaluating Question Answering Models", |
| "authors": [ |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Risch", |
| "suffix": "" |
| }, |
| { |
| "first": "Timo", |
| "middle": [], |
| "last": "M\u00f6ller", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Gutsch", |
| "suffix": "" |
| }, |
| { |
| "first": "Malte", |
| "middle": [], |
| "last": "Pietsch", |
| "suffix": "" |
| }, |
| { |
| "first": ".", |
| "middle": [ |
| "." |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Semantic Answer Similarity for Evaluating Question Answering Models Julian Risch, Timo M\u00f6ller, Julian Gutsch and Malte Pietsch . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 149", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Simple and Efficient ways to Improve REALM Vidhisha Balachandran", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Yulia", |
| "middle": [], |
| "last": "Tsvetkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [ |
| ". . . . . . . . . . . . . . ." |
| ], |
| "last": "Parmar", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simple and Efficient ways to Improve REALM Vidhisha Balachandran, Ashish Vaswani, Yulia Tsvetkov and Niki Parmar . . . . . . . . . . . . . . . . . . . 158", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Mitigating False-Negative Contexts in Multi-Document Question Answering with Retrieval Marginalization Ansong Ni, Matt Gardner", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Wednesday", |
| "suffix": "" |
| }, |
| { |
| "first": "Naveen", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Raman", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Shu", |
| "suffix": "" |
| }, |
| { |
| "first": "Franklin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "-Graber ; Wentao", |
| "middle": [], |
| "last": "Boyd", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiani", |
| "middle": [], |
| "last": "Cui", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ye", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "10 Question Answering over Electronic Devices: A New Benchmark Dataset and a Multi-Task Learning based QA Framework Abhilash Nandy, Soumya Sharma, Shubham Maddhashiya", |
| "volume": "13", |
| "issue": "", |
| "pages": "10--14", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wednesday, November 10, 2021 (continued) 13:10-14:10 Poster Session (archival track) 13:10-14:10 GermanQuAD and GermanDPR: Improving Non-English Question Answering and Passage Retrieval Timo M\u00f6ller, Julian Risch and Malte Pietsch 13:10-14:10 Zero-Shot Clinical Questionnaire Filling From Human-Machine Interactions Farnaz Ghassemi Toudeshki, Philippe Jolivet, Alexandre Durand-Salmon and Anna Liednikova 13:10-14:10 Can Question Generation Debias Question Answering Models? A Case Study on Question-Context Lexical Overlap Kazutoshi Shinoda, Saku Sugawara and Akiko Aizawa 13:10-14:10 What Can a Generative Language Model Answer About a Passage? Douglas Summers-Stay, Claire Bonial and Clare Voss 13:10-14:10 Multi-modal Retrieval of Tables and Texts Using Tri-encoder Models Bogdan Kosti\u0107, Julian Risch and Timo M\u00f6ller 13:10-14:10 Eliciting Bias in Question Answering Models through Ambiguity Andrew Mao, Naveen Raman, Matthew Shu, Eric Li, Franklin Yang and Jordan Boyd-Graber 13:10-14:10 Bilingual Alignment Pre-Training for Zero-Shot Cross-Lingual Transfer Ziqing Yang, Wentao Ma, Yiming Cui, Jiani Ye, Wanxiang Che and Shijin Wang 13:10-14:10 ParaShoot: A Hebrew Question Answering Dataset Omri Keren and Omer Levy 13:10-14:10 Unsupervised Multiple Choices Question Answering: Start Learning from Basic Knowledge Chi-Liang Liu and Hung-yi Lee 13:10-14:10 GANDALF: a General Character Name Description Dataset for Long Fiction Fredrik Carlsson, Magnus Sahlgren, Fredrik Olsson and Amaru Cuba Gyllensten 13:10-14:10 Investigating Post-pretraining Representation Alignment for Cross-Lingual Ques- tion Answering Fahim Faisal and Antonios Anastasopoulos x Wednesday, November 10, 2021 (continued) 13:10-14:10 Semantic Answer Similarity for Evaluating Question Answering Models Julian Risch, Timo M\u00f6ller, Julian Gutsch and Malte Pietsch 13:10-14:10 Simple and Efficient ways to Improve REALM Vidhisha Balachandran, Ashish Vaswani, Yulia Tsvetkov and Niki Parmar \n13:10-14:10 Poster Session (non-archival track) 13:10-14:10 Synthetic Target Domain Supervision for Open Retrieval QA Revanth Gangi Reddy, Bhavani Iyer, Md Arafat Sultan, Rong Zhang, Avirup Sil, Vittorio Castelli, Radu Florian, Salim Roukos 13:10-14:10 Entity-based Knowledge Conflicts in Question Answering Shayne Longpre, Kartik Perisetla, Anthony Chen, Nikhil Ramesh, Chris Dubois, Sameer Singh 13:10-14:10 Mitigating False-Negative Contexts in Multi-Document Question Answering with Retrieval Marginalization Ansong Ni, Matt Gardner, Pradeep Dasigi 13:10-14:10 Generative Context Pair Selection for Multi-hop Question Answering Dheeru Dua,Cicero Nogueira dos Santos,Patrick Ng,Ben Athiwaratkun,Bing Xi- ang,Matt Gardner,Sameer Singh 13:10-14:10 Learning with Instance Bundles for Reading Comprehension Dheeru Dua, Pradeep Dasigi,Sameer Singh,Matt Gardner 13:10-14:10 Can NLI Models Verify QA Systems' Predictions? Jifan Chen, Eunsol Choi, Greg Durrett 13:10-14:10 Knowing More About Questions Can Help: Improving Calibration in Question An- swering Shujian Zhang, Chengyue Gong and Eunsol Choi 13:10-14:10 RocketQA: An Optimized Training Approach to Dense Passage Retrieval for Open- Domain Question Answering Yingqi Qu, Yuchen Ding, Jing Liu, Kai Liu, Ruiyang Ren, Wayne Xin Zhao, Daxi- ang Dong, Hua Wu and Haifeng Wang 13:10-14:10 Weakly Supervised Pre-Training for Multi-Hop Retriever Yeon Seonwoo, Sang-Woo Lee, Ji-Hoon Kim, Jung-Woo Ha and Alice Oh 13:10-14:10 ReasonBert: Pre-trained to Reason with Distant Supervision Xiang Deng, Yu Su, Alyssa Lees, You Wu, Cong Yu and Huan Sun Wednesday, November 10, 2021 (continued) 13:10-14:10 Question Answering over Electronic Devices: A New Benchmark Dataset and a Multi-Task Learning based QA Framework Abhilash Nandy, Soumya Sharma, Shubham Maddhashiya, Kapil Sachdeva, Pawan Goyal and NIloy Ganguly 13:10-14:10 Do We Know What We Don't Know? Studying Unanswerable Questions beyond SQuAD 2.0", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Reference-based Weak Supervision for Answer Sentence Selection using Web Data Vivek Krishnamurthy", |
| "authors": [ |
| { |
| "first": "Elior", |
| "middle": [], |
| "last": "Sulem", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamaal", |
| "middle": [], |
| "last": "Hay", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "10 Beyond Reptile: Meta-Learned Dot-Product Maximization between Gradients for Improved Single-Task Regularization Akhil Kedia, Sai Chetan Chinthakindi and Wonho Ryu", |
| "volume": "13", |
| "issue": "", |
| "pages": "45--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elior Sulem, Jamaal Hay and Dan Roth 13:10-14:10 Relation-Guided Pre-Training for Open-Domain Question Answering Ziniu Hu, Yizhou Sun and Kai-Wei Chang 13:10-14:10 Beyond Reptile: Meta-Learned Dot-Product Maximization between Gradients for Improved Single-Task Regularization Akhil Kedia, Sai Chetan Chinthakindi and Wonho Ryu 13:10-14:10 SD-QA: Spoken Dialectal Question Answering for the Real World Fahim Faisal, Sharlina Keshava, Md Mahfuz Ibn Alam and Antonios Anastasopou- los 13:10-14:10 When Retriever-Reader Meets Scenario-Based Multiple-Choice Questions ZiXian Huang, Ao Wu, Yulin Shen, Gong Cheng and Yuzhong Qu 13:10-14:10 Winnowing Knowledge for Multi-choice Question Answering Yeqiu Li, Bowei Zou, Zhifeng Li, Ai Ti Aw, Yu Hong and Qiaoming Zhu 13:10-14:10 Extract, Integrate, Compete: Towards Verification Style Reading Comprehension Chen Zhang, Yuxuan Lai, Yansong Feng and Dongyan Zhao 13:10-14:10 Reference-based Weak Supervision for Answer Sentence Selection using Web Data Vivek Krishnamurthy, Thuy Vu and Alessandro Moschitti 13:10-14:10 NOAHQA: Numerical Reasoning with Interpretable Graph Question Answering Dataset Qiyuan Zhang, Lei Wang, SICHENG YU, Shuohang Wang, Yang Wang, Jing Jiang and Ee-Peng Lim 13:10-14:10 Improving Numerical Reasoning Skills in the Modular Approach for Complex Ques- tion Answering on Text Xiao-Yu Guo, Yuan-Fang Li and Gholamreza Haffari 13:10-14:10 R2-D2: A Modular Baseline for Open-Domain Question Answering Martin Fajcik, Martin Docekal, Karel Ondrej and Pavel Smrz 13:10-14:10 AutoEQA: Auto-Encoding Questions for Extractive Question Answering Stalin Varanasi, Saadullah Amin and Guenter Neumann 14:10-14:30 Break Wednesday, November 10, 2021 (continued) 14:30-16:45 Interpretability in QA Invited Talk Session 14:30-15:00 Invited Talk 4 -Jonathan Berant 15:00-15:30 Invited Talk 5 -Marco Tulio Ribeiro 15:30-16:00 Invited Talk 6 -Hannaneh Hajishirzi 16:00-16:45 Panel Discussion on Interpretability in QA 16:45-17:00 Closing \nRemarks", |
| "links": null |
| } |
| }, |
| "ref_entries": {} |
| } |
| } |