| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:57:52.809479Z" |
| }, |
| "title": "Red Dragon AI at TextGraphs 2020 Shared Task: LIT : LSTM-Interleaved Transformer for Multi-Hop Explanation Ranking", |
| "authors": [ |
| { |
| "first": "Ken", |
| "middle": [], |
| "last": "Yew", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chia", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Witteveen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Andrews", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "martin@reddragon.ai" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Explainable question answering for science questions is a challenging task that requires multihop inference over a large set of fact sentences. To counter the limitations of methods that view each query-document pair in isolation, we propose the LSTM-Interleaved Transformer which incorporates cross-document interactions for improved multi-hop ranking. The LIT architecture can leverage prior ranking positions in the re-ranking setting. Our model is competitive on the current leaderboard for the TextGraphs 2020 shared task, achieving a test-set MAP of 0.5607, and would have gained third place had we submitted before the competition deadline. Our code im", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Explainable question answering for science questions is a challenging task that requires multihop inference over a large set of fact sentences. To counter the limitations of methods that view each query-document pair in isolation, we propose the LSTM-Interleaved Transformer which incorporates cross-document interactions for improved multi-hop ranking. The LIT architecture can leverage prior ranking positions in the re-ranking setting. Our model is competitive on the current leaderboard for the TextGraphs 2020 shared task, achieving a test-set MAP of 0.5607, and would have gained third place had we submitted before the competition deadline. Our code im", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Complex question answering often requires reasoning over many evidence documents, which is known as multi-hop inference. Existing datasets such as Wikihop (Welbl et al., 2018) , OpenBookQA (Mihaylov et al., 2018) , QASC (Khot et al., 2020) , are limited due to artificial questions and short aggregation, requiring less than 3 facts. In comparison, TextGraphs (Jansen and Ustalov, 2020) uses WorldTree V2 (Xie et al., 2020) which is the largest available dataset that requires combining an average of 6 and up to 16 facts in order to generate an explanation for complex science questions. The dataset contains 5k questions that require knowledge in core science as well as common sense. Figure 1 shows an example question from the WorldTree dataset. The evaluation for this dataset is framed as a ranking objective over a large set of 9k science facts, and models are scored based on the MAP metric over the predicted rank ordering. Multi-hop inference encounters significant noise or \"distraction\" documents in the process and this challenge is known as semantic drift (Fried et al., 2015) . Compared to WorldTree V1 (Jansen et al., 2018) , WorldTree V2 has more examples but is more challenging as the larger pool of science facts presents a greater risk of semantic drift.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 175, |
| "text": "(Welbl et al., 2018)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 189, |
| "end": 212, |
| "text": "(Mihaylov et al., 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 220, |
| "end": 239, |
| "text": "(Khot et al., 2020)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 360, |
| "end": 386, |
| "text": "(Jansen and Ustalov, 2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 405, |
| "end": 423, |
| "text": "(Xie et al., 2020)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1070, |
| "end": 1090, |
| "text": "(Fried et al., 2015)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1118, |
| "end": 1139, |
| "text": "(Jansen et al., 2018)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 687, |
| "end": 695, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Neural information retrieval models such as DPR (Karpukhin et al., 2020) , RAG (Lewis et al., 2020) , and ColBERT (Khattab and Zaharia, 2020) that assume query-document independence use a language model to generate sentence representations for the query and document separately. The advantage of this late-interaction approach is efficient inference as the sentence representations can be computed beforehand and optimized lookup methods such as FAISS (Johnson et al., 2017) exist for this purpose. However, the late-interaction compromises on deeper semantic understanding possible with language models. Early-interaction approaches such as TFR-BERT (Han et al., 2020) instead concatenate the query and document before generating a unified sentence representation. This approach is more computationally expensive but is attractive for re-ranking over a limited number of documents. However, the previous approaches consider each query-document pair in isolation. This forgoes any cross-document interaction which can leverage additional knowledge sources or benefit the ranking objective. Other work (Pasumarthi et al., 2019; Pobrotyn et al., 2020; Sun and Duh, 2020) facilitate cross-document interactions through self-attention mechanisms. However, the cross-document interaction is only applied after the feature extraction step and cannot leverage the language understanding potential in earlier language model layers.", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 72, |
| "text": "(Karpukhin et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 79, |
| "end": 99, |
| "text": "(Lewis et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 114, |
| "end": 141, |
| "text": "(Khattab and Zaharia, 2020)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 452, |
| "end": 474, |
| "text": "(Johnson et al., 2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 651, |
| "end": 669, |
| "text": "(Han et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 1101, |
| "end": 1126, |
| "text": "(Pasumarthi et al., 2019;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1127, |
| "end": 1149, |
| "text": "Pobrotyn et al., 2020;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1150, |
| "end": 1168, |
| "text": "Sun and Duh, 2020)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The most straightforward loss for the document ranking objective is Binary Crossentropy where each document is ranked according to the binary classification probability of being within the gold explanation set. However, there have been recent progress in differentiable losses to optimize directly for the ranking objective (Wang et al., 2018; Revaud et al., 2019; Engilberge et al., 2019) . In this work, we also compare the benefits of each loss for multi-hop ranking.", |
| "cite_spans": [ |
| { |
| "start": 324, |
| "end": 343, |
| "text": "(Wang et al., 2018;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 344, |
| "end": 364, |
| "text": "Revaud et al., 2019;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 365, |
| "end": 389, |
| "text": "Engilberge et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main contributions of this work are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1. We show that conventional information retrieval-based methods are still a strong baseline and propose I-BM25, an iterative retrieval method that improves inference speed and recall by emulating multi-hop retrieval.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2. We propose a hierarchical LSTM-interleaved transformer (LIT) architecture that maximizes early cross-document interactions for improved multi-hop re-ranking.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "3. We provide empirical comparisons of training with different loss functions and show that Binary Crossentropy loss is simple yet may outperform differentiable ranking losses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Three different system architectures are described here, with overall schemes illustrated in Figure 3 for comparison.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 93, |
| "end": 101, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Chia et al (2019) showed that conventional information retrieval methods can be a strong baseline when modified to suit the multi-hop inference objective. However, this method is limited due to computationally expensive inference and sensitivity to noise and semantic drift. We propose an iterative retrieval method 'I-BM25' that performs inference in a fraction of the time and reduces semantic drift, resulting in a even stronger baseline retrieval method. For preprocessing, we use spaCy (Honnibal and Montani, 2017) processes each new candidate one at a time, I-BM25 processes 2 n candidates in the n-th iteration. The algorithm is as follows:", |
| "cite_spans": [ |
| { |
| "start": 491, |
| "end": 519, |
| "text": "(Honnibal and Montani, 2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative BM25 Retrieval", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "1. Sparse document vectors are pre-computed for all questions and explanation candidates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative BM25 Retrieval", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "2. For each question, the closest n explanation candidates by cosine proximity are selected and their vectors are aggregated by a max operation. The aggregated vector is down-scaled and used to update the query vector through a max operation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative BM25 Retrieval", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "3. The previous step is repeated for increasing values of n until there are no candidate explanations remaining.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative BM25 Retrieval", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "BERT is a pre-trained language model that is widely adapted and fine-tuned for many downstream NLP tasks. Due to computational constraints, we use DistilBERT (Sanh et al., 2020) which has 40% fewer parameters and comparable performance. In sequence-level tasks such as text classification, a [CLS] token is a special token inserted at the front of the sequence. The latent representation of the token is passed to a feed-forward network for prediction. We append an LSTM (Hochreiter and Schmidhuber, 1997) module with 2 layers that operate on the [CLS] vectors of the last layer of BERT (similar in principle to McCann et al (2018) ). This hierarchical structure allows the transformer to perform crossdocument reasoning and knowledge reference. The LSTM layers enable the model to be rank-aware when used in the re-ranking setting. For re-ranking, the top 128 predictions from I-BM25 are passed to the LSTM-After Transformer which performs binary classification for each document.", |
| "cite_spans": [ |
| { |
| "start": 158, |
| "end": 177, |
| "text": "(Sanh et al., 2020)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 292, |
| "end": 297, |
| "text": "[CLS]", |
| "ref_id": null |
| }, |
| { |
| "start": 471, |
| "end": 505, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 612, |
| "end": 631, |
| "text": "McCann et al (2018)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "LSTM-After Transformer for Re-Ranking", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "TextGraphs is a challenging task which requires complex multi-hop reasoning, but information retrieval methods are surprisingly strong baselines. To enhance cross-document interaction and leverage language representations in earlier transformer layers, we interleave adapters (Houlsby et al., 2019) into the architecture which are recurrent instead of merely feed-forward. The LSTM-adapter modules in Figure 2 operate on the latent representation at the [CLS] position of each document at each layer of the transformer. After each transformer layer, the [CLS] latent representations for each input document are first down-projected, passed to the LSTM layers and finally up-projected and fed into the next transformer layer. Compared to (Houlsby et al., 2019) , the LIT architecture is fully trainable and makes the transformer architecture more expressive by enabling cross-document reasoning which was previously not possible. Apart from LSTM, we also tested GCN (Kipf and Welling, 2017) and Self-Attention (Parikh et al., 2016) layers but had limited success in achieving competitive performance from them. Table 1 shows that I-BM25 is a strong information retrieval method that can be a drop-in replacement for previous information retrieval methods. The results also show the advantage of the LIT architecture in interleaving LSTM layers between transformer layers, rather than after the last transformer layer.", |
| "cite_spans": [ |
| { |
| "start": 276, |
| "end": 298, |
| "text": "(Houlsby et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 737, |
| "end": 759, |
| "text": "(Houlsby et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 965, |
| "end": 989, |
| "text": "(Kipf and Welling, 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 401, |
| "end": 409, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1110, |
| "end": 1117, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "LSTM-Interleaved Transformer for Re-Ranking", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Dev MAP LambdaLoss 0.4970 APLoss 0.5187 Binary Crossentropy 0.5680 The results of optimization using 3 different loss objectives are shown in Table 2 . Surprisingly, the direct ranking-loss oriented objectives were less effective in reducing the final evaluation MAP score, which is potentially due to the bucketisation approximation used in the APLoss calculations not being appropriately pre-scaled in our experiments. In this case, the training may require different hyperparameters to converge optimally. Another potential explanation is that these ranking losses may be sub-optimal (when used as a training objective) when many documents have very similar underlying scores which is the case here.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 142, |
| "end": 149, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Loss Function", |
| "sec_num": null |
| }, |
| { |
| "text": "Further to our experience last year, we included preprocessing steps to isolate the branching 'combo' statements (which essentially contain OR clauses between different noun phrases, for instance). This step remains in our codebase, but we did not exploit it fully, since a full treatment would require the isolation of which 'combo branch' is taken by each gold statement in the training set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Notes", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Other architectures that we explored included Graph neural network (GNN) methods, however we had insufficient time to tune these for the multi-hop explanation task herein. Surprisingly, our simple LSTM methods (which can be viewed as a linear graph that performs message-passing along the list of results ordered by the I-BM25 method) already provided a competitive method. We estimate that next year's competition will require the use of graph-based methods, due to their greater expressive power.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The LIT architecture is a simple yet powerful adaptation of the Transformer architecture to learn better cross-document interactions for multi-hop ranking. The structure can be easily integrated with any transformer language model to enable cross-referencing of knowledge statements and improved ranking performance. For example, LIT can be a drop-in encoder for other multi-hop question answering datasets such as HotPotQA (Yang et al., 2018) . When applied to the challenging WorldTree V2 dataset, LIT achieves competitive performance with current state-of-the-art models despite a smaller footprint. We envision that this architecture can be beneficial to many NLP tasks which require multi-hop reasoning over documents.", |
| "cite_spans": [ |
| { |
| "start": 424, |
| "end": 443, |
| "text": "(Yang et al., 2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Red dragon AI at TextGraphs 2019 shared task: Language model assisted explanation generation", |
| "authors": [ |
| { |
| "first": "Ken", |
| "middle": [], |
| "last": "Yew", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Chia", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Witteveen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Andrews", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Thirteenth Workshop on Graph-Based Methods for Natural Language Processing (TextGraphs-13)", |
| "volume": "", |
| "issue": "", |
| "pages": "85--89", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yew Ken Chia, Sam Witteveen, and Martin Andrews. 2019. Red dragon AI at TextGraphs 2019 shared task: Language model assisted explanation generation. In Proceedings of the Thirteenth Workshop on Graph-Based Methods for Natural Language Processing (TextGraphs-13), pages 85-89, Hong Kong, November. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Sodeep: a sorting deep net to learn ranking loss surrogates", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Engilberge", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis", |
| "middle": [], |
| "last": "Chevallier", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "P\u00e9rez", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Cord", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "10792--10801", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Engilberge, Louis Chevallier, Patrick P\u00e9rez, and Matthieu Cord. 2019. Sodeep: a sorting deep net to learn ranking loss surrogates. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 10792-10801.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Higher-order lexical semantic models for non-factoid answer reranking", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Fried", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Gustave", |
| "middle": [], |
| "last": "Hahn-Powell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "197--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Fried, Peter Jansen, Gustave Hahn-Powell, Mihai Surdeanu, and Peter Clark. 2015. Higher-order lexical semantic models for non-factoid answer reranking. Transactions of the Association for Computational Linguis- tics, 3:197-210.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Mike Bendersky, and Marc Najork. 2020. Learning-to-rank with bert in tfranking", |
| "authors": [ |
| { |
| "first": "Shuguang", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanhui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2004.08476" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuguang Han, Xuanhui Wang, Mike Bendersky, and Marc Najork. 2020. Learning-to-rank with bert in tf- ranking. arXiv preprint arXiv:2004.08476.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735- 1780.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Honnibal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ines", |
| "middle": [], |
| "last": "Montani", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Honnibal and Ines Montani. 2017. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing. To appear.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Parameter-efficient transfer learning for nlp", |
| "authors": [ |
| { |
| "first": "Neil", |
| "middle": [], |
| "last": "Houlsby", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Giurgiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanislaw", |
| "middle": [], |
| "last": "Jastrzebski", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruna", |
| "middle": [], |
| "last": "Morrone", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "De Laroussilhe", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Gesmundo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Attariyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Gelly", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin de Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for nlp. In ICML.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "TextGraphs 2020 Shared Task on Multi-Hop Inference for Explanation Regeneration", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Dmitry", |
| "middle": [], |
| "last": "Ustalov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Jansen and Dmitry Ustalov. 2020. TextGraphs 2020 Shared Task on Multi-Hop Inference for Explanation Regeneration. In Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "WorldTree: A Corpus of Explanation Graphs for Elementary Science Questions supporting Multi-hop Inference", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Wainwright", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Marmorstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Clayton", |
| "middle": [], |
| "last": "Morrison", |
| "suffix": "" |
| }, |
| { |
| "first": ";", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Khalid", |
| "middle": [], |
| "last": "Choukri", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Cieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Thierry", |
| "middle": [], |
| "last": "Declerck", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Goggi", |
| "suffix": "" |
| }, |
| { |
| "first": "Koiti", |
| "middle": [], |
| "last": "Hasida", |
| "suffix": "" |
| }, |
| { |
| "first": "Hitoshi", |
| "middle": [], |
| "last": "Isahara", |
| "suffix": "" |
| }, |
| { |
| "first": "Bente", |
| "middle": [], |
| "last": "Maegaard", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Mariani", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Jansen, Elizabeth Wainwright, Steven Marmorstein, and Clayton Morrison. 2018. WorldTree: A Corpus of Explanation Graphs for Elementary Science Questions supporting Multi-hop Inference. In Nicoletta Calzo- lari (Conference chair), Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Koiti Hasida, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, H\u00e9l\u00e8ne Mazo, Asuncion Moreno, Jan Odijk, Stelios Piperidis, and Takenobu Tokunaga, editors, Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan, May 7-12, 2018. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Billion-scale similarity search with gpus", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthijs", |
| "middle": [], |
| "last": "Douze", |
| "suffix": "" |
| }, |
| { |
| "first": "Herv\u00e9", |
| "middle": [], |
| "last": "J\u00e9gou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1702.08734" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Johnson, Matthijs Douze, and Herv\u00e9 J\u00e9gou. 2017. Billion-scale similarity search with gpus. arXiv preprint arXiv:1702.08734.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Dense passage retrieval for open-domain question answering", |
| "authors": [ |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Karpukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Barlas", |
| "middle": [], |
| "last": "Oguz", |
| "suffix": "" |
| }, |
| { |
| "first": "Sewon", |
| "middle": [], |
| "last": "Min", |
| "suffix": "" |
| }, |
| { |
| "first": "Ledell", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2004.04906" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. arXiv preprint arXiv:2004.04906.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Colbert: Efficient and effective passage search via contextualized late interaction over bert", |
| "authors": [ |
| { |
| "first": "Omar", |
| "middle": [], |
| "last": "Khattab", |
| "suffix": "" |
| }, |
| { |
| "first": "Matei", |
| "middle": [], |
| "last": "Zaharia", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2004.12832" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omar Khattab and Matei Zaharia. 2020. Colbert: Efficient and effective passage search via contextualized late interaction over bert. arXiv preprint arXiv:2004.12832.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Qasc: A dataset for question answering via sentence composition", |
| "authors": [ |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Michal", |
| "middle": [], |
| "last": "Guerquin", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "8082--8090", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tushar Khot, Peter Clark, Michal Guerquin, Peter Jansen, and Ashish Sabharwal. 2020. Qasc: A dataset for question answering via sentence composition. In AAAI, pages 8082-8090.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Semi-supervised classification with graph convolutional networks", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Kipf", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Kipf and M. Welling. 2017. Semi-supervised classification with graph convolutional networks. ArXiv, abs/1609.02907.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Tim Rockt\u00e4schel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ethan", |
| "middle": [], |
| "last": "Perez", |
| "suffix": "" |
| }, |
| { |
| "first": "Aleksandara", |
| "middle": [], |
| "last": "Piktus", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Petroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Karpukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Heinrich", |
| "middle": [], |
| "last": "K\u00fcttler", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.11401" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\u00fcttler, Mike Lewis, Wen-tau Yih, Tim Rockt\u00e4schel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. arXiv preprint arXiv:2005.11401.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "The natural language decathlon: Multitask learning as question answering", |
| "authors": [ |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Mccann", |
| "suffix": "" |
| }, |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Shirish Keskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bryan McCann, Nitish Shirish Keskar, Caiming Xiong, and Richard Socher. 2018. The natural language decathlon: Multitask learning as question answering.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Can a suit of armor conduct electricity? a new dataset for open book question answering", |
| "authors": [ |
| { |
| "first": "Todor", |
| "middle": [], |
| "last": "Mihaylov", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Todor Mihaylov, Peter Clark, Tushar Khot, and Ashish Sabharwal. 2018. Can a suit of armor conduct electricity? a new dataset for open book question answering. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A decomposable attention model for natural language inference", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Ankur", |
| "suffix": "" |
| }, |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "T\u00e4ckstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur P. Parikh, Oscar T\u00e4ckstr\u00f6m, Dipanjan Das, and Jakob Uszkoreit. 2016. A decomposable attention model for natural language inference. ArXiv, abs/1606.01933.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Self-attentive document interaction networks for permutation equivariant ranking", |
| "authors": [ |
| { |
| "first": "Xuanhui", |
| "middle": [], |
| "last": "Rama Kumar Pasumarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Bendersky", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Najork", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rama Kumar Pasumarthi, Xuanhui Wang, Michael Bendersky, and Marc Najork. 2019. Self-attentive document interaction networks for permutation equivariant ranking.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Context-aware learning to rank with self-attention", |
| "authors": [ |
| { |
| "first": "Przemys\u0142aw", |
| "middle": [], |
| "last": "Pobrotyn", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomasz", |
| "middle": [], |
| "last": "Bartczak", |
| "suffix": "" |
| }, |
| { |
| "first": "Miko\u0142aj", |
| "middle": [], |
| "last": "Synowiec", |
| "suffix": "" |
| }, |
| { |
| "first": "Rados\u0142aw", |
| "middle": [], |
| "last": "Bia\u0142obrzeski", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaros\u0142aw", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.10084" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Przemys\u0142aw Pobrotyn, Tomasz Bartczak, Miko\u0142aj Synowiec, Rados\u0142aw Bia\u0142obrzeski, and Jaros\u0142aw Bojar. 2020. Context-aware learning to rank with self-attention. arXiv preprint arXiv:2005.10084.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Learning with average precision: Training image retrieval with a listwise loss", |
| "authors": [ |
| { |
| "first": "Jerome", |
| "middle": [], |
| "last": "Revaud", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Almaz\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Cesar Roberto De", |
| "middle": [], |
| "last": "Rafael S Rezende", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Souza", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the IEEE International Conference on Computer Vision", |
| "volume": "", |
| "issue": "", |
| "pages": "5107--5116", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jerome Revaud, Jon Almaz\u00e1n, Rafael S Rezende, and Cesar Roberto de Souza. 2019. Learning with average precision: Training image retrieval with a listwise loss. In Proceedings of the IEEE International Conference on Computer Vision, pages 5107-5116.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2020. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Modeling document interactions for learning to rank with regularized selfattention", |
| "authors": [ |
| { |
| "first": "Shuo", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.03932" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuo Sun and Kevin Duh. 2020. Modeling document interactions for learning to rank with regularized self-attention. arXiv preprint arXiv:2005.03932.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The lambdaloss framework for ranking metric optimization", |
| "authors": [ |
| { |
| "first": "Xuanhui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadav", |
| "middle": [], |
| "last": "Golbandi", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Bendersky", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Najork", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th ACM International Conference on Information and Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "1313--1322", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuanhui Wang, Cheng Li, Nadav Golbandi, Michael Bendersky, and Marc Najork. 2018. The lambdaloss framework for ranking metric optimization. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pages 1313-1322.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Constructing datasets for multi-hop reading comprehension across documents", |
| "authors": [ |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Welbl", |
| "suffix": "" |
| }, |
| { |
| "first": "Pontus", |
| "middle": [], |
| "last": "Stenetorp", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "287--302", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johannes Welbl, Pontus Stenetorp, and Sebastian Riedel. 2018. Constructing datasets for multi-hop reading comprehension across documents. Transactions of the Association for Computational Linguistics, 6:287-302.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "WorldTree v2: A corpus of science-domain structured explanations and inference patterns supporting multi-hop inference", |
| "authors": [ |
| { |
| "first": "Zhengnan", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Thiem", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaycie", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Wainwright", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Marmorstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "5456--5473", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengnan Xie, Sebastian Thiem, Jaycie Martin, Elizabeth Wainwright, Steven Marmorstein, and Peter Jansen. 2020. WorldTree v2: A corpus of science-domain structured explanations and inference patterns supporting multi-hop inference. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 5456-5473, Marseille, France, May. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Hotpotqa: A dataset for diverse, explainable multi-hop question answering", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "" |
| }, |
| { |
| "first": "Saizheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "W" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Z. Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William W. Cohen, R. Salakhutdinov, and Christopher D. Manning. 2018. Hotpotqa: A dataset for diverse, explainable multi-hop question answering. ArXiv, abs/1809.09600.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Example of a single question and groundtruth explanation facts in WorldTree V2 dataset.", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Overview of 3 architectures", |
| "uris": null |
| }, |
| "TABREF2": { |
| "html": null, |
| "type_str": "table", |
| "text": "Loss function comparison on WorldTree V2 dataset", |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |