| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:58:03.651082Z" |
| }, |
| "title": "PGL at TextGraphs 2020 Shared Task: Explanation Regeneration using Language and Graph Learning Methods", |
| "authors": [ |
| { |
| "first": "Weibin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "liweibin02@baidu.com" |
| }, |
| { |
| "first": "Yuxiang", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "luyuxiang@baidu.com" |
| }, |
| { |
| "first": "Zhengjie", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "huangzhengjie@baidu.com" |
| }, |
| { |
| "first": "Jiaxiang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "liujiaxiang@baidu.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes the system designed by the Baidu PGL Team which achieved the first place in the TextGraphs 2020 Shared Task. The task focuses on generating explanations for elementary science questions. Given a question and its corresponding correct answer, we are asked to select the facts that can explain why the answer is correct for that question and answer (QA) from a large knowledge base. To address this problem, we use a pre-trained language model to recall the top-K relevant explanations for each question. Then, we adopt a re-ranking approach based on a pre-trained language model to rank the candidate explanations. To further improve the rankings, we also develop an architecture consisting of both powerful pre-trained transformers and GNNs to tackle the multi-hop inference problem. The official evaluation shows that our system can outperform the second best system by 1.91 points.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes the system designed by the Baidu PGL Team which achieved the first place in the TextGraphs 2020 Shared Task. The task focuses on generating explanations for elementary science questions. Given a question and its corresponding correct answer, we are asked to select the facts that can explain why the answer is correct for that question and answer (QA) from a large knowledge base. To address this problem, we use a pre-trained language model to recall the top-K relevant explanations for each question. Then, we adopt a re-ranking approach based on a pre-trained language model to rank the candidate explanations. To further improve the rankings, we also develop an architecture consisting of both powerful pre-trained transformers and GNNs to tackle the multi-hop inference problem. The official evaluation shows that our system can outperform the second best system by 1.91 points.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The TextGraphs 2020 Shared Task on Explanation Regeneration (Jansen and Ustalov, 2020) asks participants to develop methods to reconstruct gold explanations for elementary science questions. Concretely, given an elementary science question and its corresponding correct answer, the system needs to perform multi-hop inference and rank a set of explanatory facts that are expected to explain why the answer is correct from a large knowledge base.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 86, |
| "text": "(Jansen and Ustalov, 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Multi-hop inference is the task of combining more than one piece of information to solve a reasoning task, such as question answering. Multi-hop inference or information aggregation has been shown to be extremely challenging (Jansen, 2018) , especially for the case here, where current estimates suggest that an average of 4 to 6 sentences are required to answer and explain a given question. An example is shown in Figure 1 . Q: Which of the following best describes the mass of a solid block of ice? E1: mass is a measure of the amount of matter in an object E2: measuring is used for describing an object A: the amount of matter in the block E3: a block is a kind of object Figure 1 : A subgraph of explanation sentences that explains why the answer is correct for the question.", |
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 239, |
| "text": "(Jansen, 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 416, |
| "end": 424, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 677, |
| "end": 685, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the TextGraphs 2020 Shared Task, we not only need to consider extracting the semantic information between the question and each explanation, but also need to take the structural relationship between the explanations into account. Therefore, we adopt a pipeline architecture to address the problem. First, we use a pre-trained language model to recall the top-K relevant explanations for each question. Then, we adopt a re-ranking approach based on another pre-trained language model to rank the candidate explanations. Finally, to further improve the rankings, we also develop an architecture utilizing the power of pre-trained transformers (Vaswani et al., 2017) and graph neural networks (GNNs) (Kipf and Welling, 2016) to tackle the multi-hop inference problem. We also adopt a Virtual Adversarial Training (Takeru et al., 2018) method to train our model and got a slight improvement.", |
| "cite_spans": [ |
| { |
| "start": 644, |
| "end": 666, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 813, |
| "end": 834, |
| "text": "(Takeru et al., 2018)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows. In Section 2, we will briefly describe the task, the dataset and the evaluation metrics of the task. Section 3 shows the details of our approach. Our experiments will be shown in Section 4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Task. As described in Section 1, the TextGraphs 2020 Shared Task focuses on selecting a set of explanation sentences that can explain the answer of a question, which can be regarded as a ranking task. Concretely, given an elementary science question, its corresponding correct answer and a set of explanation sentences, the goal is to determine whether an explanation sentence is the reason for the QA.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Corpus. The data used in this shared task comes from the WorldTree V2 corpus (Xie et al., 2020) . The dataset includes approximately 4400 standardized elementary and middle school science exam questions (3rd through 9th grade). Each example in the WorldTree V2 corpus contains detailed annotation stating whether a fact is a part of the explanation for that question. For each explanation, the WorldTree V2 corpus also includes annotation for how important each fact is towards the explanation.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 95, |
| "text": "(Xie et al., 2020)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Evaluation. Explanation reconstruction performance is evaluated in terms of mean average precision (MAP) by comparing the ranked list of facts with the gold explanation. Therefore, it is intuitive for us to regard the task as a ranking problem.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our system consists of two major components. The first part is an information retrieval (IR) system based on the pre-trained language model to retrieve the top-K relevant explanation sentences from the whole knowledge base. The second part consists of two modules, including a pointwise ranking module to rank candidate facts and a graph-based module to address the problem of multi-hop inference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Recently, pre-trained language models (Devlin et al., 2018; Liu et al., 2019; Lan et al., 2019; Sun et al., 2020) have achieved state-of-the-art results in various language understanding tasks such as question answering (Rajpurkar et al., 2016; Khashabi et al., 2018) . For our IR system, we use ERNIE 2.0 (Sun et al., 2020) , the world's first model to score over 90 in terms of the macro-average score on GLUE benchmark (Wang et al., 2018) , as our retriever. We concatenate the question, the correct answer and the explanation sentence as input of the retriever which will return a score to determine whether an explanation sentence is relevant to that question. Then for each question, we can get the top-K ranked facts from the corpus. Although a simple tf-idf based retriever can obtain the top-K ranked facts in a shorter time, its result is not very effective compared with the pre-trained model, as shown in Table 1 . Since the pre-trained model already has strong semantic representation capabilities, it can achieve an excellent result on 5000 steps fine-tuning within two hours. Details can be found in Section 4.1.", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 59, |
| "text": "(Devlin et al., 2018;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 60, |
| "end": 77, |
| "text": "Liu et al., 2019;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 78, |
| "end": 95, |
| "text": "Lan et al., 2019;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 96, |
| "end": 113, |
| "text": "Sun et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 220, |
| "end": 244, |
| "text": "(Rajpurkar et al., 2016;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 245, |
| "end": 267, |
| "text": "Khashabi et al., 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 306, |
| "end": 324, |
| "text": "(Sun et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 422, |
| "end": 441, |
| "text": "(Wang et al., 2018)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 917, |
| "end": 924, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Retrieval", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Retriever MAP@top100 Oracle MAP@top100 TF-IDF 25.49% 50.78% Ours 48.80% 92.03% Table 1 : The recall result of different retrievers on the development set. The Oracle MAP@top100 is the upper bound MAP score where all the relevant facts in these 100 candidate facts are ranked first.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 79, |
| "end": 86, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Retrieval", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our re-ranking component consists of two modules. Since we only fine-tune the retriever for 5000 steps, it can still be improved by the pre-trained model. Therefore, we use another pre-trained model based on ERNIE 2.0 (Sun et al., 2020) to re-rank the candidate explanation sentences from the retrieval stage.", |
| "cite_spans": [ |
| { |
| "start": 218, |
| "end": 236, |
| "text": "(Sun et al., 2020)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We significantly improve the performance on the task, outperforming the retriever by more than 10% of MAP. However, we found that there are many facts that have lexical overlap with the question, but they are not the reason for the QA. On the contrary, some key facts that have no lexical overlap with the question are ranked low. Such key facts are usually the explanations of other relevant facts, rather than the direct explanation of the question. Since each sample is only composed of a question, a correct answer and an explanation sentence, it is difficult for the retriever to learn the correlation between the candidate facts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To address this problem, we utilize the graph neural networks (GNNs) to learn the correlation between the candidate facts. Graph neural networks (GNNs) are recursive neural networks for modeling the graph structure. Concretely, the graph structure here is the correlation between the candidate facts. As shown in Figure 1 , E1 explains the word mass for the question directly. E2 explains the word measure for the E1. Therefore, E2 can be regarded as a second order neighbor of the question, and we want to learn such relation with help of GNNs. Modern GNNs follow a neighborhood aggregation strategy, where we iteratively update the representation of a node by aggregating representations of its neighbors. In an attempt to integrate the powerful language understanding ability into graph learning, we present a graph aggregator with pre-trained transformers. Figure 2 shows the details of the architecture. Figure 2 : The overview of the architecture that integrates the powerful language understanding ability into graph learning.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 313, |
| "end": 321, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 861, |
| "end": 869, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 909, |
| "end": 917, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ranking", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As GraphSAGE-like (Hamilton et al., 2017) aggregation function only aggregate neighbors using simple operators, such as sum, mean or max, there's no direct interaction between the center node and each neighbor. In a text graph, node (sentence) interaction should not be limited in node-level (sentencelevel) embedding. It should take the token-level (word-level) interaction between two nodes into account. In an attempt to make token-level interaction possible, we apply ERNIE on the edges of the graph by concatenating raw text tokens of the node pairs on the sampled edges (PGL, 2020). As shown in Figure 2 , instead of obtaining the neighbor feature directly from the neighbor sentence, we get the neighbor features by the interaction between the center tokens and the neighbor tokens. Then, the [CLS] embedding will be taken as the neighbor feature.", |
| "cite_spans": [ |
| { |
| "start": 18, |
| "end": 41, |
| "text": "(Hamilton et al., 2017)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 601, |
| "end": 610, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ranking", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To train the model we described above, we need to construct the edges between explanation facts. The K candidate explanation sentences for a question and an answer are regarded as nodes to form a graph. Here, edges can be a result of lexical overlap between explanation sentences. But we find that using this method will result in a very dense graph, and hence, each node in the graph is linked with many neighbors and most of them are irrelevant to the QA.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To alleviate this problem, we adopt a pairwise binary classification system, to score the explanation fact pairs in the candidate set for each question. We use the pre-trained language model to judge whether the explanation fact pair is relevant to the question and the answer. If both the two explanation sentences are relevant to the question and the answer, the label is 1, otherwise the label is 0. Then we rank all the explanation fact pairs by the score and select the top-M pairs as the edges of the text graph for each question. We then feed them to the text graph model we described above, and regard it as a node classification task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this work, the pre-trained language model we used to encode the text is ERNIE 2.0 (Sun et al., 2020) , which is considered to be an expressive powerful model. For all experiments, the learning rate of the ERNIE encoder was initialized to 1e \u22124 , batch size is 64 and the maximum sequence length is 128. We used the Adam optimizer with linear learning rate decay. In the retrieval phase, we fine-tuned the model for 5000 steps on a NVIDIA Tesla V100 (32GB GPU) machine. In the ranking phase, the pre-trained model was fine-tuned for 1 epoch with virtual adversarial training. To generate the edges for applying GNNs with the pre-trained language model to tackle multi-hop inference problem, we fine-tuned the ERNIE model for 5000 steps and selected the top 20 explanation sentence pairs as the edges for each question.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 103, |
| "text": "(Sun et al., 2020)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Configuration", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For evaluation, we select the top 100 ranked facts from the retrieval phase, and we found that the oracle MAP score can reach 92.03% with top 100 ranked facts, as shown in Table 1 . We concatenate the correct answer choice with the question, because we found that adding the wrong options can mislead the model and lead to a lower MAP score. It is intuitive since the wrong options are not necessary to answer the question.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 172, |
| "end": 179, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Configuration", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We report the tf-idf based ranking scores as the baseline. From the Table 2 we can see that, though the tf-idf method can quickly score all the facts, its MAP score is very low compared with the ERNIE retriever. The ERNIE Re-ranker can significantly improve the performance on the task, which outperforms the retriever by more than 10 percentage points of MAP score. The ERNIE graphsage model can also improve the performance of the Re-ranker. To further improve the performance on the leaderboard, we run our ERNIE Re-ranker model three times and then ensemble them to get a better performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 68, |
| "end": 75, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The performance of hidden test set of our final model is shown in Table 3 . Our submission achieved the first place in TextGraphs 2020 Shared Task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 66, |
| "end": 73, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "MAP@top50 MAP@top100 ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "We proposed our approach to the shared task on \"Multi-hop Inference Explanation Regeneration\". Our system consists of a pre-trained model-based retriever and a graph-based pre-trained model for the reranking phase, and achieved the first place in TextGraphs 2020 Shared Task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirec- tional transformers for language understanding. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Inductive representation learning on large graphs", |
| "authors": [ |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Hamilton", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhitao", |
| "middle": [], |
| "last": "Ying", |
| "suffix": "" |
| }, |
| { |
| "first": "Jure", |
| "middle": [], |
| "last": "Leskovec", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1024--1034", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Will Hamilton, Zhitao Ying, and Jure Leskovec. 2017. Inductive representation learning on large graphs. In Advances in neural information processing systems, pages 1024-1034.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "TextGraphs 2020 Shared Task on Multi-Hop Inference for Explanation Regeneration", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Dmitry", |
| "middle": [], |
| "last": "Ustalov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Jansen and Dmitry Ustalov. 2020. TextGraphs 2020 Shared Task on Multi-Hop Inference for Explanation Regeneration. In Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Multi-hop inference for sentence-level textgraphs: How challenging is meaningfully combining information for science question answering?", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.11267" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Jansen. 2018. Multi-hop inference for sentence-level textgraphs: How challenging is meaningfully combin- ing information for science question answering? arXiv preprint arXiv:1805.11267.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Looking beyond the surface: A challenge set for reading comprehension over multiple sentences", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Khashabi", |
| "suffix": "" |
| }, |
| { |
| "first": "Snigdha", |
| "middle": [], |
| "last": "Chaturvedi", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "Shyam", |
| "middle": [], |
| "last": "Upadhyay", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "252--262", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Khashabi, Snigdha Chaturvedi, Michael Roth, Shyam Upadhyay, and Dan Roth. 2018. Looking beyond the surface: A challenge set for reading comprehension over multiple sentences. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 252-262.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Semi-supervised classification with graph convolutional networks", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Kipf", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.02907" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Albert: A lite bert for self-supervised learning of language representations", |
| "authors": [ |
| { |
| "first": "Zhenzhong", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learning of language representations. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Erniesage: Ernie sample aggregate", |
| "authors": [ |
| { |
| "first": "Pgl", |
| "middle": [], |
| "last": "Team", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Team PGL. 2020. Erniesage: Ernie sample aggregate. https://github.com/PaddlePaddle/PGL/ tree/master/examples/erniesage.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Squad: 100,000+ questions for machine comprehension of text", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Konstantin", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2383--2392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2383-2392.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Ernie 2.0: A continual pre-training framework for language understanding", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuohuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu-Kun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Shikun", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Hao Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "8968--8975", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Sun, Shuohuan Wang, Yu-Kun Li, Shikun Feng, Hao Tian, Hua Wu, and Haifeng Wang. 2020. Ernie 2.0: A continual pre-training framework for language understanding. In AAAI, pages 8968-8975.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Virtual adversarial training: A regularization method for supervised and semi-supervised learning", |
| "authors": [ |
| { |
| "first": "Miyato", |
| "middle": [], |
| "last": "Takeru", |
| "suffix": "" |
| }, |
| { |
| "first": "Maeda", |
| "middle": [], |
| "last": "Shin-Ichi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ishii", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Koyama", |
| "middle": [], |
| "last": "Masanori", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1--1", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miyato Takeru, Maeda Shin-Ichi, Ishii Shin, and Koyama Masanori. 2018. Virtual adversarial training: A reg- ularization method for supervised and semi-supervised learning. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "353--355", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 353-355.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "WorldTree v2: A corpus of science-domain structured explanations and inference patterns supporting multi-hop inference", |
| "authors": [ |
| { |
| "first": "Zhengnan", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Thiem", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaycie", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Wainwright", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Marmorstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "5456--5473", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengnan Xie, Sebastian Thiem, Jaycie Martin, Elizabeth Wainwright, Steven Marmorstein, and Peter Jansen. 2020. WorldTree v2: A corpus of science-domain structured explanations and inference patterns supporting multi-hop inference. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 5456- 5473, Marseille, France, May. European Language Resources Association.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "k=1" |
| }, |
| "TABREF0": { |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td/><td/><td colspan=\"2\">Center Feature</td><td/></tr><tr><td/><td colspan=\"2\">Self Feature</td><td>Neigh Feature</td><td/></tr><tr><td/><td/><td/><td/><td>C</td><td>Neighbor Context</td><td>S</td><td>Center Context</td><td>S</td></tr><tr><td/><td/><td/><td>C</td><td colspan=\"2\">Neighbor Context</td><td>S</td><td>Center Context</td><td>S</td></tr><tr><td>C</td><td>Node Context</td><td>S</td><td/><td/></tr><tr><td/><td/><td/><td/><td/><td>\u2026</td></tr><tr><td/><td/><td>\u2026</td><td/><td/><td>ERNIE \u2026</td></tr><tr><td/><td>ERNIE</td><td>\u2026</td><td/><td/><td>ERNIE</td><td>\u2026</td></tr><tr><td/><td/><td/><td/><td/><td>\u2026</td></tr><tr><td/><td/><td/><td/><td>[CLS ]</td><td>Neighbor Tokens</td><td>[SEP ]</td><td>Center Tokens</td><td>[SEP]</td></tr><tr><td/><td/><td/><td>[CLS]</td><td colspan=\"2\">Neighbor Tokens</td><td>Position Embeddings [SEP] Center Tokens</td><td>[SEP]</td></tr><tr><td/><td/><td/><td/><td/><td>Sentence Embedding Position Embeddings</td></tr><tr><td/><td/><td/><td/><td/><td>Sentence Embedding</td></tr></table>", |
| "html": null, |
| "text": "" |
| }, |
| "TABREF2": { |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"2\">Model/participant MAP</td></tr><tr><td>Our model</td><td>60.33%</td></tr><tr><td>alvysinger</td><td>58.43%</td></tr><tr><td>aisys</td><td>52.33%</td></tr></table>", |
| "html": null, |
| "text": "MAP score on the development set." |
| }, |
| "TABREF3": { |
| "num": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "text": "MAP score on the test set." |
| } |
| } |
| } |
| } |