| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:31:19.894190Z" |
| }, |
| "title": "Grounded Dialogue Generation with Cross-encoding Re-ranker, Grounding Span Prediction, and Passage Dropout", |
| "authors": [ |
| { |
| "first": "Kun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Chinese University of Hong Kong", |
| "location": { |
| "settlement": "Hong Kong SAR", |
| "country": "China" |
| } |
| }, |
| "email": "kunli@se.cuhk.edu.hk" |
| }, |
| { |
| "first": "Tianhua", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "thzhang@cpii.hk" |
| }, |
| { |
| "first": "Liping", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "lptang@cpii.hk" |
| }, |
| { |
| "first": "Junan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "jali@cpii.hk" |
| }, |
| { |
| "first": "Hongyuan", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Chinese University of Hong Kong", |
| "location": { |
| "settlement": "Hong Kong SAR", |
| "country": "China" |
| } |
| }, |
| "email": "hylu@se.cuhk.edu.hk" |
| }, |
| { |
| "first": "Xixin", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Chinese University of Hong Kong", |
| "location": { |
| "settlement": "Hong Kong SAR", |
| "country": "China" |
| } |
| }, |
| "email": "xixinwu@cuhk.edu.hk" |
| }, |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Chinese University of Hong Kong", |
| "location": { |
| "settlement": "Hong Kong SAR", |
| "country": "China" |
| } |
| }, |
| "email": "hmmeng@se.cuhk.edu.hk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "MultiDoc2Dial presents an important challenge on modeling dialogues grounded with multiple documents. This paper proposes a pipeline system of \"retrieve, re-rank, and generate\", where each component is individually optimized. This enables the passage re-ranker and response generator to fully exploit training with ground-truth data. Furthermore, we use a deep cross-encoder trained with localized hard negative passages from the retriever. For the response generator, we use grounding span prediction as an auxiliary task to be jointly trained with the main task of response generation. We also adopt a passage dropout and regularization technique to improve response generation performance. Experimental results indicate that the system clearly surpasses the competitive baseline and our team CPII-NLP ranked 1st among the public submissions on ALL four leaderboards based on the sum of F1, SacreBLEU, METEOR and RougeL scores.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "MultiDoc2Dial presents an important challenge on modeling dialogues grounded with multiple documents. This paper proposes a pipeline system of \"retrieve, re-rank, and generate\", where each component is individually optimized. This enables the passage re-ranker and response generator to fully exploit training with ground-truth data. Furthermore, we use a deep cross-encoder trained with localized hard negative passages from the retriever. For the response generator, we use grounding span prediction as an auxiliary task to be jointly trained with the main task of response generation. We also adopt a passage dropout and regularization technique to improve response generation performance. Experimental results indicate that the system clearly surpasses the competitive baseline and our team CPII-NLP ranked 1st among the public submissions on ALL four leaderboards based on the sum of F1, SacreBLEU, METEOR and RougeL scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The task of developing information-seeking dialogue systems has seen many recent research advancements. The goal is to answer users' questions grounded on documents in a conversational manner. MultiDoc2Dial 1 is a realistic task proposed by Feng et al. (2021) to model goal-oriented information-seeking dialogues that are grounded on multiple documents and participants are required to generate appropriate responses towards users' utterances according to the documents. To facilitate this task, the authors also propose a new dataset that contains dialogues grounded in multiple documents from four domains. Unlike previous work that mostly describes document-grounded dialogue modeling as a machine reading comprehension task based on one particular document or passage, the MultiDoc2Dial involves multiple topics within a conversation, hence it is grounded on different documents. The task contains two sub-tasks: Grounding Span Prediction aims to find the most relevant span from multiple documents for the next agent response, and Agent Response Generation generates the next agent response. This paper focuses on our work on the second sub-task, and presents three major findings and contributions:", |
| "cite_spans": [ |
| { |
| "start": 241, |
| "end": 259, |
| "text": "Feng et al. (2021)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 In order to fully leverage the ground-truth training data, we propose to individually optimize the retriever, re-ranker, and response generator.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose to adopt a deep cross-encoded re-ranker that is trained with localized hard negatives sampled from the retriever results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose to use grounding span prediction as an auxiliary task for the generator and use passage dropout as a regularization technique to improve the generation performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Experimental results indicate that our proposed system achieves a performance with marked improvement over the strong baseline.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Open-domain Question Answering systems have evolved to adopt the popular \"Retriever-Reader (Generator)\" architecture since DrQA (Chen et al., 2017) . Previous work , Guu et al., 2020 adopt end-to-end training strategy to jointly learn the retriever and reader with question-answer pairs. Retrieval-augmented Generation (RAG) (Lewis et al., 2020b) uses Dense Passage Retriever (DPR) as the retriever to extract multiple documents related to the query and feed them into a BART (Lewis et al., 2020a) generator for answer generation. Izacard and Grave (2021) proposed the Fusion-in-Decoder method which processes passages individually in the encoder but jointly in the decoder, surpassing the performance of RAG. Other work like QuAC (Choi et al., 2018) , ShARC (Saeidi et al., 2018) and CoQA (Reddy et al., 2019) focus on the machine reading comprehension task, which assumes that the associated document is given. In particular, Feng et al. (2020) proposed the Doc2Dial task, which aims to extract the related span from the given documents for generating the corresponding answer.", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 147, |
| "text": "(Chen et al., 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 164, |
| "end": 182, |
| "text": ", Guu et al., 2020", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 325, |
| "end": 346, |
| "text": "(Lewis et al., 2020b)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 476, |
| "end": 497, |
| "text": "(Lewis et al., 2020a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 531, |
| "end": 555, |
| "text": "Izacard and Grave (2021)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 731, |
| "end": 750, |
| "text": "(Choi et al., 2018)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 759, |
| "end": 780, |
| "text": "(Saeidi et al., 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 790, |
| "end": 810, |
| "text": "(Reddy et al., 2019)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 928, |
| "end": 946, |
| "text": "Feng et al. (2020)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The MultiDoc2Dial task aims to generate an appropriate response R based on an input query Q (the current user turn u T and the concatenated dialogue history {u T \u22121", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1 } := u 1 , u 2 , ..., u T \u22121 ) and a collection of passages {P i } M i=1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The passages are extracted from documents based on document structural information indicated by markup tags in the original HTML file. The organizer splits the MultiDoc2Dial data into train, validation, development and test set, and results on the latter two are evaluated through the leaderboard 2 . The validation, development and test set contain two settings: seen and unseen, which is categorized based on whether there are dialogues grounded on the documents seen/unseen during training. We leave detailed dataset description in Appendix A.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We propose a pipeline system of \"retrieve, re-rank, and generate\". Following previous work in Lewis et al. (2020b) ; Feng et al. (2021) , we adopt DPR as the retriever ( \u00a74.1) to efficiently filter out irrelevant passages and narrow the search space. We then refine the retrieval results with a deep cross-encoder ( \u00a74.2) trained with localized negatives (Gao et al., 2021) . We introduce a passage dropout and regularization technique to enhance the robustness of the generator ( \u00a74.3) and use the grounding span prediction as an auxiliary task. Furthermore, pipeline training is adopted where each component is individually optimized to fully utilize the supervision. Experimental results ( \u00a75.3) also indicate the effectiveness and merits of the training strategy, which we observed to be a key factor for the performance gain. ", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 114, |
| "text": "Lewis et al. (2020b)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 117, |
| "end": 135, |
| "text": "Feng et al. (2021)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 355, |
| "end": 373, |
| "text": "(Gao et al., 2021)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Following Feng et al. 2021, we adopt DPR as the retriever with a representation-based bi-encoder, that is, a dialogue query encoder q(\u2022) and a passage context encoder p(\u2022). Given an input query Q and a collection of", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Retrieval", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "passages {P i } M i=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Retrieval", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": ", we extract the query encoding as q(Q) and the passage encoding as p(P i ). The similarity is defined as the dot product of the two vectors \u27e8q(Q), p(P i )\u27e9 and the model is trained to optimize the negative log likelihood of the positive passage among L in-batch and hard negatives. We then pre-compute the representations of all passages and index them offline. Maximum Inner Product Search (MIPS) with Faiss (Johnson et al., 2017 ) is adopted to retrieve the top-K passages during inference.", |
| "cite_spans": [ |
| { |
| "start": 410, |
| "end": 431, |
| "text": "(Johnson et al., 2017", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Retrieval", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To re-rank the passages retrieved by DPR, we use a BERT-based cross-encoder that exploits localized negatives sampled from DPR results (Gao et al., 2021) . This means that the construction of the training set for the re-ranker is based on the top negative passages retrieved by the DPR. Specifically, given a query Q, its corresponding ground truth passage P + , and its top-N negative passages {P \u2212 j } N j=1 retrieved by DPR, we first calculate a deep distance function for each positive and negative passage against the query:", |
| "cite_spans": [ |
| { |
| "start": 135, |
| "end": 153, |
| "text": "(Gao et al., 2021)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "dist(Q, P) = v T cls(BERT(concat(Q, P))),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "(1) where v represents a trainable vector, cls extracts the [CLS] vector from BERT. Consequently, such a distance function is deeply cross-encoded, as we feed the concatenation of the query and the passage into the model instead of encoding them individually with a representation-based bi-encoder (Feng et al., 2021) . We then apply a contrastive loss:", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 317, |
| "text": "(Feng et al., 2021)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L c = \u2212 log exp(dist(Q, P + )) P\u2208P \u00b1 exp(dist(Q, P)) ,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "P \u00b1 represents P + \u222a {P \u2212 i } N i=1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Here, it is important to condition the gradient on the negative passages to learn to recognize the positive passage from hard negatives retrieved by the DPR. 3 Ensemble We create an ensemble of three pretrained models (Dietterich, 2000) , namely, BERT (Devlin et al., 2019) , RoBERTa (Liu et al., 2019) , and ELECTRA (Clark et al., 2020) for re-ranking. We first calculate their distance function with Equation 1, with the output scores denoted as O B , O R , and O E . We define the final scores O as the weighted summation of the above three scores:", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 159, |
| "text": "DPR. 3", |
| "ref_id": null |
| }, |
| { |
| "start": 218, |
| "end": 236, |
| "text": "(Dietterich, 2000)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 252, |
| "end": 273, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 284, |
| "end": 302, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 317, |
| "end": 337, |
| "text": "(Clark et al., 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "O = \u03b1O B + \u03b2O R + \u03b3O E ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where \u03b1, \u03b2, and \u03b3 represent the weight hyperparameters for each model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Passage Re-ranking", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For response generation, we leverage the pretrained sequence-to-sequence model BART large (Lewis et al., 2020a) , where the encoder is fed the concatenation of a query and a passage [Q, P] , and the decoder is then required to generate the corresponding response R. We use the ground truth passage as P for training. The training process can be summarized as follows:", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 111, |
| "text": "(Lewis et al., 2020a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 182, |
| "end": 188, |
| "text": "[Q, P]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Joint Training with Grounding Prediction The grounding span in a passage is the supporting evidence for the response, which can provide helpful information for response generation. Therefore, we take grounding span prediction as the auxiliary task and apply multi-task learning for model training. Specifically, the passage is first encoded into a sequence of hidden representations", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "h i = Encoder([Q, P]), i \u2208 {1, ..., |P|}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Then a classifier outputs the probability of the i-th token of P to lie within the grounding span as P (y i |Q, P) = sigmoid(MLP(h i )). We define this task's training objective as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L G = \u2212 |P| i=1 logP (y i |Q, P).", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Passage Dropout and Regularization Preliminary experiments indicate that the generator is prone to overfit to some passages quoted frequently in the train set, which may cause generalization errors when applied to previously unseen passages. Hence, we apply passage dropout to enhance the robustness of the generator. In details, for a training sample ([Q, P], R), a consecutive span with a specified length (of 25% in our experiments) in P is randomly selected and then dropped, which produces P \u2032 . It is noteworthy that passage dropout is required to avoid truncating content of grounding spans. 4 Furthermore, we repeat passage dropout twice for each sample in a batch, and obtain ([Q, P \u2032 ], R) as well as ([Q, P \u2032\u2032 ], R). Since the grounding span in a passage serves as the oracle for response generation, the two modified inputs should have similar prediction distribution, denoted as P (r i |Q, P \u2032 , r <i ) and P (r i |Q, P \u2032\u2032 , r <i ), where r i is the i-th token of R. Hence, inspired by Liang et al. (2021) , we propose to regularize the predictions from different passage dropouts by minimizing the bidirectional Kullback-Leibler (KL) divergence between these two different output distributions as L KL :", |
| "cite_spans": [ |
| { |
| "start": 999, |
| "end": 1018, |
| "text": "Liang et al. (2021)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "i (KL(P (r i |Q, P \u2032 , r <i )\u2225P (r i |Q, P \u2032\u2032 , r <i )) + KL(P (r i |Q, P \u2032\u2032 , r <i )\u2225P (r i |Q, P \u2032 , r <i ))).", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We define the training objective for response R as the basic negative log-likelihood:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "L N LL = \u2212 i (logP (r i |Q, P \u2032 , r <i ) + logP (r i |Q, P \u2032\u2032 , r <i )). (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "With passage dropout, the learning objective of grounding prediction (Eq.4) is updated for P \u2032 and P \u2032\u2032 . Then we have the final training objective:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L = 1 2 L KL + L N LL + L G .", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Response Generation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "After the re-ranker returns the top-5 passages corresponding to the query Q, we filter out the passages with a low re-ranking score (Eq.3), namely, the ones that have a score gap of over 0. predicts a response R given the input [Q, P]. 5 We employ beam-search (beam width=5) during decoding.", |
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 237, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We evaluate the passage retrieval results with recall (R) and mean reciprocal rank (MRR). We report response generation performance based on F1, Exact Match (EM) (Rajpurkar et al., 2016) , SacreBLEU (S-BLEU; Post, 2018), and RougeL (Lin, 2004) . Table 1 shows the results we obtain for each data split, each including the seen and unseen settings. RAG (Lewis et al., 2020b) is the baseline adopted by the organizer, and we reproduce it with a more aggressive setting (e.g., a greater input length and beam size), in order to have a fair comparison with the proposed approach. Our generator is a single model. Table 1 shows that the proposed approach consistently outperforms the baseline with significant gaps. We argue that the improvement is derived from (1) high-quality retrieval, (2) stronger generator and (3) pipeline-based training, which will be discussed in the following sections.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 186, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 232, |
| "end": 243, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 352, |
| "end": 373, |
| "text": "(Lewis et al., 2020b)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 246, |
| "end": 253, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 609, |
| "end": 616, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Since the passage supervision of the development and test data is unavailable and the leaderboards do not provide the retrieval scores, we analyze the passage retrieval performance on the validation set 6 as shown in Table 2 . The baseline adopts DPR as retriever, and we evaluate both the official and our reproduced versions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 217, |
| "end": 224, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Retrieval Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "MRR@5 R@1 R@5 MRR@5 R@1 R@5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method seen unseen", |
| "sec_num": null |
| }, |
| { |
| "text": "Official Introducing the re-ranker gave marked improvement for all three pre-trained models, especially when applied to the unseen passages. In particular, RoBERTa achieves 53.5% and 126.6% improvement over the Reproduced DPR at R@1 on the seen and unseen settings respectively. The ensemble of different re-rankers brings further improvement -E(B, E, R) exceeds the best single re-ranker by around 0.01 across all metrics on the seen data. Furthermore, improved retrieval directly enhances the final task results. Besides a more powerful generator, the large gap between RAG and our approach on the unseen Val data in Table 1 may also be attributed to the great performance gain on passage retrieval, from 0.248 to 0.62 on R@1. Table 3 shows that each component in our approach contributes to improvement. Passage dropout and regularization bring notable performance gains for the unseen setting. This demonstrates robustness in the generator, which is important in practical use.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 619, |
| "end": 626, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 729, |
| "end": 736, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method seen unseen", |
| "sec_num": null |
| }, |
| { |
| "text": "To investigate the merits of pipeline training on generation, we separate the BART large generator from other parts in the reproduced RAG. We input queries combined with the passages returned by the re-ranker for inference. The first and sec- Table 3 : Ablation analysis of the generators based on the validation set. BART in the RAG denotes the generator in the fully-trained RAG. The same retrieval is used in all cases. S-BLEU represents SacreBLEU.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 242, |
| "end": 249, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation Study on the Generator", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "ond rows of Table 3 show that the BART in the RAG gained some improvement through better retrieval, but remains inferior to the BART trained in a pipeline fashion. This is mainly attributed to the fact that under the end-to-end training framework of the RAG, the generator could receive some deteriorated query-passage pairs during training, if the retriever cannot successfully return gold passages to the generator. Contrarily, pipeline training for the generator can make full use of training data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation Study on the Generator", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "This paper presents a pipeline system of \"retrieve, re-rank, and generate\" for the MultiDoc2Dial challenge. The advantage is that each of the three components can fully exploit the ground-truth training data. We apply a deep cross-encoder architecture where we create a training set using localized hard negatives sampled from the retriever results. We adopt grounding span prediction as an auxiliary task to be jointly trained with the response generator. We also apply passage dropout and regularization to improve response generation performance. Experimental results indicate that the proposed system improves over a strong, competitive baseline and our team got 1st place on ALL four leaderboards.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://eval.ai/web/challenges/challengepage/1437/leaderboard", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Feng et al. (2021) found that there exists passages that are similar to one another in the dataset. Therefore, it is intuitively important to distinguish these hard negative passages from the ground truth passage. Empirically, we also found that excluding hard negative passages from the training process hampers the re-ranking performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "If the selected span overlaps with a grounding span, this sampling is discarded and another span would be sampled.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Grounding Prediction and passage dropout are not implemented in the inference phase.6 We evaluate on a cleaned validation set where repeated queries are removed, resulting in 4181 unique instances (cf. 4201 originally) and 121 unique instances (cf. 121 originally) in the seen and unseen settings respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is partially supported by the Centre for Perceptual and Interactive Intelligence (CPII) Ltd., a CUHK-led under the InnoHK scheme of Innovation and Technology Commission; and in part by the HKSAR RGC GRF (Ref No. 14207619).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "Instance Table 4 : Data statistics of different splits. We split a single conversation into multiple instances of the train and validation set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 16, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": null |
| }, |
| { |
| "text": "MultiDoc2Dial contains 4796 conversations with an average of 14 turns grounded in 488 documents from four domains. After splitting, the number of passages in the seen set is M = 4110 for the official data pre-processing and M = 3820 for our processed data to remove duplicate passages. Similarly, the number of passages in the unseen set is M = 963. Table 4 shows the statistics of dataset in different splits.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 350, |
| "end": 357, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Dataset Description", |
| "sec_num": null |
| }, |
| { |
| "text": "Our implementations of DPR, BERT, RoBERTa, ELECTRA, and BART are based on the Transformers library (Wolf et al., 2019) . All the models are trained on an RTX 3090 GPU with 24GB VRAM.Retriever We train the retriever on our preprocessed MultiDoc2Dial data with an effective batch size of 16 following Facebook DPR and the corresponding results are shown in Table 2 named as Reproduced DPR. The Official DPR in Table 2 is fine-tuned with a batch size 128 by the organizer.Re-ranker Three public pre-trained language models are ensembled, namely, deepset/bertlarge-uncased-whole-word-masking-squad2 7 , deepset/roberta-large-squad2 8 and deepset/electrabase-squad2 9 . We train the models with a batch size 1 for LARGE (gradient accumulation=4) and 4 for BASE. We use 6 epochs, a learning rate of 1e-5 and weight decay of 0.01. The maximum length of query, i.e., the concatenated dialogue history {u T \u22121 1 } and the current user turn u T is set as 128. Following Feng et al. (2021) , the query is 7 https://huggingface.co/deepset/bert-large-uncasedwhole-word-masking-squad28 https://huggingface.co/deepset/roberta-large-squad2 9 https://huggingface.co/deepset/electra-base-squad2 constructed using reverse conversation order as u T [SEP ]agent : u T \u22121 ||user : u T \u22122 ||...||user : u 1 and truncated from the tail by the tokenizers. The number of localized negatives in training is 7, sampled from Top-N (N=50) returned negative passages from retriever. During inference, re-ranker re-scores Top-K (K=100) returned passage candidates from retriever and selects the Top-5 passages for generator.", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 118, |
| "text": "(Wolf et al., 2019)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 960, |
| "end": 978, |
| "text": "Feng et al. (2021)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 355, |
| "end": 362, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 408, |
| "end": 415, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "B Implementation Details", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Reading Wikipedia to answer opendomain questions", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1870--1879", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1171" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading Wikipedia to answer open- domain questions. In Proceedings of the 55th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 1870-1879, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Quac: Question answering in context", |
| "authors": [ |
| { |
| "first": "Eunsol", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "He", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Yatskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Wentau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2174--2184", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/d18-1241" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen- tau Yih, Yejin Choi, Percy Liang, and Luke Zettle- moyer. 2018. Quac: Question answering in context. In Proceedings of the 2018 Conference on Empiri- cal Methods in Natural Language Processing, Brus- sels, Belgium, October 31 -November 4, 2018, pages 2174-2184. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Pre-training transformers as energy-based cloze models", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V. Le, and Christopher D. Manning. 2020. Pre-training trans- formers as energy-based cloze models. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Ensemble methods in machine learning", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dietterich", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Multiple Classifier Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1--15", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas G. Dietterich. 2000. Ensemble methods in ma- chine learning. In Multiple Classifier Systems, pages 1-15, Berlin, Heidelberg. Springer Berlin Heidelberg.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "MultiDoc2Dial: Modeling dialogues grounded in multiple documents", |
| "authors": [ |
| { |
| "first": "Song", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Sankalp", |
| "middle": [], |
| "last": "Siva", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Patel", |
| "suffix": "" |
| }, |
| { |
| "first": "Sachindra", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "6162--6176", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.emnlp-main.498" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Song Feng, Siva Sankalp Patel, Hui Wan, and Sachindra Joshi. 2021. MultiDoc2Dial: Modeling dialogues grounded in multiple documents. In Proceedings of the 2021 Conference on Empirical Methods in Natu- ral Language Processing, pages 6162-6176, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "doc2dial: A goal-oriented document-grounded dialogue dataset",
| "authors": [ |
| { |
| "first": "Song", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "Chulaka" |
| ], |
| "last": "Hui Wan", |
| "suffix": "" |
| }, |
| { |
| "first": "Siva Sankalp", |
| "middle": [], |
| "last": "Gunasekara", |
| "suffix": "" |
| }, |
| { |
| "first": "Sachindra", |
| "middle": [], |
| "last": "Patel", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [ |
| "A" |
| ], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lastras", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "2020", |
| "issue": "", |
| "pages": "8118--8128", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.652" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Song Feng, Hui Wan, R. Chulaka Gunasekara, Siva Sankalp Patel, Sachindra Joshi, and Luis A. Lastras. 2020. doc2dial: A goal-oriented document- grounded dialogue dataset. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, Online, Novem- ber 16-20, 2020, pages 8118-8128. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Rethink training of BERT rerankers in multi-stage retrieval pipeline", |
| "authors": [ |
| { |
| "first": "Luyu", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuyun", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Callan", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Advances in Information Retrieval -43rd European Conference on IR Research, ECIR 2021, Virtual Event", |
| "volume": "II", |
| "issue": "", |
| "pages": "280--286", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-030-72240-1_26" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luyu Gao, Zhuyun Dai, and Jamie Callan. 2021. Re- think training of BERT rerankers in multi-stage re- trieval pipeline. In Advances in Information Retrieval -43rd European Conference on IR Research, ECIR 2021, Virtual Event, March 28 -April 1, 2021, Pro- ceedings, Part II, volume 12657 of Lecture Notes in Computer Science, pages 280-286. Springer.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "REALM: retrievalaugmented language model pre-training", |
| "authors": [ |
| { |
| "first": "Kelvin", |
| "middle": [], |
| "last": "Guu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Zora", |
| "middle": [], |
| "last": "Tung", |
| "suffix": "" |
| }, |
| { |
| "first": "Panupong", |
| "middle": [], |
| "last": "Pasupat", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasu- pat, and Ming-Wei Chang. 2020. REALM: retrieval- augmented language model pre-training. CoRR, abs/2002.08909.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Leveraging passage retrieval with generative models for open domain question answering", |
| "authors": [ |
| { |
| "first": "Gautier", |
| "middle": [], |
| "last": "Izacard", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 16th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.eacl-main.74" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gautier Izacard and Edouard Grave. 2021. Leveraging passage retrieval with generative models for open do- main question answering. In Proceedings of the 16th", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, EACL 2021", |
| "authors": [], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "874--880", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Conference of the European Chapter of the Associ- ation for Computational Linguistics: Main Volume, EACL 2021, Online, April 19 -23, 2021, pages 874- 880. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Billion-scale similarity search with GPUs. arXiv e-prints", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthijs", |
| "middle": [], |
| "last": "Douze", |
| "suffix": "" |
| }, |
| { |
| "first": "Herv\u00e9", |
| "middle": [], |
| "last": "J\u00e9gou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1702.08734" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Johnson, Matthijs Douze, and Herv\u00e9 J\u00e9gou. 2017. Billion-scale similarity search with GPUs. arXiv e-prints, page arXiv:1702.08734.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Dense passage retrieval for opendomain question answering", |
| "authors": [ |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Karpukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Barlas", |
| "middle": [], |
| "last": "Oguz", |
| "suffix": "" |
| }, |
| { |
| "first": "Sewon", |
| "middle": [], |
| "last": "Min", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ledell", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "6769--6781", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.550" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open- domain question answering. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6769-6781, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Latent retrieval for weakly supervised open domain question answering", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. 2019. Latent retrieval for weakly supervised open domain question answering. CoRR, abs/1906.00300.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdelrahman", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2020", |
| "issue": "", |
| "pages": "7871--7880", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.703" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020a. BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and com- prehension. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pages 7871-7880. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Retrieval-augmented generation for knowledge-intensive NLP tasks", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "H" |
| ], |
| "last": "Patrick", |
| "suffix": "" |
| }, |
| { |
| "first": "Ethan", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Aleksandra", |
| "middle": [], |
| "last": "Perez", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Piktus", |
| "suffix": "" |
| }, |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Petroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Karpukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Heinrich", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "K\u00fcttler", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Rockt\u00e4schel", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick S. H. Lewis, Ethan Perez, Aleksandra Pik- tus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\u00fcttler, Mike Lewis, Wen-tau Yih, Tim Rockt\u00e4schel, Sebastian Riedel, and Douwe Kiela. 2020b. Retrieval-augmented generation for knowledge-intensive NLP tasks. In Advances in Neu- ral Information Processing Systems 33: Annual Con- ference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "R-drop: Regularized dropout for neural networks", |
| "authors": [ |
| { |
| "first": "Xiaobo", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lijun", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Juntao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaobo Liang, Lijun Wu, Juntao Li, Yue Wang, Qi Meng, Tao Qin, Wei Chen, M. Zhang, and Tie- Yan Liu. 2021. R-drop: Regularized dropout for neural networks. In Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "ROUGE: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text Summarization Branches Out", |
| "volume": "", |
| "issue": "", |
| "pages": "74--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for auto- matic evaluation of summaries. In Text Summariza- tion Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv e-prints", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv e-prints, page arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "186--191", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186- 191, Brussels, Belgium. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "SQuAD: 100,000+ questions for machine comprehension of text", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Konstantin", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2383--2392", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1264" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 2383-2392, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Coqa: A conversational question answering challenge", |
| "authors": [ |
| { |
| "first": "Siva", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Trans. Assoc. Comput. Linguistics", |
| "volume": "7", |
| "issue": "", |
| "pages": "249--266", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siva Reddy, Danqi Chen, and Christopher D. Manning. 2019. Coqa: A conversational question answering challenge. Trans. Assoc. Comput. Linguistics, 7:249- 266.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Interpretation of natural language rules in conversational machine reading", |
| "authors": [ |
| { |
| "first": "Marzieh", |
| "middle": [], |
| "last": "Saeidi", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Bartolo", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "H" |
| ], |
| "last": "Patrick", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Rockt\u00e4schel", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Sheldon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Bouchard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2087--2097", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/d18-1233" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marzieh Saeidi, Max Bartolo, Patrick S. H. Lewis, Sameer Singh, Tim Rockt\u00e4schel, Mike Sheldon, Guil- laume Bouchard, and Sebastian Riedel. 2018. Inter- pretation of natural language rules in conversational machine reading. In Proceedings of the 2018 Con- ference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 -Novem- ber 4, 2018, pages 2087-2097. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Huggingface's transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Brew", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtowicz, and Jamie Brew. 2019. Huggingface's transformers: State-of-the-art natural language processing. CoRR, abs/1910.03771.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Training process of our generator.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "Comparison between the baseline and the proposed framework on the validation, development and test set. The scores with * are cited from the leaderboard. S-BLEU represents SacreBLEU.", |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "Retrieval performance on the MultiDoc2Dial validation set. All models are fine-tuned using the training set only. * indicates the model trained on the official pre-processed data; others are trained on our preprocessed version. E(\u2022) denotes ensemble.", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |