| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:34:49.735206Z" |
| }, |
| "title": "Generating Synthetic Data for Task-Oriented Semantic Parsing with Hierarchical Representations", |
| "authors": [ |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Amazon Translate Berlin", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Amazon Alexa Cambridge", |
| "location": { |
| "region": "MA", |
| "country": "USA" |
| } |
| }, |
| "email": "mingtan@amazon.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Modern conversational AI systems support natural language understanding for a wide variety of capabilities. While a majority of these tasks can be accomplished using a simple and flat representation of intents and slots, more sophisticated capabilities require complex hierarchical representations supported by semantic parsing. State-of-the-art semantic parsers are trained using supervised learning with data labeled according to a hierarchical schema which might be costly to obtain or not readily available for a new domain. In this work, we explore the possibility of generating synthetic data for neural semantic parsing using a pretrained denoising sequence-to-sequence model (i.e., BART). Specifically, we first extract masked templates from the existing labeled utterances, and then fine-tune BART to generate synthetic utterances conditioning on the extracted templates. Finally, we use an auxiliary parser (AP) to filter the generated utterances. The AP guarantees the quality of the generated data. We show the potential of our approach when evaluating on the Facebook TOP dataset 1 for navigation domain.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Modern conversational AI systems support natural language understanding for a wide variety of capabilities. While a majority of these tasks can be accomplished using a simple and flat representation of intents and slots, more sophisticated capabilities require complex hierarchical representations supported by semantic parsing. State-of-the-art semantic parsers are trained using supervised learning with data labeled according to a hierarchical schema which might be costly to obtain or not readily available for a new domain. In this work, we explore the possibility of generating synthetic data for neural semantic parsing using a pretrained denoising sequence-to-sequence model (i.e., BART). Specifically, we first extract masked templates from the existing labeled utterances, and then fine-tune BART to generate synthetic utterances conditioning on the extracted templates. Finally, we use an auxiliary parser (AP) to filter the generated utterances. The AP guarantees the quality of the generated data. We show the potential of our approach when evaluating on the Facebook TOP dataset 1 for navigation domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In this work, we investigate semantic parsing with hierarchical representations (Gupta et al., 2018) instead of the traditional logical forms (Zettlemoyer and Collins, 2005) . Given an utterance x, our goal is to produce a tree-structured representation y of the utterance where additional information about intents and slots is introduced at the non-terminal nodes of the tree. We define a template z of a given annotation y as a result of replacing all terminal nodes by a generic [mask] node. Figure 1 shows an example of such an utterance x, its annotation y and the corresponding template z. Figure 1: An example of an input utterance x, its desired output y, and the template z inferred from y . By definition, the template z above can be used to generate other utterances such as \"how is the 5:00 traffic looking\" or \"Any construction on my morning route\". The hierarchical representation for task-oriented parsing proposed in (Gupta et al., 2018) aims for ease of annotation and expressiveness. The dataset in their work, Facebook TOP, is the largest publicly available dataset in English for hierarchical semantic parsing. It has more than 44K annotated queries. We look at the distribution of the templates in Facebook TOP and found that the dataset is highly unbalanced (Figure 2 ). The 10 most frequent templates account for 30% of the training data and 14% of the data are singletons, which are utterances with only a single occurrence. This analysis suggests that it is beneficial to generate more synthetic data for templates with low frequencies. In the field of Natural Language Processing, us-ing synthetic data via back-translation (Sennrich et al., 2016) has shown a great success for machine translation (Edunov et al., 2018) . Unlike machine translation, generating synthetic data for hierarchical semantic parsing is less straightforward. \nOur work positions itself as one of the first to explore the possibility of generate text from graph (template) for semantic parsing.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 100, |
| "text": "(Gupta et al., 2018)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 142, |
| "end": 173, |
| "text": "(Zettlemoyer and Collins, 2005)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 483, |
| "end": 489, |
| "text": "[mask]", |
| "ref_id": null |
| }, |
| { |
| "start": 934, |
| "end": 954, |
| "text": "(Gupta et al., 2018)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1651, |
| "end": 1674, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1725, |
| "end": 1746, |
| "text": "(Edunov et al., 2018)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 496, |
| "end": 504, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1281, |
| "end": 1290, |
| "text": "(Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a generic framework for augmenting a semantic parser with synthetic data. Our framework consists of two steps. First, we train a generator, followed by top-p sampling to generate diverse synthetic utterances conditioning on the above-mentioned templates. Generated utterances share similar hierarchical structures (i.e., templates) with real training utterances while providing a wide spectrum of lexical variety. Second, we use an auxiliary parser for filtering on the generated candidates. The filtering step guarantees the quality of the synthetic data. Our generator is a sequence to sequence (seq2seq) model that is pretrained on massive amount of monolingual data with text infilling objective ( \u00a72). We utilize BART (Lewis et al., 2020) , a recently proposed denoising autoencoder, as our generator to avoid training it from scratch. The auxiliary parser can be arbitrary. We experiment with BART-based parser as well as state-of-the-art pointer network parser (s2s-pointer; Rongali et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 749, |
| "end": 769, |
| "text": "(Lewis et al., 2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1008, |
| "end": 1029, |
| "text": "Rongali et al., 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The paper is structured as follows. We introduce our generative model for synthetic data in Section \u00a72. Experimental results on Facebook TOP dataset and sub-sampled datasets to simulate lowresource scenario are presented in Section \u00a73. Section \u00a75 concludes the paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The generative story for generating synthetic data", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Denoising Sequence-to-Sequence as Generator", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Y syn = { y i } M i=1 is given by 1. draw a template z \u223c p \u03c6 (z); 2 2. draw an annotation y \u223c p \u03b8 (y | z) by filling each [mask]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Denoising Sequence-to-Sequence as Generator", |
| "sec_num": "2" |
| }, |
| { |
| "text": "token in z by a word or sequence of words from vocabulary V;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Denoising Sequence-to-Sequence as Generator", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Note that the transformation from annotation y to utterance x is deterministic by removing nonterminals from y. While p \u03c6 (z) can be modeled by an autoregressive neural language model or a Probabilistic Context Free Grammar (Johnson, 1998) , in this work we sample template z from seen templates in the data. We leave the possibility of generating new templates to future work. We need a powerful conditional model p \u03b8 (y | z) to generate annotation y. Thus, we choose BART, a pretrained denoising autoencoder for sequenceto-sequence, as our model. Figure 3a illustrates the idea behind BART. Given an input sequence (a stream of text), one of five types of noise (Figure 3b) is used to corrupt the input sequence. Then BART reconstructs the original sequence by maximizing the likelihood of the original sequence.", |
| "cite_spans": [ |
| { |
| "start": 224, |
| "end": 239, |
| "text": "(Johnson, 1998)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 549, |
| "end": 558, |
| "text": "Figure 3a", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 664, |
| "end": 675, |
| "text": "(Figure 3b)", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Denoising Sequence-to-Sequence as Generator", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Bidirectional Encoder", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoregressive Decoder", |
| "sec_num": null |
| }, |
| { |
| "text": "A B C D E A _ B _ E <s> A B C D (a)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoregressive Decoder", |
| "sec_num": null |
| }, |
| { |
| "text": "BART is trained to reconstruct the corrupted input. Since pretrained BART uses text infilling as noise to corrupt the input sequence, naturally we can use BART to infill the templates. Text infilling is the task where a number of spans in the original input sequence are replaced by a token [mask] and BART is trained to predict the replaced spans in the position of [mask] tokens. For our purpose of generating synthetic data, we fine-tune BART on an infilling dataset where the input is a template z with [mask] and the output is a linearized tree representation y where [mask] tokens are replaced by lexical words as shown in Figure 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 629, |
| "end": 637, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Autoregressive Decoder", |
| "sec_num": null |
| }, |
| { |
| "text": "A B C . D E . A . C . E . A _ . D _ E . A _C . _ E . C . D E . A B Document Rotation Token Masking Token Deletion Text Infilling D E . A B C .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoregressive Decoder", |
| "sec_num": null |
| }, |
| { |
| "text": "BART source/target construction: We call out a few processing steps to construct this infilling dataset. First, non-terminal words are lowercased. We find this is necessary since the input will be tokenized by BART tokenizer and lowercasing non-terminal words prevents oversegmentation. Second, we make each of the closing brackets \"]\" in the original data explicit (e.g., in:get distance], sl:destination]). This transformation provides the model explicit infor- mation of the scope of the intents and slots. Fine-tuning and generation: We fine-tune BART generator using the (template, annotation) pairs. After fine-tuning, we use the generator to generate full parse trees given templates. To increase the diversity of generated samples, we use top-p sampling (Holtzman et al., 2020) instead of beam search. The generator is trained to generate the tokenized labels together with the words. We remove generated annotations with invalid labels and convert the tokenized labels into the original tags in a post-processing step.", |
| "cite_spans": [ |
| { |
| "start": 762, |
| "end": 785, |
| "text": "(Holtzman et al., 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoregressive Decoder", |
| "sec_num": null |
| }, |
| { |
| "text": "Auxiliary parser (AP) for filtering: In our preliminary experiments, we found that the generated samples are noisy. When we train our parser on the concatenation of both real and generated samples, the test accuracy degrades by 1.13% compared with a parser trained purely on real data. We therefore use an auxiliary parser (AP) to select robust samples. The filtering step is straightforward. First, we train an auxiliary semantic parser f \u03b8 (x) on the original Facebook TOP dataset. We then use this trained AP to parse synthetic data ( x i , y i ) and keep those samples where the outputs of the parser f \u03b8 ( x i ) match the synthetic labels y i (i.e., f \u03b8 ( x i ) = y i ). The AP for filtering can be different from the target parser we train for semantic parsing. Therefore, we propose three settings: (1) BART as AP and a sequence-to-sequence model with pointer networks (s2s-pointer; Rongali et al., 2020) as the target parser. (2) BART models for both AP and target parser. (3) s2s-pointer models for both AP and target parser. The comparisons and analysis are detailed in Section \u00a73.", |
| "cite_spans": [ |
| { |
| "start": 890, |
| "end": 911, |
| "text": "Rongali et al., 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoregressive Decoder", |
| "sec_num": null |
| }, |
| { |
| "text": "We use Facebook TOP dataset in our experiments. Statistics of the dataset are shown in Table 1 . While there are more than 31K annotated utterances in training data, the number of unique templates is about 6K. As we have shown in Section 1, the distribution of the templates is highly unbalanced. We fine-tune our BART generator using Adam optimizer (Kingma and Ba, 2015) with a linear warmup of 4,000 steps at the peak learning rate of 2e\u22125. We pick the best model based on validation perplexity. After fine-tuning, we use the generator to sample 5 full parse trees per template.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 87, |
| "end": 94, |
| "text": "Table 1", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The exact-match results for the three settings of using BART/s2s-pointer as auxiliary and target parser are given in Table 2 . We first notice that the BART-based parser performs on-par with SOTA model based on pointer network and RoBERTa (Liu et al., 2019) feature extractor in the work of Rongali et al. (2020) . This suggests that pretraining a general purpose seq2seq model is beneficial for downstream conditional generation task. We also see that using synthetic data brings additional 0.89% for BART-parser and 0.88% for s2s-pointer parser on the exact-match accuracy. The gain of using synthetic data is smaller when UNSUPPORTED utterances are present in training and testing data. Table 3 shows the exact match accuracy of BART-based parser on testset with respect to template frequency f in training data. We see that synthetic data helps low-frequency templates ( f < 5) the most (+1.36%). The gain of 0.67% for unseen templates ( f = 0) suggests that there is a room for further improvement by generating new templates.", |
| "cite_spans": [ |
| { |
| "start": 239, |
| "end": 257, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 291, |
| "end": 312, |
| "text": "Rongali et al. (2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 117, |
| "end": 124, |
| "text": "Table 2", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 690, |
| "end": 697, |
| "text": "Table 3", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In order to support new domains (with new intents and slots) for the virtual assistants, we investigate the role of synthetic data when there is a little data available for the new domains. We sim- ulate this scenario by sub-sampling 6K utterances in the training data as follows: for each template in the training data, we randomly choose one utterance. We use this sub-sampled data for training our parser, generator, and AP. Table 4 shows the mean and variance of the accuracy on five random sub-sampled portions of the train data. We see that in this low resource setting, our approach boosts the accuracy by more than 2% absolute.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 428, |
| "end": 435, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Training data #Samples Acc (%)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Real 6,000 72.24 \u00b1 0.05 +syn 30,000 74.31 \u00b1 0.05 Table 4 : Average accuracy of five different runs for 6K training examples. The synthetic data is filtered by BART parser, which is trained on 6K samples.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 49, |
| "end": 56, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Using pretrained models to generate synthetic data has been studied recently (Amin-Nejad et al., 2020; Kumar et al., 2020) . Their work however focuses on multi-class classification problems. Taking a step further, our work shows a viable path for structured output (i.e., parse trees) problems.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 102, |
| "text": "(Amin-Nejad et al., 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 103, |
| "end": 122, |
| "text": "Kumar et al., 2020)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We have proposed a novel approach for generating synthetic data for hierarchical semantic parsing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our initial experiments show promising results of this approach and open up possibility for applying it to other problems with highly structured outputs in Natural Language Processing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "During inference for generating synthetic data, we draw z uniformly in order to generate more annotations for templates in the long tail.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank reviewers for their constructive comments and suggestions. We also thank Raquel G. Alhama for proofreading this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Exploring transformer text generation for medical dataset augmentation", |
| "authors": [ |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Amin-Nejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Ive", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumithra", |
| "middle": [], |
| "last": "Velupillai", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "4699--4708", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ali Amin-Nejad, Julia Ive, and Sumithra Velupillai. 2020. Exploring transformer text generation for medical dataset augmentation. In Proceedings of the 12th Language Resources and Evaluation Con- ference, pages 4699-4708, Marseille, France. Euro- pean Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Understanding back-translation at scale", |
| "authors": [ |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "489--500", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1045" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sergey Edunov, Myle Ott, Michael Auli, and David Grangier. 2018. Understanding back-translation at scale. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 489-500, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Semantic parsing for task oriented dialog using hierarchical representations", |
| "authors": [ |
| { |
| "first": "Sonal", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Rushin", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Mrinal", |
| "middle": [], |
| "last": "Mohit", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2787--2792", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1300" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sonal Gupta, Rushin Shah, Mrinal Mohit, Anuj Ku- mar, and Mike Lewis. 2018. Semantic parsing for task oriented dialog using hierarchical representa- tions. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2787-2792, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The curious case of neural text degeneration", |
| "authors": [ |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Holtzman", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Buys", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxwell", |
| "middle": [], |
| "last": "Forbes", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. The curious case of neural text de- generation. In International Conference on Learn- ing Representations.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Pcfg models of linguistic tree representations", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Comput. Linguist", |
| "volume": "24", |
| "issue": "4", |
| "pages": "613--632", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Johnson. 1998. Pcfg models of linguistic tree representations. Comput. Linguist., 24(4):613-632.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. ICLR.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Data augmentation using pre-trained transformer models", |
| "authors": [ |
| { |
| "first": "Varun", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashutosh", |
| "middle": [], |
| "last": "Choudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunah", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Varun Kumar, Ashutosh Choudhary, and Eunah Cho. 2020. Data augmentation using pre-trained trans- former models.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal ; Abdelrahman Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7871--7880", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Y. Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, M. Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. ArXiv, abs/1907.11692.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Don't parse, generate! a sequence to sequence architecture for task-oriented semantic parsing", |
| "authors": [ |
| { |
| "first": "Subendhu", |
| "middle": [], |
| "last": "Rongali", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Soldaini", |
| "suffix": "" |
| }, |
| { |
| "first": "Emilio", |
| "middle": [], |
| "last": "Monti", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Hamza", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The Web Conference 2020, WWW '20", |
| "volume": "", |
| "issue": "", |
| "pages": "2962--2968", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3366423.3380064" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Subendhu Rongali, Luca Soldaini, Emilio Monti, and Wael Hamza. 2020. Don't parse, generate! a se- quence to sequence architecture for task-oriented se- mantic parsing. In Proceedings of The Web Confer- ence 2020, WWW '20, page 2962-2968, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Improving neural machine translation models with monolingual data", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "86--96", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation mod- els with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Learning to map sentences to logical form: Structured classification with probabilistic categorial grammars", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Twenty-First Conference on Uncertainty in Artificial Intelligence, UAI'05", |
| "volume": "", |
| "issue": "", |
| "pages": "658--666", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke S. Zettlemoyer and Michael Collins. 2005. Learn- ing to map sentences to logical form: Structured classification with probabilistic categorial grammars. In Proceedings of the Twenty-First Conference on Uncertainty in Artificial Intelligence, UAI'05, page 658-666, Arlington, Virginia, USA. AUAI Press.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Frequency of the 50 most common templates in the Facebook TOP dataset. The frequency of z follows a power-law probability distribution.", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Sentence Permutation (b) Five different types of noise introduced in BART.", |
| "num": null |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Overview of BART.", |
| "num": null |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Data for fine-tuning BART.", |
| "num": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "text": "Source: [in:get_distance [mask] [sl:destination [in:get_location [sl:category_location [mask] sl:category_location] in:get_location] sl:destination] in:get_distance] Target: [in:get_distance How far is [sl:destination [in:get_location [sl:category_location the coffee shop sl:category_location] in:get_location] sl:destination] in:get_distance]", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "text": "", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "text": "[in:get_info_road_condition is the road [sl:road_condition icy ] on [sl:path I -5 ] ] [in:get_info_road_condition Are the roads [sl:road_condition slick ] on [sl:path I90 ] ] [in:get_info_road_condition Is there [sl:road_condition snow ] on [sl:path the commute ] ] [in:get_info_road_condition will the roads be [sl:road_condition slippery ] on [sl:path my commute ] ] [in:get_info_road_condition Are there any [sl:road_condition flooding ] on [sl:path Route 66 ] ]Figure 5: Sample of five synthetic parse trees generated given a template. Colors indicate the corresponding generated spans per [mask] token. The data is reformatted for readability.", |
| "num": null, |
| "content": "<table><tr><td>Template:</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"7\">[in:get_info_road_condition [mask] [sl:road_condition [mask] ] [mask] [sl:path [mask] ] ]</td></tr><tr><td>Generate:</td><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td colspan=\"2\">\u2212 UNSUPPORTED</td><td colspan=\"2\">+ UNSUPPORTED</td></tr><tr><td>Data</td><td>AP filter</td><td colspan=\"3\">Target parser #Samples Acc (%)</td><td colspan=\"2\">#Samples Acc (%)</td></tr><tr><td>Real</td><td/><td>BART</td><td>28,414</td><td>83.37</td><td>31,279</td><td>81.01</td></tr><tr><td colspan=\"2\">+ syn BART</td><td>BART</td><td>53,679</td><td>84.26 (+0.89)</td><td>56,547</td><td>81.74 (+0.73)</td></tr><tr><td>Real</td><td/><td>s2s-pointer</td><td>28,414</td><td>84.80</td><td>31,279</td><td>82.10</td></tr><tr><td colspan=\"2\">+syn BART</td><td>s2s-pointer</td><td>53,679</td><td>85.31 (+0.51)</td><td>56,355</td><td>82.71 (+0.61)</td></tr><tr><td colspan=\"3\">+syn s2s-pointer s2s-pointer</td><td>89,629</td><td>85.68 (+0.88)</td><td>92,264</td><td>82.77 (+0.67)</td></tr></table>", |
| "html": null |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "text": "Exact-match results of our experiments. The AP filter can be a fine-tuned BART for parsing or an s2s-pointer model of Rongali et al. (2020)", |
| "num": null, |
| "content": "<table><tr><td>Training data</td><td colspan=\"3\">f \u2265 5 f < 5 f = 0</td></tr><tr><td>Real</td><td colspan=\"3\">89.46 74.70 61.90</td></tr><tr><td>+syn</td><td colspan=\"3\">90.30 76.06 62.57</td></tr><tr><td>\u2206</td><td>0.84</td><td>1.36</td><td>0.67</td></tr></table>", |
| "html": null |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "text": "Exact-match accuracy on testset with respect to template frequency f in training data.", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |