| { |
| "paper_id": "U19-1012", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:07:45.131293Z" |
| }, |
| "title": "Domain Adaptation for Low-Resource Neural Semantic Parsing", |
| "authors": [ |
| { |
| "first": "Alvin", |
| "middle": [], |
| "last": "Kennardi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Australian National University", |
| "location": {} |
| }, |
| "email": "alvin.kennardi@anu.edu.au" |
| }, |
| { |
| "first": "Gabriela", |
| "middle": [], |
| "last": "Ferraro", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Australian National University", |
| "location": {} |
| }, |
| "email": "gabriela.ferraro@data61.csiro.au" |
| }, |
| { |
| "first": "Qing", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Australian National University", |
| "location": {} |
| }, |
| "email": "qing.wang@anu.edu.au" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "One key challenge for building a semantic parser in new domains is the difficulty to annotate new datasets. In this paper, we propose a sequential transfer learning method as a domain adaptation method to tackle this issue. We show that we can obtain a model with better generalisation on a small dataset by transferring network parameters from a model trained with a bigger dataset with similar meaning representations. We evaluate our model with different datasets as well as versions of the datasets with different difficulty levels.", |
| "pdf_parse": { |
| "paper_id": "U19-1012", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "One key challenge for building a semantic parser in new domains is the difficulty to annotate new datasets. In this paper, we propose a sequential transfer learning method as a domain adaptation method to tackle this issue. We show that we can obtain a model with better generalisation on a small dataset by transferring network parameters from a model trained with a bigger dataset with similar meaning representations. We evaluate our model with different datasets as well as versions of the datasets with different difficulty levels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Semantic parsing maps natural language sentences into meaning representations, for example, logical formulae, SQL queries, or executable codes. The successful implementation of the encoder-decoder architecture in the machine translation (Kalchbrenner and Blunsom, 2013; has driven researchers to apply this model into semantic parsing task (Dong and Lapata, 2016; Jia and Liang, 2016; Ling et al., 2016; Dong and Lapata, 2018) . These neural semantic parsing models have achieved impressive results.", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 269, |
| "text": "(Kalchbrenner and Blunsom, 2013;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 340, |
| "end": 363, |
| "text": "(Dong and Lapata, 2016;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 364, |
| "end": 384, |
| "text": "Jia and Liang, 2016;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 385, |
| "end": 403, |
| "text": "Ling et al., 2016;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 404, |
| "end": 426, |
| "text": "Dong and Lapata, 2018)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Semantic parsing datasets are usually domain and meaning representation dependent, thus making it difficult to re-use existing datasets for building general semantic parsers or semantic parsers in new domains. The process of annotating sentences with their meaning representations for modeling new domains or augmenting the existing datasets is expensive. Prior works proposed several strategies to tackle this issue, such as paraphrasing (Su and Yan, 2017) , decoupling structure and lexicon (Herzig and Berant, 2018) , and multi-task learning (Susanto and Lu, 2017; Herzig and Berant, 2017) .", |
| "cite_spans": [ |
| { |
| "start": 439, |
| "end": 457, |
| "text": "(Su and Yan, 2017)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 493, |
| "end": 518, |
| "text": "(Herzig and Berant, 2018)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 545, |
| "end": 567, |
| "text": "(Susanto and Lu, 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 568, |
| "end": 592, |
| "text": "Herzig and Berant, 2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our method aims to provide an alternative to the previous work. We perform transfer learning by training a model for one task using a dataset and fine-tuning the model using another related dataset. The idea of transfer learning is to utilize features, weights, or other knowledge acquired for one task to solve another related task. It has been extensively used for domain adaptation and building models to solve problems where only limited data is available (Pan and Yang, 2010) . The fine-tuning transfer learning procedure has been successfully implemented in the encoderdecoder architecture for Neural Machine Translation Task (NMT) Sennrich et al., 2016; Servan et al., 2016) . In contrast with the multi-task learning, which jointly trains several tasks together, we perform transfer learning by training the first and second tasks in sequence.", |
| "cite_spans": [ |
| { |
| "start": 460, |
| "end": 480, |
| "text": "(Pan and Yang, 2010)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 638, |
| "end": 660, |
| "text": "Sennrich et al., 2016;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 661, |
| "end": 681, |
| "text": "Servan et al., 2016)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Compared to models without transfer learning, our experiments show that transfer learning gives a good prior for models trained with small datasets, hence improving model performance when only limited amounts of data are available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Neural semantic parsing models are usually trained and tested using datasets in which variables are identified and anonymised before hand, thus considerably reducing the difficulty of the semantic parsing task (Finegan-Dollak et al., 2018) . In this work, we use the un-anonymised versions of two semantic parsing datasets, as well as different data splits to provide extensive evaluation of our model.", |
| "cite_spans": [ |
| { |
| "start": 210, |
| "end": 239, |
| "text": "(Finegan-Dollak et al., 2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To summarise, the contributions of this paper are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "-Evaluation of transfer learning as domain adaptation for low-resource neural semantic parsing with different datasets and difficulty levels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "-Compilation and release of un-anonymised versions of ATIS and GeoQuery datasets for semantic parsing in lambda calculus formulae. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Encoder-decoder architectures based on neural networks have been successfully applied to semantic parsing (Dong and Lapata, 2016; Jia and Liang, 2016; Ling et al., 2016; Dong and Lapata, 2018) . Since then, several ideas such as including attention mechanism (Dong and Lapata, 2016) , multi-task learning (Susanto and Lu, 2017; Herzig and Berant, 2017; Fan et al., 2017) , data augmentation (Jia and Liang, 2016; Ko\u010disk\u00fd et al., 2016) and two-steps (coarse-to-fine) decoder (Dong and Lapata, 2018) have been applied to semantic parsing models with the aim of boosting performance. Similar to our work, others tried to overcome the lack of annotated data by leveraging existing datasets from related domains. Previous works from Herzig and Berant (2017) and Fan et al. (2017) used a multi-task framework to jointly learn the neural semantic parsing model and encourage parameter sharing between different datasets. The model proposed by Herzig and Berant (2017) used multiple knowledge bases in different domains to enhance the model performance. On the other hand, the work from Fan et al. (2017) leveraged access to a very large labeled dataset to help a small one. However, their models are trained using proprietary datasets, which are not publicly available, thus making model comparison unfeasible. The work proposed by Damonte et al. (2019) investigates the possibility of transfer learning to tackle the issue of lacking annotated data on neural semantic parsing. They used a more complex model and datasets compared to our work.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 129, |
| "text": "(Dong and Lapata, 2016;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 130, |
| "end": 150, |
| "text": "Jia and Liang, 2016;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 151, |
| "end": 169, |
| "text": "Ling et al., 2016;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 170, |
| "end": 192, |
| "text": "Dong and Lapata, 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 259, |
| "end": 282, |
| "text": "(Dong and Lapata, 2016)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 305, |
| "end": 327, |
| "text": "(Susanto and Lu, 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 328, |
| "end": 352, |
| "text": "Herzig and Berant, 2017;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 353, |
| "end": 370, |
| "text": "Fan et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 391, |
| "end": 412, |
| "text": "(Jia and Liang, 2016;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 413, |
| "end": 434, |
| "text": "Ko\u010disk\u00fd et al., 2016)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 474, |
| "end": 497, |
| "text": "(Dong and Lapata, 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 728, |
| "end": 752, |
| "text": "Herzig and Berant (2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 757, |
| "end": 774, |
| "text": "Fan et al. (2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 936, |
| "end": 960, |
| "text": "Herzig and Berant (2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1325, |
| "end": 1346, |
| "text": "Damonte et al. (2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our work focuses on training a model using a larger dataset and fine-tuning it using another related low-resource dataset rather than multi-task learning. We also evaluate how additional training examples impact transfer learning models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We adapt the formal definition of transfer learning from Pan and Yang (2010) to the neural semantic parsing problem involving a question q and a meaning representation f . A domain D consists of input space Q and marginal probability P (Q), where Q = {q 1 , q 2 , ..., q n } \u2286 Q. A domain can be denoted by D = {Q, P (Q)}. Given a domain D = {Q, P (Q)}, a task T consists of output space F and conditional probability P (F |Q). A task can be denoted as T = {F, P (F |Q)}. In the semantic parsing problem, we want to learn conditional probability P (F |Q) from the training set with training data (q i , f i ), where q i \u2208 Q and f i \u2208 F.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer Learning as Domain Adaptation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Suppose we have a source domain D S , with source task T S and a target domain D T with target task T T where 0 < n T << n S . Transfer learning uses the knowledge from D S and T S to improve the performance of T T , where D S \u2260 D T , or T S \u2260 T T (Pan and Yang, 2010).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer Learning as Domain Adaptation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our transfer learning method starts by training a model in the source domain D S to solve a source task T S . Subsequently, we transfer the knowledge (i.e network parameters) to the model aimed to solve target task T T and fine-tune the model using the target domain D T .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer Learning as Domain Adaptation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In this work, we adopt the sequence-to-sequence with neural attention method from Dong and Lapata (2016) . The model aims to map a question input q = x 1 , x 2 , ..., x |q| to a meaning representation f = y 1 , y 2 , ..., y |f | . We want to compute the conditional probability of generating the meaning representation f given a question q as follows:", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 104, |
| "text": "Dong and Lapata (2016)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(f |q) = |f | t=1 p(y t |y <t , q)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The question input q is encoded using an encoder, and then a meaning representation f is generated using an attention decoder. The encoder hidden state h t and cell state c t at time step t can be computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h t , c t = LST M (h t\u22121 , c t\u22121 , E(x t ))", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where LSTM refers to a LSTM function described by Zaremba et al. (2014) and E(.) is an embedding layer that returns a word vector representation of x t . The hidden and cell state of the last encoder step are used to initialize the LSTM cell on the first decoder step, hence giving the context to the decoder. The LSTM encoder and decoder have different parameters. The attention layers aim to include the encoder information to a meaning representation at each decoder step (Bahdanau et al., 2015; . In an attention layer, we compute an attention score s k,t between the k-th encoder hidden state h k and a decoder hidden state h t . The context vector c t is a weighted sum of all encoder hidden vectors. We use the context vector c t and the decoder hidden state h t , to obtain an attention hidden state vector h att t using equations as follows:", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 71, |
| "text": "Zaremba et al. (2014)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 475, |
| "end": 498, |
| "text": "(Bahdanau et al., 2015;", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "s k,t = exp{h k \u2022 h t } |q| j=1 exp{h j \u2022 h t } c t = |q| k=1 s k,t h k h att t = tanh(W 1 h t + W 2 c t )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The conditional probability of generating token y t at time step t can be expressed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "p(y t |y <t , q) = (sof tmax(W o h att t )) T e(y t ) (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where e(y t ) is a one-hot vector with value 1 in the element of index y t in the embedding layer and 0 otherwise. We train our model to minimise the negative log-likelihood function over questions and formulae in the training set T . The optimisation problem can be written as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "minimise \u2212 (q,f )\u2208T log(p(f |q))", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Given a question q, we used the model to generate the most probable sequencef as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f = arg max f p(f |q)", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The model performs a greedy search to generate one token at a time to construct a sequence in lambda calculus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For evaluation we used two semantic parsing datasets, namely ATIS and GeoQuery. The meaning representation of the datasets is lambda calculus. There are two types of dataset splits: questionsplit and query-split. In question-split, the training and test examples are divided based on the questions (Finegan-Dollak et al., 2018) , thus based on the input sequence. Meanwhile, in query-split, the training and test examples are divided according to the similarity of their meaning representations ATIS Question : cheapest fare from ci0 to ci1 Formula : ( min $0 ( exists $1 ( and ( from $1 ci0 ) ( to $1 ci1 ) ( = ( fare $1 ) $0 ) ) ) ) ATIS Un-anonymised Question : cheapest fare from Indianapolis to Seattle Formula : ( min $0 ( exists $1 ( and ( from $1 indianapolis ) ( to $1 seattle ) ( = ( fare $1 ) $0 ) ) ) ) GeoQuery Question : what is the capital of s0 Formula : ( capital:c s0 ) GeoQuery Un-anonymised Question : what is the capital of Georgia Formula : ( capital: georgia ) (Finegan-Dollak et al., 2018), thus based on the output sequences. Therefore, the query-split is more appropriate to evaluate the model's capability of composing output sequences, in this case, lambda calculus expressions. The ATIS dataset (Price, 1990; Dahl et al., 1994; Zettlemoyer and Collins, 2007) consists of queries from a flight booking system. We obtained the un-anonymised version of ATIS by preprocessing the non-SQL ATIS dataset (Finegan-Dollak et al., 2018) . Question variables in this dataset are not anonymised, but the formulae have variable identifiers. We removed the variable identifiers in logical formulae. The ATIS dataset split is question-split.", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 327, |
| "text": "(Finegan-Dollak et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1224, |
| "end": 1237, |
| "text": "(Price, 1990;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1238, |
| "end": 1256, |
| "text": "Dahl et al., 1994;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1257, |
| "end": 1287, |
| "text": "Zettlemoyer and Collins, 2007)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1426, |
| "end": 1455, |
| "text": "(Finegan-Dollak et al., 2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The GeoQuery dataset (Zelle and Mooney, 1996; Zettlemoyer and Collins, 2005) consists of queries about US geographical information. We annotated the un-anonymised version of Geo-Query based on non-SQL GeoQuery dataset (Finegan-Dollak et al., 2018) , which has different meaning representations. We compared the question with the anonymised version and annotated lambda calculus formulae on the non-SQL Geo-Query dataset. We ran a script to put the variable back into the questions-formulae pairs, and split them into training, development and test sets based on Finegan-Dollak et al. (2018) . We also divided the GeoQuery un-anoymised dataset using querysplit as proposed by Finegan-Dollak et al. (2018) . Table 2 shows the details of each dataset.", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 76, |
| "text": "Zettlemoyer and Collins, 2005)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 218, |
| "end": 247, |
| "text": "(Finegan-Dollak et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 562, |
| "end": 590, |
| "text": "Finegan-Dollak et al. (2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 675, |
| "end": 703, |
| "text": "Finegan-Dollak et al. (2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 706, |
| "end": 713, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We considered ATIS as a Source Domain dataset and GeoQuery as a Target Domain dataset. We believe that ATIS training samples are less similar, since it could only achieve a good model performance using more training samples. Thus it is more beneficial to use ATIS as Source Domain. We evenly divided the GeoQuery into 10 subsets of {10%, 20%,...,100%} fraction of the training set. With this setup, we simulate the situation where we have limited data in the target domain. This setup also allowed us to evaluate the effectiveness of transfer learning with sufficient training data. Details about the experiments setups are depicted in Table 3 . We set the model hyper-parameters following Dong and Lapata (2016) for GeoQuery. We optimised the objective function in Equation 5 using RMSProp algorithm (Tieleman and Hinton, 2012) with a decay rate of 0.95. The batch size was 20. We randomly initialised parameters from the uniform distribution U(\u22120.08, 0.08). The hidden unit size was 150, and the dropout rate was 0.5. We used 15 epochs to obtain a model from ATIS. We increased the number of epochs after transferring all network parameters to 150 and 180 for anonymised and un-anonymised GeoQuery, respectively. Source and target models were trained with their own vocabularies to handle differences of vocabularies between two datasets. The evaluation metric was accuracy. We evaluated each model with inference described in Equation 6 on the full GeoQuery test set for every bucket. We reported exact match accuracy computed using equation as follows:", |
| "cite_spans": [ |
| { |
| "start": 690, |
| "end": 712, |
| "text": "Dong and Lapata (2016)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 801, |
| "end": 828, |
| "text": "(Tieleman and Hinton, 2012)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 636, |
| "end": 643, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Accuracy = # of correct formulae # test examples in the test set (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We compared our transfer learning framework with the original target model (i.e. without transfer learning) in three different setups described in Section 4.2. Figure 1 shows the learning curves of those setups. The results from small Geo-Query subsets confirmed our hypothesis that the source model gives a stronger prior to the target model. The model obtained from transfer learning has 13.93%, 3.58%, and 2.15% accuracy improvement on the 10% fraction of Geo-Query, GeoQuery Un-anonymised, and GeoQuery Un-anonymised with Query-Split datasets respectively. Figure 1(a) and (b) clearly shows how the transfer learning improves the performance of the target models trained with small subsets. In Figure 1(c) , the performance of the model with transfer learning is comparable to the original target model. However, the performance of the original target model drops with additional training examples from 40% to 50% subset. On the other hand, the model with transfer learning does not have a sudden drop. A possible explanation to this result may be due to the difficulty of the original target model to learn from difficult training samples. The learning curves of the transfer learning models show smoother changes with additional training data as", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 160, |
| "end": 168, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 561, |
| "end": 572, |
| "text": "Figure 1(a)", |
| "ref_id": null |
| }, |
| { |
| "start": 698, |
| "end": 709, |
| "text": "Figure 1(c)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation on Transfer Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "No.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation on Transfer Learning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Original Target Model 1 river in s0 ( lambda $0 e ( and ( river:t $0 ) ( loc:t $0 s0 ) ) ) ( lambda $0 e ( and ( river:t $0 ) ( loc:t $0 s0 ) ) ( size:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question Transfer Learning", |
| "sec_num": null |
| }, |
| { |
| "text": "i $0 ) ) 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question Transfer Learning", |
| "sec_num": null |
| }, |
| { |
| "text": "what is the capital of the smallest state ( capital:c ( argmin $1 ( state:t $1 ) ( size:i $1 ) ) ) ( capital:c ( argmax $1 ( state:t $1 ) ( size:i $1 ) ) ) 3 how many rivers does colorado have ( count $0 ( and ( river $0 ) ( loc $0 colorado ) ) ) ( count $0 ( and ( state $0 ) ( loc $0 usa ) ) ) 4 how large is texas ( size texas ) ( argmax $0 ( river $0 ) ( density $0 ) ) 5 how many states does missouri border ( count $0 ( and ( state $0 ) ( next to $0 missouri ) ) ) ( count $0 ( and ( state $0 ) ( next to $0 delaware ) ) ) 6 how many states does the missouri river run through ( count $0 ( and ( state $0 ) ( loc $0 missouri ) ) ) ( lambda $0 e ( and ( state $0 ) ( loc $0 missouri ) ) ) compared to the original target model, indicating better model generalisation when the training data is small. With bigger subset (i.e 70% and more), the results from transfer learning models are comparable to the original models, indicating that the out-of-domain data does not impair the model performance. We show that our transfer learning method helps the target model to have a better performance when the training data is very small.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question Transfer Learning", |
| "sec_num": null |
| }, |
| { |
| "text": "We also looked into samples generated from the transfer learning models and original target models. Table 4 presents six samples from three different setups described in Table 3 with the target model trained with 10% subset of training examples. The first two samples are obtained from the models trained with GeoQuery. In the first example, the model trained with transfer learning can identify correct meaning representation, while the original target model generates wrong meaning representation due to the generation of extra tokens. The second example shows the model trained with transfer learning correctly identified the token \"smallest\" to generate \"argmin\" instead of \"argmax\". The third and fourth samples show examples of meaning representations generated by the model trained with un-anonymised GeoQuery. In the third examples, the model with transfer learning correctly identified the entity \"river\". On the other hand, the model without transfer learning generates \"state\", which is more common in the training set. On the fourth example, the original target model generates an irrelevant meaning representation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 100, |
| "end": 107, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 170, |
| "end": 177, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Error Analysis on Transfer Learning", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The last two samples are obtained from models trained with un-anonymised GeoQuery with query-split. The fifth example shows how the original target model generates a wrong entity name \"delaware\" instead of \"missouri\". Similarly, the sixth example shows the original target model produces a token \"lambda\" instead of \"count\". This error may be due to the fact that the original target model tends to generate the tokens it is familiar with in the training set. The examples described above show how the model trained with transfer learning has a better ability to generate tokens that are different from the training examples, thus improving the performance of the model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis on Transfer Learning", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We proposed a transfer learning method by training a model using a larger dataset and fine-tuning with another related low-resource dataset. With this method, we can use a bigger dataset with a similar composition to improve the performance of a model trained with a smaller dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For future work, it would be interesting to combine transfer learning and data selection methods so that the source model is trained only with the most similar instances in respect with the target domain. Another direction would be to explore transfer learning on a more complex model such as sequence-to-tree, which has a better performance than sequence-to-sequence models when trained with large datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The code and datasets are available from https:// github.com/akennardi/Semantic-Parsing", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Xiang Li for his insight throughout the project. We would also like to thank the three anonymous reviewers for their valuable comments and insights. This work is a part of Individual Computing Project Course at the Australian National University taken by the first author with the same title.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgement", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Expanding the scope of the atis task: The atis-3 corpus", |
| "authors": [ |
| { |
| "first": "Deborah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Dahl", |
| "suffix": "" |
| }, |
| { |
| "first": "Madeleine", |
| "middle": [], |
| "last": "Bates", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Fisher", |
| "suffix": "" |
| }, |
| { |
| "first": "Kate", |
| "middle": [], |
| "last": "Hunicke-Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Pallett", |
| "suffix": "" |
| }, |
| { |
| "first": "Christine", |
| "middle": [], |
| "last": "Pao", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rudnicky", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "HUMAN LANGUAGE TECHNOLOGY: Proceedings of a Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deborah A. Dahl, Madeleine Bates, Michael Brown, William Fisher, Kate Hunicke-Smith, David Pallett, Christine Pao, Alexander Rudnicky, and Elizabeth Shriberg. 1994. Expanding the scope of the atis task: The atis-3 corpus. In HUMAN LANGUAGE TECHNOLOGY: Proceedings of a Workshop held at Plainsboro, New Jersey, March 8-11, 1994.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Practical semantic parsing for spoken language understanding", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Damonte", |
| "suffix": "" |
| }, |
| { |
| "first": "Rahul", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "Tagyoung", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Damonte, Rahul Goel, and Tagyoung Chung. 2019. Practical semantic parsing for spoken lan- guage understanding. CoRR, abs/1903.04521.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Language to Logical Form with Neural Attention", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "33--43", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1004" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Dong and Mirella Lapata. 2016. Language to Log- ical Form with Neural Attention. In Proceedings of the 54th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), ACL 2016, pages 33-43, Berlin, Germany. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Coarse-to-Fine Decoding for Neural Semantic Parsing", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "731--742", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Dong and Mirella Lapata. 2018. Coarse-to-Fine De- coding for Neural Semantic Parsing. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), ACL 2018, pages 731-742, Melbourne, Aus- tralia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Transfer Learning for Neural Semantic Parsing", |
| "authors": [ |
| { |
| "first": "Xing", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Emilio", |
| "middle": [], |
| "last": "Monti", |
| "suffix": "" |
| }, |
| { |
| "first": "Lambert", |
| "middle": [], |
| "last": "Mathias", |
| "suffix": "" |
| }, |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Dreyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2nd Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "48--56", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-2607" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xing Fan, Emilio Monti, Lambert Mathias, and Markus Dreyer. 2017. Transfer Learning for Neural Seman- tic Parsing. In Proceedings of the 2nd Workshop on Representation Learning for NLP, Rep4NLP 2017, pages 48-56, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Improving text-to-SQL evaluation methodology", |
| "authors": [ |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Finegan-Dollak", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "K" |
| ], |
| "last": "Kummerfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Ramanathan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sesh", |
| "middle": [], |
| "last": "Sadasivam", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "351--360", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1033" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Catherine Finegan-Dollak, Jonathan K. Kummerfeld, Li Zhang, Karthik Ramanathan, Sesh Sadasivam, Rui Zhang, and Dragomir Radev. 2018. Improving text-to-SQL evaluation methodology. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 351-360, Melbourne, Australia. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Neural Semantic Parsing over Multiple Knowledge-bases", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Herzig", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Berant", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "623--628", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-2098" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Herzig and Jonathan Berant. 2017. Neural Semantic Parsing over Multiple Knowledge-bases. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), ACL 2017, pages 623-628, Van- couver, Canada. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Decoupling structure and lexicon for zero-shot semantic parsing", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Herzig", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Berant", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1619--1629", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1190" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Herzig and Jonathan Berant. 2018. Decou- pling structure and lexicon for zero-shot semantic parsing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 1619-1629, Brussels, Belgium. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Data Recombination for Neural Semantic Parsing", |
| "authors": [ |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "12--22", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robin Jia and Percy Liang. 2016. Data Recombination for Neural Semantic Parsing. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), ACL 2016, pages 12-22, Berlin, Germany. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Recurrent Continuous Translation Models", |
| "authors": [ |
| { |
| "first": "Nal", |
| "middle": [], |
| "last": "Kalchbrenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1700--1709", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nal Kalchbrenner and Phil Blunsom. 2013. Recur- rent Continuous Translation Models. In Proceed- ings of the 2013 Conference on Empirical Meth- ods in Natural Language Processing, EMNLP 2013, pages 1700-1709, Seattle, Washington, USA. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Semantic Parsing with Semi-Supervised Sequential Autoencoders", |
| "authors": [ |
| { |
| "first": "Karl Moritz", |
| "middle": [], |
| "last": "Hermann", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1078--1087", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1116" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Moritz Hermann. 2016. Semantic Parsing with Semi-Supervised Sequential Autoencoders. In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, EMNLP 2016, pages 1078-1087, Austin, TX, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Latent Predictor Networks for Code Generation", |
| "authors": [ |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Ko\u010disk\u00fd", |
| "suffix": "" |
| }, |
| { |
| "first": "Fumin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Senior", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "599--609", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1057" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang Ling, Phil Blunsom, Edward Grefenstette, Karl Moritz Hermann, Tom\u00e1\u0161 Ko\u010disk\u00fd, Fumin Wang, and Andrew Senior. 2016. Latent Predic- tor Networks for Code Generation. In Proceed- ings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), ACL 2016, pages 599-609, Berlin, Germany. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Stanford neural machine translation systems for spoken language domains", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Workshop on Spoken Language Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong and Christopher D. Manning. 2015. Stanford neural machine translation sys- tems for spoken language domains. In International Workshop on Spoken Language Translation.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Effective approaches to attention-based neural machine translation", |
| "authors": [ |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1412--1421", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D15-1166" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natu- ral Language Processing, pages 1412-1421, Lis- bon, Portugal. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A survey on transfer learning", |
| "authors": [ |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Sinno Jialin Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Trans. on Knowledge and Data Eng", |
| "volume": "22", |
| "issue": "10", |
| "pages": "1345--1359", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TKDE.2009.191" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sinno Jialin Pan and Qiang Yang. 2010. A survey on transfer learning. Trans. on Knowledge and Data Eng., 22(10):1345-1359.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Evaluation of spoken language systems: the ATIS domain", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "J" |
| ], |
| "last": "Price", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Speech and Natural Language: Proceedings of a Workshop Held at Hidden Valley", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. J. Price. 1990. Evaluation of spoken language sys- tems: the ATIS domain. In Speech and Natural Lan- guage: Proceedings of a Workshop Held at Hidden Valley, Pennsylvania, June 24-27,1990.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Improving neural machine translation models with monolingual data", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "86--96", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation mod- els with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Domain specialization: a post-training domain adaptation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Christophe", |
| "middle": [], |
| "last": "Servan", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Josep", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Crego", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Senellart", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christophe Servan, Josep Maria Crego, and Jean Senel- lart. 2016. Domain specialization: a post-training domain adaptation for neural machine translation. ArXiv, abs/1612.06141.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Cross-domain semantic parsing via paraphrasing", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Xifeng", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1235--1246", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1127" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Su and Xifeng Yan. 2017. Cross-domain se- mantic parsing via paraphrasing. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 1235-1246, Copenhagen, Denmark. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Neural Architectures for Multilingual Semantic Parsing", |
| "authors": [ |
| { |
| "first": "Raymond Hendy", |
| "middle": [], |
| "last": "Susanto", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "38--44", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-2007" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raymond Hendy Susanto and Wei Lu. 2017. Neural Architectures for Multilingual Semantic Parsing. In Proceedings of the 55th Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), ACL 2017, pages 38-44, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. CoRR, abs/1409.3215.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Lecture 6.5-RmsProp: Divide the gradient by a running average of its recent magnitude", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Tieleman", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "COURSERA: Neural Networks for Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Tieleman and G. Hinton. 2012. Lecture 6.5- RmsProp: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural Net- works for Machine Learning.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Recurrent neural network regularization", |
| "authors": [ |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wojciech Zaremba, Ilya Sutskever, and Oriol Vinyals. 2014. Recurrent neural network regularization. CoRR, abs/1409.2329.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Learning to Parse Database Queries Using Inductive Logic Programming", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Zelle", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 13th National Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1050--1055", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John M. Zelle and Raymond J. Mooney. 1996. Learn- ing to Parse Database Queries Using Inductive Logic Programming. In Proceedings of the 13th Na- tional Conference on Artificial Intelligence, vol- ume 2, pages 1050-1055, Portland, Oregon, USA. AAAI Press / The MIT Press.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Online learning of relaxed CCG grammars for parsing to logical form", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "678--687", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke Zettlemoyer and Michael Collins. 2007. On- line learning of relaxed CCG grammars for parsing to logical form. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Lan- guage Processing and Computational Natural Lan- guage Learning (EMNLP-CoNLL), pages 678-687, Prague, Czech Republic. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Learning to map sentences to logical form: Structured classification with probabilistic categorial grammars", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Twenty-First Conference on Uncertainty in Artificial Intelligence, UAI'05", |
| "volume": "", |
| "issue": "", |
| "pages": "658--666", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke S. Zettlemoyer and Michael Collins. 2005. Learning to map sentences to logical form: Struc- tured classification with probabilistic categorial grammars. In Proceedings of the Twenty-First Con- ference on Uncertainty in Artificial Intelligence, UAI'05, pages 658-666, Arlington, Virginia, United States. AUAI Press.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "text": "Example of natural language questions and their meaning representation in lambda calculus.", |
| "type_str": "table", |
| "content": "<table><tr><td>Data Set</td><td colspan=\"3\">Train Dev. Test</td></tr><tr><td>ATIS</td><td>4,434</td><td>491</td><td>448</td></tr><tr><td>ATIS un-anonymised</td><td>4,029</td><td>504</td><td>504</td></tr><tr><td>GeoQuery</td><td>600</td><td>0</td><td>280</td></tr><tr><td>GeoQuery un-anonymised</td><td>583</td><td>15</td><td>279</td></tr><tr><td>GeoQuery un-anonymised</td><td>543</td><td>148</td><td>186</td></tr><tr><td>+ query-split</td><td/><td/><td/></tr></table>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF2": { |
| "text": "Learning curves from different transfer learning setups.", |
| "type_str": "table", |
| "content": "<table><tr><td/><td/><td colspan=\"6\">(a) ATIS to GeoQuery</td><td/><td/><td/><td/><td/><td colspan=\"6\">(b) ATIS to GeoQuery</td><td/><td/><td/><td/><td/><td colspan=\"6\">(c) ATIS to GeoQuery</td><td/><td/></tr><tr><td/><td/><td/><td colspan=\"4\">(Anonymised)</td><td/><td/><td/><td>100</td><td/><td/><td colspan=\"6\">(Un-anonymised)</td><td/><td/><td>100</td><td colspan=\"10\">(Un-anonymised with Query-Split)</td></tr><tr><td>32.14 18.21</td><td>47.14 45.71</td><td>55.00 47.50</td><td>61.43 59.64</td><td>72.86 71.43</td><td>75.71 73.21</td><td>77.14 77.50</td><td>81.43 82.14</td><td>81.43 82.14</td><td>84.64 80.36</td><td>80 0 20 40 60</td><td>2.87 6.45</td><td>10.75 19.71</td><td>13.62 29.75</td><td>42.29 29.03</td><td>53.05 43.73</td><td>56.63 49.10</td><td>55.91 60.57</td><td>63.44 61.29</td><td>70.25 66.67</td><td>67.03 67.74</td><td>80 0 60 20 40</td><td>0.00 2.15</td><td>3.23 3.76</td><td>10.22 14.52</td><td>26.34 19.35</td><td>18.28 24.19</td><td>19.89 28.49</td><td>33.33 32.80</td><td>37.63 42.47</td><td>44.09 37.63</td><td>46.24 44.09</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>0</td><td/><td>20</td><td/><td>40</td><td/><td>60</td><td/><td>80</td><td/><td>100</td><td>0</td><td/><td>20</td><td/><td>40</td><td/><td>60</td><td/><td>80</td><td/><td>100</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"8\">Subset data fraction (%)</td><td/><td/><td/><td colspan=\"8\">Subset data fraction (%)</td><td/></tr><tr><td colspan=\"10\">Figure 1: Source Domain Target Domain</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>ATIS</td><td/><td/><td/><td/><td colspan=\"4\">GeoQuery</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"12\">ATIS un-anonymised GeoQuery un-anonymised</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"12\">ATIS un-anonymised GeoQuery un-anonymised</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td colspan=\"5\">with query-split</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF3": { |
| "text": "Transfer learning experiments with ATIS and GeoQuery datasets.", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| }, |
| "TABREF4": { |
| "text": "Examples of Meaning Representations generated by the model trained with transfer learning and original target model using 10% fraction of various GeoQuery datasets.", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |