ACL-OCL / Base_JSON /prefixW /json /W19 /W19-0128.json
Benjamin Aw
Add updated pkl file v3
6fa4bc9
{
"paper_id": "W19-0128",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T06:17:05.876266Z"
},
"title": "On Evaluating the Generalization of LSTM Models in Formal Languages",
"authors": [
{
"first": "Mirac",
"middle": [],
"last": "Suzgun",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Harvard University Cambridge",
"location": {
"postCode": "02138",
"region": "MA",
"country": "USA"
}
},
"email": "msuzgun@college.harvard.edu"
},
{
"first": "Yonatan",
"middle": [],
"last": "Belinkov",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Harvard University Cambridge",
"location": {
"postCode": "02138",
"region": "MA",
"country": "USA"
}
},
"email": "belinkov@seas.harvard.edu"
},
{
"first": "Stuart",
"middle": [
"M"
],
"last": "Shieber",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Harvard University Cambridge",
"location": {
"postCode": "02138",
"region": "MA",
"country": "USA"
}
},
"email": "shieber@seas.harvard.edu"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Recurrent Neural Networks (RNNs) are theoretically Turing-complete and established themselves as a dominant model for language processing. Yet, there still remains an uncertainty regarding their language learning capabilities. In this paper, we empirically evaluate the inductive learning capabilities of Long Short-Term Memory networks, a popular extension of simple RNNs, to learn simple formal languages, in particular a n b n , a n b n c n , and a n b n c n d n. We investigate the influence of various aspects of learning, such as training data regimes and model capacity, on the generalization to unobserved samples. We find striking differences in model performances under different training settings and highlight the need for careful analysis and assessment when making claims about the learning capabilities of neural network models. 1",
"pdf_parse": {
"paper_id": "W19-0128",
"_pdf_hash": "",
"abstract": [
{
"text": "Recurrent Neural Networks (RNNs) are theoretically Turing-complete and established themselves as a dominant model for language processing. Yet, there still remains an uncertainty regarding their language learning capabilities. In this paper, we empirically evaluate the inductive learning capabilities of Long Short-Term Memory networks, a popular extension of simple RNNs, to learn simple formal languages, in particular a n b n , a n b n c n , and a n b n c n d n. We investigate the influence of various aspects of learning, such as training data regimes and model capacity, on the generalization to unobserved samples. We find striking differences in model performances under different training settings and highlight the need for careful analysis and assessment when making claims about the learning capabilities of neural network models. 1",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Recurrent Neural Networks (RNNs) are powerful machine learning models that can capture and exploit sequential data. They have become standard in important natural language processing tasks such as machine translation (Sutskever et al., 2014; Bahdanau et al., 2014) and speech recognition (Sak et al., 2014) . Despite the ubiquity of various RNN architectures in natural language processing, there still lies an unanswered fundamental question: What classes of languages can, empirically or theoretically, be learned by neural networks? This question has drawn much attention in the study of formal languages, with previous results on both the theoretical (Siegelmann and Sontag, 1992; Siegelmann, 1995) and empirical capabilities of RNNs, showing that different RNN architectures can learn certain regular Casey, 1996) , context-free (Elman, 1991; Das et al., 1992) , and context-sensitive languages (Gers and Schmidhuber, 2001) .",
"cite_spans": [
{
"start": 217,
"end": 241,
"text": "(Sutskever et al., 2014;",
"ref_id": "BIBREF22"
},
{
"start": 242,
"end": 264,
"text": "Bahdanau et al., 2014)",
"ref_id": "BIBREF1"
},
{
"start": 288,
"end": 306,
"text": "(Sak et al., 2014)",
"ref_id": "BIBREF18"
},
{
"start": 655,
"end": 684,
"text": "(Siegelmann and Sontag, 1992;",
"ref_id": "BIBREF20"
},
{
"start": 685,
"end": 702,
"text": "Siegelmann, 1995)",
"ref_id": "BIBREF19"
},
{
"start": 806,
"end": 818,
"text": "Casey, 1996)",
"ref_id": "BIBREF4"
},
{
"start": 834,
"end": 847,
"text": "(Elman, 1991;",
"ref_id": "BIBREF7"
},
{
"start": 848,
"end": 865,
"text": "Das et al., 1992)",
"ref_id": "BIBREF6"
},
{
"start": 900,
"end": 928,
"text": "(Gers and Schmidhuber, 2001)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In a common experimental setup for investigating whether a neural network can learn a formal language, one formulates a supervised learning problem where the network is presented one character at a time and predicts the next possible character(s). The performance of the network can then be evaluated based on its ability to recognize sequences shown in the training set and -more importantly -to generalize to unseen sequences. There are, however, various methods of evaluation in a language learning task. In order to define the generalization of a network, one may consider the length of the shortest sequence in a language whose output was incorrectly produced by the network, or the size of the largest accepted test set, or the accuracy on a fixed test set (Rodriguez et al., 1999; Bod\u00e9n and Wiles, 2000; Gers and Schmidhuber, 2001; Rodriguez, 2001 ). These formulations follow narrow and bounded evaluation schemes though: They often define a length threshold in the test set and report the performance of the model on this fixed set.",
"cite_spans": [
{
"start": 763,
"end": 787,
"text": "(Rodriguez et al., 1999;",
"ref_id": "BIBREF17"
},
{
"start": 788,
"end": 810,
"text": "Bod\u00e9n and Wiles, 2000;",
"ref_id": "BIBREF2"
},
{
"start": 811,
"end": 838,
"text": "Gers and Schmidhuber, 2001;",
"ref_id": "BIBREF8"
},
{
"start": 839,
"end": 854,
"text": "Rodriguez, 2001",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We acknowledge three unsettling issues with these formulations. First, the sequences in the training set are usually assumed to be uniformly or geometrically distributed, with little regard to the nature and complexity of the language. This assumption may undermine any conclusions drawn from empirical investigations, especially given that natural language is not uniformly distributed, an aspect that is known to affect learning in modern RNN architectures (Liu et al., 2018) . Second, in a test set where the sequences are enumerated by their lengths, if a network makes an error on a sequence of, say, length 7, but correctly recognizes longer sequences of length up to 1000, would we consider the model's gener-alization as good or bad? In a setting where we monitor only the shortest sequence that was incorrectly predicted by the network, this scheme clearly misses the potential success of the model after witnessing a failure, thereby misportraying the capabilities of the network. Third, the test sets are often bounded in these formulations, making it challenging to compare and contrast the performance of models if they attain full accuracy on their fixed test sets.",
"cite_spans": [
{
"start": 459,
"end": 477,
"text": "(Liu et al., 2018)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In the present work, we address these limitations by providing a more nuanced evaluation of the learning capabilities of RNNs. In particular, we investigate the effects of three different aspects of a network's generalization: data distribution, length-window, and network capacity. We define an informative protocol for assessing the performance of RNNs: Instead of training a single network until it has learned its training set and then evaluating it on its test set, as Gers and Schmidhuber do in their study, we monitor and test the network's performance at each epoch during the entire course of training. This approach allows us to study the stability of the solutions reached by the network. Furthermore, we do not restrict ourselves to a test set of sequences of fixed lengths during testing. Rather, we exhaustively enumerate all the sequences in a language by their lengths and then go through the sequences in the test set one by one until our network errs k times, thereby providing a more fine-grained evaluation criterion of its generalization capabilities.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Our experimental evaluation is focused on the Long Short-Term Memory (LSTM) network (Hochreiter and Schmidhuber, 1997) , a particularly popular RNN variant. We consider three formal languages, namely a n b n , a n b n c n , and a n b n c n d n , and investigate how LSTM networks learn these languages under different training regimes. Our investigation leads to the following insights: (1) The data distribution has a significant effect on generalization capability, with discrete uniform and U-shaped distributions often leading to the best generalization amongst all the four distributions in consideration. (2) Widening the training length-window, naturally, enables LSTM models to generalize better to longer sequences, and interestingly, the networks seem to learn to generalize to shorter sequences when trained on long sequences. (3) Higher model capacity -having more hidden units -leads to better stability, but not necessarily better generalization levels. In other words, over-parameterized models are more stable than models with theoretically sufficient but far fewer parameters. We explain this phenomenon by conjecturing that a collaborative counting mechanism arises in over-parameterized networks.",
"cite_spans": [
{
"start": 84,
"end": 118,
"text": "(Hochreiter and Schmidhuber, 1997)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "It has been shown that RNNs with a finite number of states can process regular languages by acting like a finite-state automaton using different units in their hidden layers Casey, 1996) . RNNs, however, are not limited to recognizing only regular languages. Siegelmann and Sontag (1992) and Siegelmann (1995) showed that first-order RNNs (with rational state weights and infinite numeric precision) can simulate a pushdown automaton with two-stacks, thereby demonstrating that RNNs are Turing-complete. In theory, RNNs with infinite numeric precision are capable of expressing recursively enumerable languages. Yet, in practice, modern machine architectures do not contain computational structures that support infinite numeric precision. Thus, the computational power of RNNs with finite precision may not necessarily be the same as that of RNNs with infinite precision.",
"cite_spans": [
{
"start": 174,
"end": 186,
"text": "Casey, 1996)",
"ref_id": "BIBREF4"
},
{
"start": 259,
"end": 287,
"text": "Siegelmann and Sontag (1992)",
"ref_id": "BIBREF20"
},
{
"start": 292,
"end": 309,
"text": "Siegelmann (1995)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Elman (1991) investigated the learning capabilities of simple RNNs to process and formalize a context-free grammar containing hierarchical (recursively embedded) dependencies: He observed that distinct parts of the networks were able to learn some complex representations to encode certain grammatical structures and dependencies of the context-free grammar. Later, Das et al. (1992) introduced an RNN with an external stack memory to learn simple context-free languages, such as a n b m , a n b n cb m a m , and a n+m b n c m . Similar studies (Kwasny and Kalman, 1995; Wiles and Elman, 1995; Steijvers and Gr\u00fcnwald, 1996; Rodriguez et al., 1999; Bod\u00e9n and Wiles, 2000) have explored the existence of stable counting mechanisms in simple RNNs, which would enable them to learn various context-free and context-sensitive languages, but none of the RNN architectures proposed in the early days were able to generalize the training set to longer (or more complex) test samples with substantially high accuracy.",
"cite_spans": [
{
"start": 366,
"end": 383,
"text": "Das et al. (1992)",
"ref_id": "BIBREF6"
},
{
"start": 571,
"end": 593,
"text": "Wiles and Elman, 1995;",
"ref_id": "BIBREF24"
},
{
"start": 594,
"end": 623,
"text": "Steijvers and Gr\u00fcnwald, 1996;",
"ref_id": "BIBREF21"
},
{
"start": 624,
"end": 647,
"text": "Rodriguez et al., 1999;",
"ref_id": "BIBREF17"
},
{
"start": 648,
"end": 670,
"text": "Bod\u00e9n and Wiles, 2000)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Gers and Schmidhuber 2001 Memory (LSTM) networks 2 to learn two contextfree languages, a n b n , a n b m B m A n , and one strictly context-sensitive language, a n b n c n . Given only a small fraction of samples in a formal language, with values of n (and m) ranging from 1 to a certain training threshold N , they trained an LSTM model until its full convergence on the training set and then tested it on a more generalized set. They showed that their LSTM model outperformed the previous approaches in capturing and generalizing the aforementioned formal languages. By analyzing the cell states and the activations of the gates in their LSTM model, they further demonstrated that the network learns how to count up and down at certain places in the sample sequences to encode information about the underlying structure of each of these formal languages.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Following this approach, Bod\u00e9n and Wiles (2002) and Chalup and Blair (2003) studied the stability of the LSTM networks in learning context-free and context-sensitive languages and examined the processing mechanism developed by the hidden states during the training phase. They observed that the weight initialization of the hidden states in the LSTM network had a significant effect on the inductive capabilities of the model and that the solutions were often unstable in the sense that the numbers up to which the LSTM models were able to generalize using the training dataset sporadically oscillated.",
"cite_spans": [
{
"start": 25,
"end": 47,
"text": "Bod\u00e9n and Wiles (2002)",
"ref_id": "BIBREF3"
},
{
"start": 52,
"end": 75,
"text": "Chalup and Blair (2003)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Following the traditional approach adopted by Elman (1991); Rodriguez (2001) ; Gers and Schmidhuber (2001) and many other studies, we train our neural network as follows. At each time step, we present one input character to our model and then ask it to predict the set of next possible characters, based on the current character and the prior hidden states. 3 Given a vocabulary V (i) of size d, we use a one-hot representation to encode the input values; therefore, all the input vectors are ddimensional binary vectors. The output values are (d+1)-dimensional though, since they may further contain the termination symbol a, in addition to the symbols in V (i) . The output values are not always one-hot encoded, because there can be multiple possibilities for the next character in the sequence, therefore we instead use a k-hot representation to encode the output values. Our objective is to minimize the mean-squared error (MSE) of the sequence predictions. During testing, we use an output threshold criterion of 0.5 for the sigmoid output layer to indicate which characters were predicted by the model. We then turn this prediction task into a classification task by accepting a sample if our model predicts all of its output values correctly and rejecting it otherwise. 4",
"cite_spans": [
{
"start": 60,
"end": 76,
"text": "Rodriguez (2001)",
"ref_id": "BIBREF16"
},
{
"start": 659,
"end": 662,
"text": "(i)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The Sequence Prediction Task",
"sec_num": "3"
},
{
"text": "We consider the following three formal languages in our predictions tasks: a n b n , a n b n c n , and a n b n c n d n , where n \u2265 1. Of these three languages, the first one is a context-free language and the last two are strictly context-sensitive languages. Table 1 provides example input-output pairs for these languages under the sequence prediction task. In the rest of this section, we formulate the sequence prediction task for each language in more detail.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "CFL a n b n : The input vocabulary V (i) for a n b n consists of a and b. The output vocabulary V (o) is the union of V (i) and {a}. Therefore, the input vectors are 2-dimensional, and the output vectors are 3-dimensional. Before the occurrence of the first b in a sequence, the model always predicts a or b (which we notate a/b) whenever it that having a start symbol in the sequence does not affect the learning capabilities of the model. However, we still use a termination symbol a to encode the end of the sequence in our output samples. sees an a. However, after it encounters the first b, the rest of the sequence becomes entirely deterministic: Assuming that the model observes n a's in a sequence, it outputs (n 1) b's for the next (n 1) b's and the terminal symbol a for the last b in the sequence. Summarizing, we define the input-target scheme for a n b n as follows:",
"cite_spans": [
{
"start": 98,
"end": 101,
"text": "(o)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "a^n b^n \\Rightarrow (a/b)^n b^{n-1} \\dashv",
"eq_num": "(1)"
}
],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "CSL a n b n c n : The input vocabulary V (i) for a n b n c n consists of three characters: a, b, and c.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "The",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "output vocabulary V (o) is V (i) [ {a}.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "The input and output vectors are 3-and 4-dimensional, respectively. The input-target scheme for a n b n c n is:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "a^n b^n c^n \\Rightarrow (a/b)^n b^{n-1} c^n \\dashv",
"eq_num": "(2)"
}
],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "CSL a n b n c n d n : The vocabulary V (i) for the last language a n b n c n d n consists of a, b, c, and d. The input vectors are 4-dimensional, and the output vectors are 5-dimensional. As in the case of the previous two languages, a sequence becomes entirely deterministic after the observance of the first b, hence the input-target scheme for a n b n c n d n is:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "a^n b^n c^n d^n \\Rightarrow (a/b)^n b^{n-1} c^n d^n \\dashv",
"eq_num": "(3)"
}
],
"section": "Languages",
"sec_num": "3.1"
},
{
"text": "We use a single-layer LSTM model to perform the sequence prediction task, followed by a linear layer that maps to the output vocabulary size. The linear layer is followed by a sigmoid unit layer. The loss is the sum of the mean squared error between the prediction and the correct output at each character. See Figure 1 for an illustration. In our implementation, we used the standard LSTM module in PyTorch (Paszke et al., 2017) and initialized the initial hidden and cell states, h 0 and c 0 , to zero.",
"cite_spans": [
{
"start": 408,
"end": 429,
"text": "(Paszke et al., 2017)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [
{
"start": 311,
"end": 319,
"text": "Figure 1",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "The LSTM Model",
"sec_num": "3.2"
},
{
"text": "4 Experimental Setup",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The LSTM Model",
"sec_num": "3.2"
},
{
"text": "Training and testing are done in alternating steps: In each epoch, for training, we first present to an LSTM network 1000 samples in a given language, which are generated according to a certain discrete probability distribution supported on a closed finite interval. 5 We then freeze all the weights in our model, exhaustively enumerate all the sequences in the language by their lengths, and determine 5 The strings are presented to the model in a random order. the first k shortest sequences whose outputs the model produces inaccurately. 6 We remark, for the sake of clarity, that our test design is slightly different from the traditional testing approaches used by Rodriguez et al. (1999) ; Gers and Schmidhuber (2001); Rodriguez (2001), since we do not consider the shortest sequence in a language whose output was incorrectly predicted by the model, or the largest accepted test set, or the accuracy of the model on a fixed test set. Our testing approach, as we will see shortly in the following subsections, gives more information about the inductive capabilities of our LSTM networks than the previous techniques and proves itself to be useful especially in the cases where the distribution of the length of our training dataset is skewed towards one of the boundaries of the distribution's support. For instance, LSTM models sometimes fail to capture some of the short sequences in a language during the testing phase 7 , but they then predict a large number of long sequences correctly. 8 If we were to report only the shortest sequence whose output our model incorrectly predicts, we would then be unable to capture the model's inductive capabilities. Furthermore, we test and report the performance of the model after each full pass of the training set. Finally, in all our investigations, we repeated each experiment ten times. In each trial, we only changed the weights of the hidden states of the model -all the other parameters were kept the same.",
"cite_spans": [
{
"start": 267,
"end": 268,
"text": "5",
"ref_id": null
},
{
"start": 403,
"end": 404,
"text": "5",
"ref_id": null
},
{
"start": 541,
"end": 542,
"text": "6",
"ref_id": null
},
{
"start": 670,
"end": 693,
"text": "Rodriguez et al. (1999)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Training and Testing",
"sec_num": "4.1"
},
{
"text": "Previous studies have examined various length distribution models to generate appropriate training sets for each formal language: Wiles and Elman (1995) ; Bod\u00e9n and Wiles (2000) ; Rodriguez (2001), for instance, used length distributions that were skewed towards having more short sequences than long sequences given a training length-window, whereas Gers and Schmidhuber (2001) used a uniform distribution scheme to generate their training sets. The latter briefly comment that the distribution of lengths of sequences in the training set does influence the generalization ability and convergence speed 9 of neural networks, and mention that training sets containing abundant numbers of both short and long sequences are learned by networks much more quickly than uniformly distributed regimes. Nevertheless, they do not systematically compare or explicitly report their findings. To study the effect of various length distributions on the learning capability and speed of LSTM models, we experimented with four discrete probability distributions supported on bounded intervals (Figure 2 ) to sample the lengths of sequences for the languages. We briefly recall the probability distribution functions for discrete uniform and Beta-Binomial distributions used in our data generation procedure.",
"cite_spans": [
{
"start": 130,
"end": 152,
"text": "Wiles and Elman (1995)",
"ref_id": "BIBREF24"
},
{
"start": 155,
"end": 177,
"text": "Bod\u00e9n and Wiles (2000)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [
{
"start": 1079,
"end": 1088,
"text": "(Figure 2",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "Discrete Uniform Distribution: Given N 2 N, if a random variable X \u21e0 U (1, N) , then the probability distribution function of X is given as follows:",
"cite_spans": [],
"ref_spans": [
{
"start": 71,
"end": 77,
"text": "(1, N)",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "P (x) = ( 1 N if x 2 {1, . . . , N} 0 otherwise.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "To generate training data with uniformly distributed lengths, we simply draw n from U (1, N) as defined above.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "Beta-Binomial Distribution: Similarly, given N 2 Z 0 and two parameters \u21b5 and 2 R >0 , if a random variable X \u21e0 BetaBin (N, \u21b5, ) , then the probability distribution function of X is given as follows:",
"cite_spans": [],
"ref_spans": [
{
"start": 120,
"end": 128,
"text": "(N, \u21b5, )",
"ref_id": null
}
],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "P (x) = ( N x B(x+\u21b5,N x+ ) B(\u21b5, ) if x 2 {0, . . . , N} 0 otherwise.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "where B(\u03b1, \u03b2) is the Beta function. We set different values of \u03b1 and \u03b2 as such in order to generate the following distributions: U-shaped (\u03b1 = 0.25, \u03b2 = 0.25): The probabilities of having short and long sequences are equally high, but the probability of having an average-length sequence is low.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "Right-tailed (\u03b1 = 1, \u03b2 = 5): Short sequences are more probable than long sequences.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "Left-tailed (\u03b1 = 5, \u03b2 = 1): Long sequences are more probable than short sequences.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "4.2"
},
{
"text": "Most of the previous studies trained networks on sequences of lengths n 2 [1, N], where typical N values were between 10 and 50 (Bod\u00e9n and Wiles, 2000; Gers and Schmidhuber, 2001) , and more recently 100 (Weiss et al., 2018) . To determine the impact of the choice of training length-window on the stability and inductive capabilities of the LSTM networks, we experimented with three different length-windows for n: [1, 30] , [1, 50] , and [50, 100] . In the third window setting [50, 100], we further wanted to see whether LSTM are capable of generalizing to short sequences that are contained in the window range [1, 50] , as well as to sequences that are longer than the sequences seen in the training set. ",
"cite_spans": [
{
"start": 128,
"end": 151,
"text": "(Bod\u00e9n and Wiles, 2000;",
"ref_id": "BIBREF2"
},
{
"start": 152,
"end": 179,
"text": "Gers and Schmidhuber, 2001)",
"ref_id": "BIBREF8"
},
{
"start": 204,
"end": 224,
"text": "(Weiss et al., 2018)",
"ref_id": "BIBREF23"
},
{
"start": 416,
"end": 419,
"text": "[1,",
"ref_id": null
},
{
"start": 420,
"end": 423,
"text": "30]",
"ref_id": null
},
{
"start": 426,
"end": 429,
"text": "[1,",
"ref_id": null
},
{
"start": 430,
"end": 433,
"text": "50]",
"ref_id": null
},
{
"start": 440,
"end": 444,
"text": "[50,",
"ref_id": null
},
{
"start": 445,
"end": 449,
"text": "100]",
"ref_id": null
},
{
"start": 615,
"end": 618,
"text": "[1,",
"ref_id": null
},
{
"start": 619,
"end": 622,
"text": "50]",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Length Windows",
"sec_num": "4.3"
},
{
"text": "It has been shown by Gers and Schmidhuber (2001) that LSTMs can learn a n b n and a n b n c n with 1 and 2 hidden units, respectively. Similarly, H\u00f6lldobler et al. (1997) demonstrated that a simple RNN architecture containing a single hidden unit with carefully tuned parameters can develop a canonical linear counting mechanism to recognize the simple context-free language a n b n , for n \u2264 250. We wanted to explore whether the stability of the networks would improve with an increase in capacity of the LSTM model. We, therefore, varied the number of hidden units in our LSTM models as follows. We experimented with 1, 2, 3, and 36 hidden units for a n b n ; 2, 3, 4, and 36 hidden units for a n b n c n ; and 3, 4, 5, and 36 hidden units for a n b n c n d n . The 36 hidden unit case represents an over-parameterized network with more than enough theoretical capacity to recognize all these languages. Figure 3 exhibits the generalization graphs for the three formal languages trained with LSTM models under different length distribution regimes. Each single-color sequence in a generalization graph shows the average performance of ten LSTMs trained under the same settings but with different weight initializations. In all these experiments, the training sets had the same length-window [1, 50] . On the other hand, we used 2, 3, and 4 hidden units in our LSTM architectures for the languages a n b n , a n b n c n , and a n b n c n d n , respectively. 10 The top three plots show the average lengths of the shortest sequences (e 1 ) whose outputs were incorrectly predicted by the model at test time, whereas the bottom plots show the fifth such shortest lengths (e 5 ). We note that the models trained on uniformly distributed samples seem to perform the best amongst all the four distributions in all the three languages. \nFurthermore, for the languages a n b n c n and a n b n c n d n , the U-shaped Beta-Binomial distribution appears to help the LSTM models generalize better than the left-and righttailed Beta Binomial distributions, in which the lengths of the samples are intentionally skewed towards one end of the training length-window.",
"cite_spans": [
{
"start": 21,
"end": 48,
"text": "Gers and Schmidhuber (2001)",
"ref_id": "BIBREF8"
},
{
"start": 146,
"end": 170,
"text": "H\u00f6lldobler et al. (1997)",
"ref_id": "BIBREF12"
},
{
"start": 1294,
"end": 1297,
"text": "[1,",
"ref_id": null
},
{
"start": 1298,
"end": 1301,
"text": "50]",
"ref_id": null
}
],
"ref_spans": [
{
"start": 907,
"end": 915,
"text": "Figure 3",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "Model Capacity",
"sec_num": "4.4"
},
{
"text": "When we look at the plots for the e 1 values, we observe that all the distribution regimes seem to facilitate learning at least up to the longest sequences in their respective training datasets, drawn by the light blue horizontal lines on the plots, except for the left-tailed Beta-Binomial distribution for which we see errors at lengths shorter than the training length threshold in the languages a n b n c n and a n b n c n d n . For instance, if we were to consider only the e 1 values in our analysis, it would be tempting to argue that the model trained under the left-tailed Beta-Binomial distribution regime did not learn to recognize the language a n b n c n d n . By looking at the e 5 values, in addition to the e 1 values, we however realize that the model was actually learning many of the sequences in the language, but it was just struggling to recognize and correctly predict the outputs of some of the short sequences in the language. This phenomenon can be explained by the under-representation of short sequences in lefttailed Beta-Binomial distributions. Our observation clearly emphasizes the significance of looking beyond e 1 , the shortest error length at test time, in order to obtain a more complete picture of the model's generalizing capabilities. Figure 4 shows the generalization graphs for the three formal languages trained with LSTM models under different training windows. We note that enlarging the training length-window, naturally, enables an LSTM model to generalize far beyond its training length threshold. Besides, we see that the models with the training length-window of [50, 100] performed slightly better than the other two window ranges in the case of a n b n c n (green line, bottom middle plot). Moreover, we acknowledge the capability of LSTMs to recognize longer sequences, as well as shorter sequences. \nFor instance, when trained on the training lengthwindow [50, 100] , our models learned to recognize not only the longer sequences but also the shorter sequences not presented in the training sets for the languages a n b n and a n b n c n .",
"cite_spans": [
{
"start": 1910,
"end": 1914,
"text": "[50,",
"ref_id": null
},
{
"start": 1915,
"end": 1919,
"text": "100]",
"ref_id": null
}
],
"ref_spans": [
{
"start": 1276,
"end": 1284,
"text": "Figure 4",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Length Distributions",
"sec_num": "5.1"
},
{
"text": "Finally, we highlight the importance of the e 5 values once again: If we were to consider only the e 1 values, for instance, we would not have captured the inductive learning capabilities of the models trained with a length-window of [50, 100] in the case of a n b n c n , since the models always failed at recognizing the shortest sequence ab in the language. Yet, considering e 5 values helped us evaluate the performance of the LSTM models more accurately.",
"cite_spans": [
{
"start": 234,
"end": 238,
"text": "[50,",
"ref_id": null
},
{
"start": 239,
"end": 243,
"text": "100]",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Training Length Windows",
"sec_num": "5.2"
},
{
"text": "There seems to be a positive correlation between the number of hidden units in an LSTM network and its stability while learning a formal language. As Figure 5 demonstrates, increasing the number of hidden units in an LSTM network both increases the network's stability and also leads to faster convergence. However, it does not necessarily result in a better generalization. 11 We conjec- ture that, with more hidden units, we simply offer more resources to our LSTM models to regulate their hidden states to learn these languages. The next section supports this hypothesis by visualizing the hidden state activations during sequence processing.",
"cite_spans": [
{
"start": 375,
"end": 377,
"text": "11",
"ref_id": null
}
],
"ref_spans": [
{
"start": 150,
"end": 158,
"text": "Figure 5",
"ref_id": "FIGREF5"
}
],
"eq_spans": [],
"section": "Number of Hidden Units",
"sec_num": "5.3"
},
{
"text": "In addition to the analysis of our empirical results in the previous section, we would like to touch upon two important characteristics of LSTM models when they learn formal languages, namely the convergence issue and counting behavior of LSTM models.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "6"
},
{
"text": "We note that our experiments indicate that LSTM models often do not generalize to the same value in a given experiment setting. Figure 6 , for instance, displays the generalization and loss graphs of LSTM models which were trained to recognize the language a n b n c n under a uniform distribution regime with a training window of [1, 50] . The figure shows the results of 10 trials with different random weight initializations. While all runs appear to converge to a similar loss value, they have different generalization values (that is, their e 1 values are all different).",
"cite_spans": [
{
"start": 331,
"end": 334,
"text": "[1,",
"ref_id": null
},
{
"start": 335,
"end": 338,
"text": "50]",
"ref_id": null
}
],
"ref_spans": [
{
"start": 128,
"end": 136,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Convergence:",
"sec_num": null
},
{
"text": "length window of [1, 50] . We observed similar trends with other configurations. This pattern is fairly common in our experiments, suggesting a disconnection between loss convergence and generalization capability. This result again highlights the importance of performing a fine-grained evaluation of generalization capability, rather than reporting a single number. Our argument is also consistent with those of Bod\u00e9n and Wiles (2002) and Chalup and Blair (2003) , for they also found that the weight initialization affects the inductive capabilities of an LSTM.",
"cite_spans": [
{
"start": 17,
"end": 20,
"text": "[1,",
"ref_id": null
},
{
"start": 21,
"end": 24,
"text": "50]",
"ref_id": null
},
{
"start": 413,
"end": 435,
"text": "Bod\u00e9n and Wiles (2002)",
"ref_id": "BIBREF3"
},
{
"start": 440,
"end": 463,
"text": "Chalup and Blair (2003)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Convergence:",
"sec_num": null
},
{
"text": "Counting Behavior: Here we look at the activation dynamics of the hidden states of the model when processing specific sequences. Figure 7 demonstrates that an LSTM network organizes its hidden state structure in such a way that certain hidden state units learn how to count up and down upon the subsequent encounter of some characters. In the case of a 100 b 100 c 100 d 100 , we observe, for instance, that certain units get activated at time steps 100, 200, and 300. In fact, some units appear to cooperate together to count.",
"cite_spans": [],
"ref_spans": [
{
"start": 129,
"end": 137,
"text": "Figure 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Convergence:",
"sec_num": null
},
{
"text": "On the other hand, when we visualized the activation dynamics of a model which was trained to learn the language a n b n using 36 hidden units, we observed on the testing of a 1000 b 1000 that the model still uses some of its hidden units to count up and down for all the a's and b's seen by the model, respectively, although it rejects this sam- Figure 6 : Generalization graph (left) and loss graph (right) with different random weight initializations. Figure 7 : Hidden state dynamics in a four-unit LSTM model. We note that certain units in the LSTM model get activated at time steps 100, 200, and 300.",
"cite_spans": [],
"ref_spans": [
{
"start": 347,
"end": 355,
"text": "Figure 6",
"ref_id": null
},
{
"start": 455,
"end": 463,
"text": "Figure 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Convergence:",
"sec_num": null
},
{
"text": "ple. It simply outputs (a/b) 1000 b 996 a 4 , instead of (a/b) 1000 b 999 a. Our results corroborate and refine the findings of Gers and Schmidhuber (2001) and Weiss et al. (2018) , who noted the existence of a counting mechanisms for simpler languages, while we also observe a collaborative counting behavior in over-parameterized networks.",
"cite_spans": [
{
"start": 128,
"end": 155,
"text": "Gers and Schmidhuber (2001)",
"ref_id": "BIBREF8"
},
{
"start": 160,
"end": 179,
"text": "Weiss et al. (2018)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Convergence:",
"sec_num": null
},
{
"text": "In this paper, we have addressed the influence of various length distribution regimes and length-window sizes on the generalizing ability of LSTMs to learn simple context-free and contextsensitive languages, namely a n b n , a n b n c n , and a n b n c n d n . Furthermore, we have discussed the effect of the number of hidden units in LSTM models on the stability of a representation learned by the network: We show that increasing the number of hidden units in an LSTM model improves the stability of the network, but not necessarily the inductive power. Finally, we have exhibited the importance of weight initialization to the convergence of the network: Our results indicate that different hidden weight initializations can yield different convergence values, given that all the other parameters are unchanged. Throughout our analysis, we emphasized the importance of a finegrained evaluation, considering generalization beyond the first error and during training. We therefore concluded that there are an abundant number of parameters that can influence the inductive ability of an LSTM to learn a formal language and that the notion of learning, from a neural network's perspective, should be treated carefully.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "7"
},
{
"text": "Our code is available at https://github.com/ suzgunmirac/lstm-eval.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "For a comprehensive investigation of the LSTM architectures, we invite the reader to refer to the following two papers:(Hochreiter and Schmidhuber, 1997;Greff et al., 2017).3 Unlike Gers and Schmidhuber (2001), we do not start each input sequence with a start symbol, since we observed",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We note that we only present positive samples from a given language to our model, but this approach is still consistent with Gold's Theorem about the inductive interference of formal languages only from positive samples(Angluin, 1980), because we give feedback to our model during training whenever it makes an error about its predictions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "In all our experiments, we decided to choose k to be 5. 7 This phenomenon is usually observed in distributions where the training set is skewed towards having more long sequences than short sequences.8 We note that correctly predicting the outputs for the samples ab, abc, and abcd in the languages a n b n , a n b n c n , and a n b n c n d n , respectively, is a hard task, because the output sequences for these samples are a a, ac a, and acd a, in this given order. While they never contain the symbol b in their outputs, the rest of the sequences in their corresponding languages do contain at least one b in their outputs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We define convergence (learning) speed as the speed at which a sequence of numbers, the e1 or e5 values in our cases, converge to its stationary value.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "The results with other configurations were qualitatively similar.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "The results shown in the plot are for models that were trained on datasets with uniform length distributions with a",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "The first author gratefully acknowledges the support of the Harvard College Research Program (HCRP) and the Harvard Center for Research on Computation and Society Research Fellowship for Undergraduate Students. The second author was supported by the Harvard Mind, Brain, and Behavior Initiative. The authors also thank Sebastian Gehrmann for his helpful comments and discussion at the beginning of the project. The computations in this paper were run on the Odyssey cluster supported by the FAS Division of Science, Research Computing Group at Harvard University.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgment",
"sec_num": "8"
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Inductive inference of formal languages from positive data. Information and control",
"authors": [
{
"first": "",
"middle": [],
"last": "Dana Angluin",
"suffix": ""
}
],
"year": 1980,
"venue": "",
"volume": "45",
"issue": "",
"pages": "117--135",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dana Angluin. 1980. Inductive inference of formal languages from positive data. Information and con- trol, 45(2):117-135.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Neural machine translation by jointly learning to align and translate",
"authors": [
{
"first": "Dzmitry",
"middle": [],
"last": "Bahdanau",
"suffix": ""
},
{
"first": "Kyunghyun",
"middle": [],
"last": "Cho",
"suffix": ""
},
{
"first": "Yoshua",
"middle": [],
"last": "Bengio",
"suffix": ""
}
],
"year": 2014,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1409.0473"
]
},
"num": null,
"urls": [],
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Context-free and context-sensitive dynamics in recurrent neural networks",
"authors": [
{
"first": "Mikael",
"middle": [],
"last": "Bod\u00e9n",
"suffix": ""
},
{
"first": "Janet",
"middle": [],
"last": "Wiles",
"suffix": ""
}
],
"year": 2000,
"venue": "Connection Science",
"volume": "12",
"issue": "3-4",
"pages": "197--210",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mikael Bod\u00e9n and Janet Wiles. 2000. Context-free and context-sensitive dynamics in recurrent neural net- works. Connection Science, 12(3-4):197-210.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "On learning context-free and context-sensitive languages",
"authors": [
{
"first": "Mikael",
"middle": [],
"last": "Bod\u00e9n",
"suffix": ""
},
{
"first": "Janet",
"middle": [],
"last": "Wiles",
"suffix": ""
}
],
"year": 2002,
"venue": "IEEE Transactions on Neural Networks",
"volume": "13",
"issue": "2",
"pages": "491--493",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mikael Bod\u00e9n and Janet Wiles. 2002. On learning context-free and context-sensitive languages. IEEE Transactions on Neural Networks, 13(2):491-493.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "The dynamics of discrete-time computation, with application to recurrent neural networks and finite state machine extraction. Neural computation",
"authors": [
{
"first": "Mike",
"middle": [],
"last": "Casey",
"suffix": ""
}
],
"year": 1996,
"venue": "",
"volume": "8",
"issue": "",
"pages": "1135--1178",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mike Casey. 1996. The dynamics of discrete-time computation, with application to recurrent neural networks and finite state machine extraction. Neu- ral computation, 8(6):1135-1178.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Incremental training of first order recurrent neural networks to predict a context-sensitive language",
"authors": [
{
"first": "K",
"middle": [],
"last": "Stephan",
"suffix": ""
},
{
"first": "Alan",
"middle": [
"D"
],
"last": "Chalup",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Blair",
"suffix": ""
}
],
"year": 2003,
"venue": "Neural Networks",
"volume": "16",
"issue": "7",
"pages": "955--972",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stephan K Chalup and Alan D Blair. 2003. Incremen- tal training of first order recurrent neural networks to predict a context-sensitive language. Neural Net- works, 16(7):955-972.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Learning context-free grammars: Capabilities and limitations of a recurrent neural network with an external stack memory",
"authors": [
{
"first": "Sreerupa",
"middle": [],
"last": "Das",
"suffix": ""
},
{
"first": "Lee",
"middle": [],
"last": "Giles",
"suffix": ""
},
{
"first": "Guo-Zheng",
"middle": [],
"last": "Sun",
"suffix": ""
}
],
"year": 1992,
"venue": "Proceedings of The Fourteenth Annual Conference of Cognitive Science Society",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sreerupa Das, C Lee Giles, and Guo-Zheng Sun. 1992. Learning context-free grammars: Capabilities and limitations of a recurrent neural network with an ex- ternal stack memory. In Proceedings of The Four- teenth Annual Conference of Cognitive Science So- ciety. Indiana University, page 14.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Distributed representations, simple recurrent networks, and grammatical structure",
"authors": [
{
"first": "",
"middle": [],
"last": "Jeffrey L Elman",
"suffix": ""
}
],
"year": 1991,
"venue": "Machine learning",
"volume": "7",
"issue": "2-3",
"pages": "195--225",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jeffrey L Elman. 1991. Distributed representations, simple recurrent networks, and grammatical struc- ture. Machine learning, 7(2-3):195-225.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "LSTM recurrent networks learn simple context-free and contextsensitive languages",
"authors": [
{
"first": "A",
"middle": [],
"last": "Felix",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Gers",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Schmidhuber",
"suffix": ""
}
],
"year": 2001,
"venue": "IEEE Transactions on Neural Networks",
"volume": "12",
"issue": "6",
"pages": "1333--1340",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Felix A Gers and E Schmidhuber. 2001. LSTM recur- rent networks learn simple context-free and context- sensitive languages. IEEE Transactions on Neural Networks, 12(6):1333-1340.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Learning and extracting finite state automata with second-order recurrent neural networks",
"authors": [
{
"first": "C Lee",
"middle": [],
"last": "Giles",
"suffix": ""
},
{
"first": "Clifford",
"middle": [
"B"
],
"last": "Miller",
"suffix": ""
},
{
"first": "Dong",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Hsing-Hen",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Guo-Zheng",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Yee-Chun",
"middle": [],
"last": "Lee",
"suffix": ""
}
],
"year": 1992,
"venue": "Neural Computation",
"volume": "4",
"issue": "3",
"pages": "393--405",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "C Lee Giles, Clifford B Miller, Dong Chen, Hsing- Hen Chen, Guo-Zheng Sun, and Yee-Chun Lee. 1992. Learning and extracting finite state automata with second-order recurrent neural networks. Neu- ral Computation, 4(3):393-405.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "LSTM: A search space odyssey",
"authors": [
{
"first": "Klaus",
"middle": [],
"last": "Greff",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Rupesh",
"suffix": ""
},
{
"first": "Jan",
"middle": [],
"last": "Srivastava",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Koutn\u00edk",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Bas",
"suffix": ""
},
{
"first": "J\u00fcrgen",
"middle": [],
"last": "Steunebrink",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Schmidhuber",
"suffix": ""
}
],
"year": 2017,
"venue": "IEEE transactions on neural networks and learning systems",
"volume": "28",
"issue": "",
"pages": "2222--2232",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Klaus Greff, Rupesh K Srivastava, Jan Koutn\u00edk, Bas R Steunebrink, and J\u00fcrgen Schmidhuber. 2017. LSTM: A search space odyssey. IEEE transac- tions on neural networks and learning systems, 28(10):2222-2232.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Long short-term memory",
"authors": [
{
"first": "Sepp",
"middle": [],
"last": "Hochreiter",
"suffix": ""
},
{
"first": "J\u00fcrgen",
"middle": [],
"last": "Schmidhuber",
"suffix": ""
}
],
"year": 1997,
"venue": "Neural computation",
"volume": "9",
"issue": "8",
"pages": "1735--1780",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Designing a counter: Another case study of dynamics and activation landscapes in recurrent networks",
"authors": [
{
"first": "Steffen",
"middle": [],
"last": "H\u00f6lldobler",
"suffix": ""
},
{
"first": "Yvonne",
"middle": [],
"last": "Kalinke",
"suffix": ""
},
{
"first": "Helko",
"middle": [],
"last": "Lehmann",
"suffix": ""
}
],
"year": 1997,
"venue": "Annual Conference on Artificial Intelligence",
"volume": "",
"issue": "",
"pages": "313--324",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Steffen H\u00f6lldobler, Yvonne Kalinke, and Helko Lehmann. 1997. Designing a counter: Another case study of dynamics and activation landscapes in re- current networks. In Annual Conference on Artifi- cial Intelligence, pages 313-324. Springer.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Tailrecursive distributed representations and simple recurrent networks",
"authors": [
{
"first": "C",
"middle": [],
"last": "Stan",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Kwasny",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Barry L Kalman",
"suffix": ""
}
],
"year": 1995,
"venue": "Connection Science",
"volume": "7",
"issue": "1",
"pages": "61--80",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stan C Kwasny and Barry L Kalman. 1995. Tail- recursive distributed representations and simple re- current networks. Connection Science, 7(1):61-80.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "LSTMs Exploit Linguistic Attributes of Data",
"authors": [
{
"first": "Nelson",
"middle": [
"F"
],
"last": "Liu",
"suffix": ""
},
{
"first": "Omer",
"middle": [],
"last": "Levy",
"suffix": ""
},
{
"first": "Roy",
"middle": [],
"last": "Schwartz",
"suffix": ""
},
{
"first": "Chenhao",
"middle": [],
"last": "Tan",
"suffix": ""
},
{
"first": "Noah",
"middle": [
"A"
],
"last": "Smith",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Third Workshop on Representation Learning for NLP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nelson F. Liu, Omer Levy, Roy Schwartz, Chenhao Tan, and Noah A. Smith. 2018. LSTMs Exploit Linguistic Attributes of Data. In Proceedings of the Third Workshop on Representation Learning for NLP.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Automatic differentiation in PyTorch",
"authors": [
{
"first": "Adam",
"middle": [],
"last": "Paszke",
"suffix": ""
},
{
"first": "Sam",
"middle": [],
"last": "Gross",
"suffix": ""
},
{
"first": "Soumith",
"middle": [],
"last": "Chintala",
"suffix": ""
},
{
"first": "Gregory",
"middle": [],
"last": "Chanan",
"suffix": ""
},
{
"first": "Edward",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "Zachary",
"middle": [],
"last": "Devito",
"suffix": ""
},
{
"first": "Zeming",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Alban",
"middle": [],
"last": "Desmaison",
"suffix": ""
},
{
"first": "Luca",
"middle": [],
"last": "Antiga",
"suffix": ""
},
{
"first": "Adam",
"middle": [],
"last": "Lerer",
"suffix": ""
}
],
"year": 2017,
"venue": "NIPS-W",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Adam Paszke, Sam Gross, Soumith Chintala, Gre- gory Chanan, Edward Yang, Zachary DeVito, Zem- ing Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. 2017. Automatic differentiation in PyTorch. In NIPS-W.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Simple recurrent networks learn context-free and context-sensitive languages by counting",
"authors": [
{
"first": "Paul",
"middle": [],
"last": "Rodriguez",
"suffix": ""
}
],
"year": 2001,
"venue": "Neural computation",
"volume": "13",
"issue": "9",
"pages": "2093--2118",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Paul Rodriguez. 2001. Simple recurrent networks learn context-free and context-sensitive languages by counting. Neural computation, 13(9):2093- 2118.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "A Recurrent Neural Network that learns to count",
"authors": [
{
"first": "Paul",
"middle": [],
"last": "Rodriguez",
"suffix": ""
},
{
"first": "Janet",
"middle": [],
"last": "Wiles",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [
"L"
],
"last": "Elman",
"suffix": ""
}
],
"year": 1999,
"venue": "Connection Science",
"volume": "11",
"issue": "1",
"pages": "5--40",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Paul Rodriguez, Janet Wiles, and Jeffrey L. Elman. 1999. A Recurrent Neural Network that learns to count. Connection Science, 11(1):5-40.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Long short-term memory recurrent neural network architectures for large scale acoustic modeling",
"authors": [
{
"first": "Hasim",
"middle": [],
"last": "Sak",
"suffix": ""
},
{
"first": "Andrew",
"middle": [
"W"
],
"last": "Senior",
"suffix": ""
},
{
"first": "Fran\u00e7oise",
"middle": [],
"last": "Beaufays",
"suffix": ""
}
],
"year": 2014,
"venue": "15th Annual Conference of the International Speech Communication Association (Interspeech)",
"volume": "",
"issue": "",
"pages": "338--342",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hasim Sak, Andrew W. Senior, and Fran\u00e7oise Bea- ufays. 2014. Long short-term memory recurrent neural network architectures for large scale acoustic modeling. In 15th Annual Conference of the Inter- national Speech Communication Association (Inter- speech), pages 338-342.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Computation beyond the Turing limit",
"authors": [
{
"first": "T",
"middle": [],
"last": "Hava",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Siegelmann",
"suffix": ""
}
],
"year": 1995,
"venue": "Science",
"volume": "268",
"issue": "5210",
"pages": "545--548",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hava T Siegelmann. 1995. Computation beyond the Turing limit. Science, 268(5210):545-548.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "On the computational power of neural nets",
"authors": [
{
"first": "T",
"middle": [],
"last": "Hava",
"suffix": ""
},
{
"first": "Eduardo",
"middle": [
"D"
],
"last": "Siegelmann",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Sontag",
"suffix": ""
}
],
"year": 1992,
"venue": "Proceedings of the fifth annual workshop on Computational learning theory",
"volume": "",
"issue": "",
"pages": "440--449",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hava T Siegelmann and Eduardo D Sontag. 1992. On the computational power of neural nets. In Proceed- ings of the fifth annual workshop on Computational learning theory, pages 440-449. ACM.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "A recurrent network that performs a context-sensitive prediction task",
"authors": [
{
"first": "Mark",
"middle": [],
"last": "Steijvers",
"suffix": ""
},
{
"first": "Peter",
"middle": [],
"last": "Gr\u00fcnwald",
"suffix": ""
}
],
"year": 1996,
"venue": "Proceedings of the 18th annual conference of the cognitive science society",
"volume": "",
"issue": "",
"pages": "335--339",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mark Steijvers and Peter Gr\u00fcnwald. 1996. A recurrent network that performs a context-sensitive prediction task. In Proceedings of the 18th annual conference of the cognitive science society, pages 335-339.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Sequence to Sequence Learning with Neural Networks",
"authors": [
{
"first": "Ilya",
"middle": [],
"last": "Sutskever",
"suffix": ""
},
{
"first": "Oriol",
"middle": [],
"last": "Vinyals",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Quoc",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Le",
"suffix": ""
}
],
"year": 2014,
"venue": "Advances in neural information processing systems",
"volume": "",
"issue": "",
"pages": "3104--3112",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to Sequence Learning with Neural Net- works. In Advances in neural information process- ing systems, pages 3104-3112.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "On the practical computational power of finite precision rnns for language recognition",
"authors": [
{
"first": "Gail",
"middle": [],
"last": "Weiss",
"suffix": ""
},
{
"first": "Yoav",
"middle": [],
"last": "Goldberg",
"suffix": ""
},
{
"first": "Eran",
"middle": [],
"last": "Yahav",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics",
"volume": "2",
"issue": "",
"pages": "740--745",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gail Weiss, Yoav Goldberg, and Eran Yahav. 2018. On the practical computational power of finite precision rnns for language recognition. In Proceedings of the 56th Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), pages 740-745. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Learning to count without a counter: A case study of dynamics and activation landscapes in recurrent networks",
"authors": [
{
"first": "Janet",
"middle": [],
"last": "Wiles",
"suffix": ""
},
{
"first": "Jeff",
"middle": [],
"last": "Elman",
"suffix": ""
}
],
"year": 1995,
"venue": "Proceedings of the seventeenth annual conference of the cognitive science society",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Janet Wiles and Jeff Elman. 1995. Learning to count without a counter: A case study of dynamics and activation landscapes in recurrent networks. In Pro- ceedings of the seventeenth annual conference of the cognitive science society, s 482, page 487. Erlbaum Hillsdale, NJ.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"type_str": "figure",
"text": ", on the other hand, proposed a variant of Long Short-Term Sample a 2 b 2 a 2 b 2 c 2 a 2 b 2 c 2 d 2 Output a/b a/b b a a/b a/b b c c a a/b a/b b c c d d a",
"num": null
},
"FIGREF1": {
"uris": null,
"type_str": "figure",
"text": "Our LSTM architecture",
"num": null
},
"FIGREF2": {
"uris": null,
"type_str": "figure",
"text": "Distributions from Left to Right: Uniform Distribution with N = 50, (U-Shaped) Beta-Binomial Distribution with \u21b5 = 0.25, = 0.25, N = 49, (Right-Tailed) Beta-Binomial Distribution with \u21b5 = 1, = 5, N = 49, and (Left-Tailed) Beta-Binomial Distribution with \u21b5 = 5, = 1, N = 49.",
"num": null
},
"FIGREF3": {
"uris": null,
"type_str": "figure",
"text": "Generalization graphs showing the average performance of LSTMs trained under different probability distribution regimes for each language. The top plots show the e 1 values, whereas the bottom ones the e 5 values. The light blue horizontal lines indicate the training length window [1, 50].",
"num": null
},
"FIGREF4": {
"uris": null,
"type_str": "figure",
"text": "Generalization graphs showing the average performance of LSTMs trained under different training length-windows for each language. The top plots show the e 1 values, whereas the bottom ones the e 5 values.",
"num": null
},
"FIGREF5": {
"uris": null,
"type_str": "figure",
"text": "Generalization graphs showing the average performance of LSTM models with a different number of hidden units for each language. The top plots show the e 1 values, whereas the bottom ones the e 5 values. The light blue horizontal lines indicate the training length window [1, 50].",
"num": null
},
"TABREF0": {
"type_str": "table",
"content": "<table/>",
"html": null,
"num": null,
"text": "Example input-output pairs for each language under the sequence prediction formulation."
}
}
}
}