| { |
| "paper_id": "K19-1007", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:06:38.851460Z" |
| }, |
| "title": "Using Priming to Uncover the Organization of Syntactic Representations in Neural Language Models", |
| "authors": [ |
| { |
| "first": "Grusha", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": {} |
| }, |
| "email": "grusha.prasad@jhu.edu" |
| }, |
| { |
| "first": "Marten", |
| "middle": [], |
| "last": "Van Schijndel", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": {} |
| }, |
| "email": "tal.linzen@jhu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Neural language models (LMs) perform well on tasks that require sensitivity to syntactic structure. Drawing on the syntactic priming paradigm from psycholinguistics, we propose a novel technique to analyze the representations that enable such success. By establishing a gradient similarity metric between structures, this technique allows us to reconstruct the organization of the LMs' syntactic representational space. We use this technique to demonstrate that LSTM LMs' representations of different types of sentences with relative clauses are organized hierarchically in a linguistically interpretable manner, suggesting that the LMs track abstract properties of the sentence.", |
| "pdf_parse": { |
| "paper_id": "K19-1007", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Neural language models (LMs) perform well on tasks that require sensitivity to syntactic structure. Drawing on the syntactic priming paradigm from psycholinguistics, we propose a novel technique to analyze the representations that enable such success. By establishing a gradient similarity metric between structures, this technique allows us to reconstruct the organization of the LMs' syntactic representational space. We use this technique to demonstrate that LSTM LMs' representations of different types of sentences with relative clauses are organized hierarchically in a linguistically interpretable manner, suggesting that the LMs track abstract properties of the sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural networks trained on text alone, without explicit syntactic supervision, have been surprisingly successful in tasks that require sensitivity to sentence structure. The difficulty of interpreting the learned neural representations that underlie this success has motivated a range of analysis techniques, including diagnostic classifiers (Giulianelli et al., 2018; Conneau et al., 2018; Shi et al., 2016) , visualization of individual neuron activations (K\u00e1d\u00e1r et al., 2017; Qian et al., 2016) , ablation of individual neurons or sets of neurons (Lakretz et al., 2019) and behavioral tests of generalization to infrequent or held out syntactic structures (Linzen et al., 2016; Weber et al., 2018; Mc-Coy et al., 2018) ; for reviews, see Belinkov and Glass (2019) and Alishahi et al. (2019) .", |
| "cite_spans": [ |
| { |
| "start": 342, |
| "end": 368, |
| "text": "(Giulianelli et al., 2018;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 369, |
| "end": 390, |
| "text": "Conneau et al., 2018;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 391, |
| "end": 408, |
| "text": "Shi et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 458, |
| "end": 478, |
| "text": "(K\u00e1d\u00e1r et al., 2017;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 479, |
| "end": 497, |
| "text": "Qian et al., 2016)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 550, |
| "end": 572, |
| "text": "(Lakretz et al., 2019)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 659, |
| "end": 680, |
| "text": "(Linzen et al., 2016;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 681, |
| "end": 700, |
| "text": "Weber et al., 2018;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 701, |
| "end": 721, |
| "text": "Mc-Coy et al., 2018)", |
| "ref_id": null |
| }, |
| { |
| "start": 741, |
| "end": 766, |
| "text": "Belinkov and Glass (2019)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 771, |
| "end": 793, |
| "text": "Alishahi et al. (2019)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper expands the toolkit of neural network analysis techniques by drawing on the syntactic priming paradigm, a central tool in psycholinguistics for analyzing human syntactic representations (Bock, 1986) . This paradigm is based on the empirical finding that people tend to reuse syntactic structures that they have recently produced or encountered. For example, English provides two roughly equivalent ways to express a transfer event:", |
| "cite_spans": [ |
| { |
| "start": 197, |
| "end": 209, |
| "text": "(Bock, 1986)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(1) a. The boy threw the ball to the dog.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "b. The boy threw the dog the ball.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "When readers encounter one of these variants in the text more frequently than the other, they expect that future transfer events will more likely be expressed using the frequent construction than the infrequent one. For example, after reading sentences like (1a) (the prime), readers expect sentences like (2a), which shares syntactic structure with the prime, to occur with a greater likelihood than the alternative variant like (2b) which does not (Wells et al., 2009) . 1 (2) a. The lawyer sent the letter to the client. b. The lawyer sent the client the letter.", |
| "cite_spans": [ |
| { |
| "start": 450, |
| "end": 470, |
| "text": "(Wells et al., 2009)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 473, |
| "end": 474, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We use the priming paradigm to analyze neural network language models (LMs), systems that define a probability distribution over the n th word of a sentence given its first n \u2212 1 words. Building on paradigms that determine whether the LM's expectations are consistent with the syntactic structure of the sentence (Linzen et al., 2016) , we measure the extent to which a LM's expectation for a specific syntactic structure is affected by recent experience with related structures. We prime a fully trained model with a structure by adapting it to a small number of sentences containing that structure (van Schijndel and Linzen, 2018) . We then measure the change in surprisal (negative log probability) after adaptation when the LM is tested either on sentences with the same struc-ture or sentences with different but related structures. The degree to which one structure primes another provides a graded similarity metric between the model's representations of those structures (cf. Branigan and Pickering 2017), which allows us to investigate how the representations of sentences with these structures are organized.", |
| "cite_spans": [ |
| { |
| "start": 313, |
| "end": 334, |
| "text": "(Linzen et al., 2016)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 600, |
| "end": 632, |
| "text": "(van Schijndel and Linzen, 2018)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As a case study, we applied this technique to investigate how recurrent neural network (RNN) LMs represent sentences with relative clauses (RCs). We found that the representations of these sentences are organized in a linguistically interpretable manner: sentences with a particular type of RC were most similar to other sentences with the same type of RC in the LMs' representation space. Furthermore, sentences with different types of RCs were more similar to each other than sentences without RCs. We demonstrate that the similarity between sentences was not driven merely by specific words that appeared in the sentence, suggesting that the LMs tracked abstract properties of the sentence. This ability to track abstract properties decreased as the training corpus size increased. Finally, we tested the hypothesis that LMs' accuracy on agreement prediction (Marvin and Linzen, 2018) would increase with the LMs' ability to track more abstract properties of the sentence, but did not find evidence for this hypothesis.", |
| "cite_spans": [ |
| { |
| "start": 862, |
| "end": 887, |
| "text": "(Marvin and Linzen, 2018)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We build on paradigms that use LM probability estimates for words in a given context as a measure of the model's sensitivity to the syntactic structure of the sentence (Linzen et al., 2016; Gulordava et al., 2018; Marvin and Linzen, 2018) . If a language model assigns a higher probability to a verb form that agrees in number with the subject (the boy... writes) than a verb form that does not (the boy... write), we can infer that the model encodes information about the agreement features of nouns and verbs (that is, the difference between singular and plural) and has correctly identified the subject that corresponds to this verb. This reasoning has been extended beyond subject-verb agreement to study whether the predictions of neural LMs are sensitive to a range of other syntactic dependencies, including negative polarity items (Jumelet and Hupkes, 2018) , filler-gap dependencies (Wilcox et al., 2018) and reflexive pronoun binding (Futrell et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 168, |
| "end": 189, |
| "text": "(Linzen et al., 2016;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 190, |
| "end": 213, |
| "text": "Gulordava et al., 2018;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 214, |
| "end": 238, |
| "text": "Marvin and Linzen, 2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 839, |
| "end": 865, |
| "text": "(Jumelet and Hupkes, 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 892, |
| "end": 913, |
| "text": "(Wilcox et al., 2018)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 944, |
| "end": 966, |
| "text": "(Futrell et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic predictions in neural LMs", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Syntactic priming has been used to study whether the representations of two sentences have shared structure. For example, (1a) (repeated below as (3)) shares the structure VP \u2192 V NP PP with (4a) but not (4b).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic priming in humans", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "(3) The boy threw the ball to the dog.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic priming in humans", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "(4) a. The renowned chef made some wonderful pasta for the guest. b. The renowned chef made the guest some wonderful pasta.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic priming in humans", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "If (3) primes (4a) more than it primes (4b), we can infer that the representations of (3) are more similar to that of (4a) than to that of (4b). Since (4b) and (4a) differ only in their structure, this difference in similarity must be driven by structural information in the representations of the sentences (for reviews, see Mahowald et al. 2016 and Tooley and Traxler 2010) . Although priming studies have traditionally measured the priming effect on the sentence immediately following the prime, more recent studies have demonstrated that the effects of syntactic priming can be cumulative and long-lasting: sentences with a shared structure S X become progressively easier to process when preceded by n sentences with the same structure S X than when preceded by n sentences with a different structure S Y (Kaschak et al., 2011; Wells et al., 2009 ). 2 In conjunction with the finding that words that are consistent with a probable syntactic parse are easier to process than words consistent with less probable parses (Hale, 2001; Levy, 2008) , the increased ease of processing in cumulative priming studies can be interpreted as evidence that, with increased exposure to a structure, participants begin to expect that structure with a greater probability (Chang et al., 2006) .", |
| "cite_spans": [ |
| { |
| "start": 326, |
| "end": 350, |
| "text": "Mahowald et al. 2016 and", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 351, |
| "end": 361, |
| "text": "Tooley and", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 362, |
| "end": 375, |
| "text": "Traxler 2010)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 810, |
| "end": 832, |
| "text": "(Kaschak et al., 2011;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 833, |
| "end": 851, |
| "text": "Wells et al., 2009", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1022, |
| "end": 1034, |
| "text": "(Hale, 2001;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1035, |
| "end": 1046, |
| "text": "Levy, 2008)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1260, |
| "end": 1280, |
| "text": "(Chang et al., 2006)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic priming in humans", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Cumulative priming allows us to study how sentences are related to each other in the human (or LM) representation space in the same way that non-cumulative priming does: when participants (or LMs) are exposed to sentences with structure S X , if there is a greater decrease in surprisal when they are tested on other sentences with S X than when they are tested on other sentences with S Y , we can infer that the representations of sentences with S X are more similar to each other than to the", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic priming in humans", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Unreduced Object RC The conspiracy that the employee welcomed divided the beautiful country.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract structure Example", |
| "sec_num": null |
| }, |
| { |
| "text": "The conspiracy the employee welcomed divided the beautiful country.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced Object RC", |
| "sec_num": null |
| }, |
| { |
| "text": "The conspiracy that was welcomed by the employee divided the beautiful country.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unreduced Passive RC", |
| "sec_num": null |
| }, |
| { |
| "text": "The conspiracy welcomed by the employee divided the beautiful country.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reduced Passive RC", |
| "sec_num": null |
| }, |
| { |
| "text": "The employee that welcomed the conspiracy quickly searched the buildings. PS/ORC-matched Coordination The conspiracy welcomed the employee and divided the beautiful country.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Active Subject RC", |
| "sec_num": null |
| }, |
| { |
| "text": "The employee welcomed the conspiracy and quickly searched the buildings. Chowdhury and Zamparelli 2019). They demonstrated that when an RNN LM was adapted to a small number of sentences with a shared syntactic structure, the surprisal for novel sentences with that structure decreased, enabling them to infer that the LM's representations of sentences contained information about that structure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ASRC-matched Coordination", |
| "sec_num": null |
| }, |
| { |
| "text": "Following the assumptions in Section 2.2, we define a similarity metric between two structures S X and S Y in an LM's representation space by adapting the LM to sentences with S X and measuring the change in surprisal for sentences with S Yi.e. measuring to what extent sentences with S X prime sentences with S Y . We use the notation A(Y | X) to refer to this change in surprisal 3 , where X and Y are non-lexically-overlapping sets of sentences whose members share the structures S X and S Y respectively. If we assume that S X and S Y are similar to each other in the LM's representation space, then A(Y | X) > 0 -i.e., encountering sentences with S X causes the LM to assign a higher probability to sentences with S Y . On the other hand, if we assume that S X and S Y are unrelated to each other, then A(Y | X) = 0 -i.e., encountering sentences with S X does not cause the LM to change its probability for sentences with 3 A is shorthand for adaptation.", |
| "cite_spans": [ |
| { |
| "start": 927, |
| "end": 928, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity between syntactic structures in RNN LM representational space", |
| "sec_num": "3" |
| }, |
| { |
| "text": "S Y .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity between syntactic structures in RNN LM representational space", |
| "sec_num": "3" |
| }, |
| { |
| "text": "4 Experimental setup", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity between syntactic structures in RNN LM representational space", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We analyzed five types of RCs. In an active subject RC, the gap is in the subject position of the embedded clause: 4", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic structures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(5) My cousin that liked the book ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic structures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In a passive subject RC (passive RCs), the gap is in the subject position of the embedded clause, and the embedded verb is passive. In English, passive RCs can be unreduced (6a) or reduced 6b : A schematic for calculating the similarity between two structures S X and S Y in an LM's representation space. X 1 , X 2 and Y 1 , Y 2 are non-lexicallyoverlapping sets of sentences with S X and S Y respectively. Model X and Model Y refer to versions of a fully trained model that have been adapted to either X 1 or Y 1 respectively. Surp X () and Surp Y () are functions that return the surprisal of sentences for Model X and Model Y .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic structures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "These conditions enable us to measure whether sentences with different types of RCs are more similar to each other in an LM's representation space than they are to lexically matched sentences without RCs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic structures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We generated sentences from seven templates, one for each of the syntactic structures of interest. The slots were filled with 223 verbs, 164 nouns, 24 adverbs and 78 adjectives such that the semantic plausibility of the combination of nouns, verbs, adverbs and adjectives was ensured. The seven variants of every sentence had nearly identical lexical items (see Table 1 ). 6 We used these templates to generate five experimental lists -each list comprised of a pair of adaptation and test sets with minimal lexical overlap between them (only function words and some modifiers were shared). Each adaptation set contained 20 sentences and each test set contained 50. In order to infer that any decrease in surprisal is caused by adaptation to an abstract syntactic structure, we need to ensure that the models are not adapting to properties of the sentence that are unrelated to the abstract structure of interest. Con- 6 Since the main verb of the sentence was constrained to be semantically plausible with the subject of the sentence, it often varied between active subject RC and ASRC-matched coordination on the one had and all other conditions on the other. sider a LM adapted to (10) and tested on (11):", |
| "cite_spans": [ |
| { |
| "start": 373, |
| "end": 374, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 918, |
| "end": 919, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 362, |
| "end": 369, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adaptation and test sets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "(10) The conspiracy that the employee welcomed divided the country.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptation and test sets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "(11) The proposal that the receptionist managed shocked the CEO.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptation and test sets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "When the LM is adapted to sentences such as (10), it could adjust its expectations about several properties of the sentence, some more linguistically interesting than others. For instance, it could learn that there are three determiners in the sentence, that the third word of the sentence is that, that sentences have nine words, that every verb is preceded by a noun, and so on and so forth. If there is a decrease in surprisal when a model is adapted to (10) and tested on (11), it is unclear if this is because the model learned to expect object relative clauses or if it learned to expect any of the other mentioned properties.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptation and test sets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To minimize the likelihood that the adaptation effects are driven by irrelevant properties of the sentence, we introduced several sources of variability to our templates: nouns could either be singular or plural, noun phrases could be optionally modified by an adjective, adjectives were optionally modified with an intensifier and verb phrases were optionally modified with adverbs which could occur either pre-verbally or postverbally (details in the Supplementary Materials). 7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptation and test sets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We used 75 of the LSTM language models trained by van Schijndel et al. (2019); these LMs varied in the number of hidden units per layer (100, 200, 400, 800, 1600) and the number of tokens they were trained on (2 million, 10 million or 20 million). For each training corpus size, van Schijndel and Linzen trained models on five disjoint subsets of the WikiText-103 corpus, to ensure that the results generalized across different training sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For every structure, we computed the similarity between that structure and every other structure (including itself) as described in Section 3. This process is schematized in Figure 1 . The surprisal values were averaged across the entire sentence. Figure 2 : The adaptation effect averaged across all 75 models when (a) they were adapted to each of the structures and tested on either the same structure (blue, bottom) or different structure (pink, top) and (b) they were adapted to RCs and tested on non-RCs or vice versa (pink bars); or when they were adapted to RCs or non-RCs and tested on other RCs or and non-RCs respectively (blue bars). Greater values indicate more similarity between adaptation and test structures. Error bars reflect 95% CIs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 174, |
| "end": 182, |
| "text": "Figure 1", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 248, |
| "end": 256, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Calculating the adaptation effect (AE)", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We found that A(B | A) was proportional to the surprisal of B prior to adaptation (see Supplementary Materials). As a consequence, for three structures X, Y and Z, A(Y | X) could be greater than A(Z | X) merely because Y was a more surprising structure to begin with than Z. In order to remove this confound, we first fit a linear regression model predicting A(Y | X) from the surprisal of Y prior to adaptation (Surp(Y )):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Calculating the adaptation effect (AE)", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "A(Y | X) = \u03b2 0 + \u03b2 1 Surp(Y ) +", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Calculating the adaptation effect (AE)", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We then regressed out the linear relationship between A(Y | X) and Surp(Y ) as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Calculating the adaptation effect (AE)", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "AE(Y | X) = A(Y | X) \u2212 \u03b2 1 Surp(Y ) = \u03b2 0 + Since Surp(Y ) was centered around its mean, \u03b2 0 reflects the mean of A(Y | X) when Surp(Y )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Calculating the adaptation effect (AE)", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "is equal to the mean surprisal of all sentences prior to adaptation. The term reflects any variance in A(Y | X) that is not predicted by Surp(Y ). By summing these two terms together, AE(Y | X) reflects the change in surprisal for Y after adapting to X that is independent of Surp(Y ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Calculating the adaptation effect (AE)", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We used linear mixed effects models (Pinheiro et al., 2000) to test for statistical significance; all of the results reported below were highly significant. Details about the statistical analyses can be found in the Supplementary Materials.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 59, |
| "text": "(Pinheiro et al., 2000)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Statistical analyses", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "As discussed in Section 2.3, under the adaptationas-priming paradigm, we would expect sentences that share the same specific structure to be more similar to each other than lexically matched sentences that do not share the structure. 9 In other words, if X 1 and X 2 are non-lexically-overlapping sets of sentences with shared structure S X , and Y 2 is a set of sentences with structure S Y , but is lexically matched with X 2 , then we would expect AE(X 2 | X 1 ) > AE(Y 2 | X 1 ). We found this prediction to be true for all of our seven structures (Figure 2a ), thus validating our similarity metric.", |
| "cite_spans": [ |
| { |
| "start": 234, |
| "end": 235, |
| "text": "9", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 552, |
| "end": 562, |
| "text": "(Figure 2a", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Validating AE as a similarity metric", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Our two coordination conditions were structurally identical to each other but varied in their semantic plausibility -the sentences in PS/ORC-matched coordination condition were often semantically implausible whereas sentences in ASRC-matched condition were always semantically plausible (see footnote 5). If sentences that were structurally similar were close together irrespective of semantic plausibility, then we expect sentences with coordination to be more similar to each other than lexically matched sentences with RCs. Consistent with this prediction, the adaptation effect for models adapted to one type of coordination was greater when the models were tested on sentences with the other type of coordination than when they were tested on sentences with RCs (top panel of Figure 2b) . ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 781, |
| "end": 791, |
| "text": "Figure 2b)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Similarity between sentences with different types of VP coordination", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Unlike sentences with coordination, sentences with different types of RCs differ from each other at a surface level (see Table 1 ). However, at a more abstract level they all share a common property: a gap. If the RNN LMs were keeping track of whether or not a sentence contained a gap, we would expect sentences with different types of RCs to be more similar to each other in the RNN LMs' representation space than lexically matched sentences without a gap. In other words, if RC X and RC Y are two different types of RCs and Coord Y is a sentence with verb coordination lexically matched with RC Y , then we would ex-", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 121, |
| "end": 128, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Similarity between sentences with different types of RCs", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "pect AE(RC Y | RC X ) > AE(Coord Y | RC X ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity between sentences with different types of RCs", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Consistent with this prediction, the adaptation effect for models adapted to RCs was greater when they were tested on sentences with other types of RCs than when they were tested on sentences with coordination (bottom panel of Figure 2b ). This suggests that the LMs do keep track of whether or not a sentence contains a gap, even though this property is not overtly indicated by a lexical item that is shared across all types of RCs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 227, |
| "end": 236, |
| "text": "Figure 2b", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Similarity between sentences with different types of RCs", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The different types of RCs we tested can be divided into sub-classes based on at least two linguistically interpretable features: reduction and passivity. Reduction distinguishes reduced passive and object RCs on the one hand from unreduced passive and object RCs on the other. Passivity dis-tinguishes reduced and unreduced passive RCs on the one hand from reduced and unreduced object RCs on the other. The LMs could be tracking either, both or none of these features. We probed whether the LMs track these features by comparing the similarity between sentences that share one feature but not the other, with the similarity between sentences that share neither feature. If the adaptation effect is greater when there is a match in one feature than when there is a match in neither of the features, we can infer that the LMs track whether sentences have that feature. We found that the LMs track both of these features (Figure 3) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 920, |
| "end": 930, |
| "text": "(Figure 3)", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Similarity between sentences belonging to different sub-classes of RCs", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Additionally, we probed which of the features contributes more towards the similarity between sentences by comparing the similarity between sentences that match only in passivity with sentences that match only in reduction. When the adaptation and test sets matched only in passivity, the adaptation effect was slightly (but significantly) greater than when the adaptation and test sets matched only in reduction (Figure 3) . In other words, in the LMs' representation space, (12) is more similar to (13) than it is to (14), suggesting that passivity contributes more towards the similarity between sentences than reduction.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 413, |
| "end": 423, |
| "text": "(Figure 3)", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Similarity between sentences belonging to different sub-classes of RCs", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "(12) The conspiracy the employee welcomed divided the country.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity between sentences belonging to different sub-classes of RCs", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "(13) The conspiracy that the employee welcomed divided the country. 14The conspiracy welcomed by the employee divided the country.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity between sentences belonging to different sub-classes of RCs", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "This result is both intuitive and linguistically interpretable -the edit distance between reduced and unreduced RCs is smaller than the that between object and passive RCs; the syntax tree for (12) is also more similar to (13) than it is to (14).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity between sentences belonging to different sub-classes of RCs", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Our analyses so far have demonstrated that sentences that belong to linguistically interpretable classes (e.g., sentences that match in reduction) are more similar to each other in the LMs' representation space than they are to sentences that do not belong to those classes (e.g., sentences that do not match in reduction). However, it is unclear what properties of the sentences are driving this similarity between members of the class. For al-most all of the linguistically interpretable classes we considered, all sentences belonging to a class shared at least some, if not all, function words. The only exception was the class of all RCs, where the property shared by all sentences in this class (the presence of a gap) was not overtly observable. Therefore, it is possible that the similarity between members of most of the classes we tested was being driven entirely by the presence of these function words. In order to test whether the similarity between members of classes was indeed being driven by the presence of shared function words, we compared the representation space of the models we tested in the previous sections (henceforth trained models) with the representation space of models trained on no data (henceforth baseline models). Since the baseline models were only ever exposed to the 20 sentences in the adaptation set and there was no lexical overlap in content words between adaptation and test sets, any similarity between sentences in the representation space of these models would be driven by the presence of function words. If the similarity between sentences in the representation space of the trained models was being driven by factors other than the presence of function words, we would expect this similarity to be greater than the similarity between these sentences in the representation space of the baseline models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "What properties of sentences drive the similarity between them?", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "We cannot directly use adaptation effect to compare the similarity between sentences in the representation spaces of trained models and baseline models, however: models trained on more data are likely to have stronger priors and are therefore less likely to drastically change their representations after 20 sentences than models trained on less data. In order to mitigate this issue, we defined a distance measure between sentences that belong to a class and sentences that do not belong to a class S X as follows (see Figure 4 for a schematic):", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 520, |
| "end": 528, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "What properties of sentences drive the similarity between them?", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "D(S X , \u00acS X ) = AE(X 2 | X 1 ) AE(\u00acX 2 | X 1 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "What properties of sentences drive the similarity between them?", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "This value would be greater than one if sentences that belonged to a class were more similar to each other than they were to sentences that did not belong to the class. Since the strength of prior belief would affect sentences that belong to the class the same way it would affect sentences that do not belong to the class, the effect would cancel out.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "What properties of sentences drive the similarity between them?", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "We measured the distance between members and non-members for three linguistically inter- Figure 4 : A schematic of how D(RC, \u00acRC) is calculated. For any given row, the black square indicates the specific structure the models were adapted to, the blue squares indicate other structures that belong to the same linguistically defined class as the black square and the pink squares indicate the structures that do not belong to this linguistically defined class. In calculating the distance, we first calculated the proportion between the mean adaptation effect for the blue squares and the mean adaptation effect for pink squares for each row. We then averaged across the proportion for each row to arrive at one number. pretable classes: sentences which contained the same type of RC, sentences that matched in their reduction or sentences that contained any type of RC. In our baseline models, for all three classes, sentences that belonged to one of these classes were more similar to each other than sentences that did not belong to that class (Figure 5a ). This was surprising for the class of sentences that contained any type of RC because there was no function word that was shared by all sentences in this class. We hypothesize that this is because sentences without RCs always contained the word and, whereas sentences with RCs never did.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 89, |
| "end": 97, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1046, |
| "end": 1056, |
| "text": "(Figure 5a", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "What properties of sentences drive the similarity between them?", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "In cases where members of the class shared at least some function words, the distance between sentences that belonged to the class and sentences that did not for the trained models was greater than that for the baseline models. This suggests that the similarity between sentences in the representation space of trained models was being driven by factors other than the mere presence of function words. However, somewhat surprisingly, as the number of training tokens increased, the distance between members and non-members decreased.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "What properties of sentences drive the similarity between them?", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "In the case where the members of the class did not share any function words, the distance between sentences that belonged to the class and sentences that did not belong to the class did not differ be- Reduced Object RC Unreduced Object RC 0.9 1.0 1.1 1.2 1.3 1.4 0.9 1.0 1.1 1. Figure 5 : (a) Effect of hidden layer size and corpus size on the distance between sentences with specific RCs and sentences without (left), between sentences that match in reduction and sentences that do not (middle) and between sentences with RCs and sentences without (right). The solid black line indicates the point at which sentences that belong to a particular class are equally similar to other sentences that belong to that class and sentences that do not. tween the trained models and the baseline models. This suggests that any similarity between sentences in the representation space of trained models was driven purely by the presence (or in this case absence) of lexical items.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 278, |
| "end": 286, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "What properties of sentences drive the similarity between them?", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Marvin and Linzen (2018) created a dataset that evaluated the grammaticality of the predictions of language models. Using this dataset, they showed that LSTM LMs could not accurately predict the number of the main verb if the main clause subject was modified by an object RCs (either reduced or unreduced). However, the models had better performance if the main clause was modified by an active subject RC. For example, the models were at near chance levels in predicting that (15a) should have higher probability than (15b), but were slightly better at predicting that (16a) should have higher probability than (16b):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Does D(RC, \u00acRC) predict agreement prediction accuracy?", |
| "sec_num": "5.6" |
| }, |
| { |
| "text": "(15) a. The farmer that the parents love swims. b. *The farmer that the parents love swim.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Does D(RC, \u00acRC) predict agreement prediction accuracy?", |
| "sec_num": "5.6" |
| }, |
| { |
| "text": "(16) a. The farmer that loves the parents swims. b. *The farmer that loves the parents swim.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Does D(RC, \u00acRC) predict agreement prediction accuracy?", |
| "sec_num": "5.6" |
| }, |
| { |
| "text": "One possible explanation for this poor performance is that object RCs, either reduced or unreduced, are quite infrequent (Roland et al., 2007) . If the LM treats object RCs as unrelated to other RCs, there are likely very few training examples from which the models can learn about subjectverb agreement when the subject is modified by an object RC. If the LM had instead treated ob-ject RCs as belonging to the same class as other RCs, it could learn to generalize from training examples of subject-verb agreement when the subject is modified by other RCs. This suggests the hypothesis that agreement prediction accuracy on object RCs will be higher in LMs in which the representation of object RCs is more similar to the representation of other RCs.", |
| "cite_spans": [ |
| { |
| "start": 121, |
| "end": 142, |
| "text": "(Roland et al., 2007)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Does D(RC, \u00acRC) predict agreement prediction accuracy?", |
| "sec_num": "5.6" |
| }, |
| { |
| "text": "The similarity between object RCs and other RCs was defined as in the previous section (the proportion of blue squares to pink squares of the top two rows in Figure 4 ). There was an increase in accuracy as the number of hidden units increased (see Figure 5b) . However, the similarity between object RCs and other types of RCs did not significantly correlate with agreement prediction; we therefore did not find any evidence for the hypothesis mentioned above. 10", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 166, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 249, |
| "end": 259, |
| "text": "Figure 5b)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Does D(RC, \u00acRC) predict agreement prediction accuracy?", |
| "sec_num": "5.6" |
| }, |
| { |
| "text": "Drawing on the syntactic priming paradigm from psycholinguistics, we proposed a new technique to analyze how the representations of sentences in neural language models (LMs) are organized. Applying this paradigm to sentences with relative clauses (RCs), we found that the representations of these sentences were organized in a linguistically interpretable hierarchical manner (summarized in Figure 6 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 391, |
| "end": 399, |
| "text": "Figure 6", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We investigated whether this hierarchical organization was driven by function words that are shared among sentences sentences or whether there was evidence that LMs were tracking more abstract properties of the sentence. We found that for at least some linguistically interpretable classes, sentences that belonged to these classes were more similar to each other in the representation space of the LMs we tested than in the representation space of baseline LMs that were not trained on any data. This suggests that the trained LMs were capable of tracking abstract properties of the sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "However, for linguistically interpretable classes in which sentences shared a non-lexically observable property (e.g. presence of a gap), sentences were as similar to each other in the representation space of the LMs we tested as in the representation space of baseline LMs. Taken together, these results suggest that LMs might be able to track abstract properties of classes of sentences only if these classes also share a lexically observable property.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Additionally, we found that the sentences belonging to linguistically interpretable classes were more similar to each other in the representation spaces of models trained on 2 million tokens than in the representation spaces for models trained on 20 million tokens. We infer from this that LMs' ability to track abstract properties of sentences decreases with an increase in the training corpus size. This suggests that if we want these LMs to track more abstract linguistic properties, training them on more data from the same distribution is unlikely to help (cf. van Schijndel et al. 2019) . Future work can explore how to bias these models to track linguistically useful properties through architectural biases (Dyer et al., 2016) , training on auxiliary tasks (Enguehard et al., 2017) or data augmentation (Perez and Wang, 2017) .", |
| "cite_spans": [ |
| { |
| "start": 561, |
| "end": 592, |
| "text": "(cf. van Schijndel et al. 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 715, |
| "end": 734, |
| "text": "(Dyer et al., 2016)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 765, |
| "end": 789, |
| "text": "(Enguehard et al., 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 811, |
| "end": 833, |
| "text": "(Perez and Wang, 2017)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We hypothesized that models' accuracy on subject verb agreement when preceded by object RCs would increase as the similarity between object RCs and the other types of RCs increased. However, we did not find evidence for this. This could either be because the similarity between object RCs and the other types of RCs was too weak to be useful (see Figure 5a ) or because the LMs do not use this property when predicting verb agreement. Future work can disambiguate these reasons by testing models that are biased to treat sentences with object RCs and other RCs as being similar.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 347, |
| "end": 356, |
| "text": "Figure 5a", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Finally, our method allows us to generate a similarity matrix in the LMs representation space for any given set of structures. In the future, generating a similar matrix for human representations using priming experiments and comparing these two matrices using analysis methods from cognitive neuroscience (Kriegeskorte et al., 2008) may enable us to gain insight into how human-like the LM representations are and vice versa.", |
| "cite_spans": [ |
| { |
| "start": 306, |
| "end": 333, |
| "text": "(Kriegeskorte et al., 2008)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We proposed a novel technique to analyze how the representations of various syntactic structures are organized in neural language models. As a case study, we applied this technique to gain insight into the representations of sentences with relative clauses in RNN language models and found that the representations of sentences were organized in a linguistically interpretable manner.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Wells et al. (2009) measured priming effects for relative clauses, not dative constructions. For work on priming in production with dative constructions, seeKaschak et al. (2011).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In studies looking at non-cumulative priming, n = 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We illustrate the location of the gap with underscores here, but the underscores were not included in the LM's input.5 In order to maintain the same word order as in object and passive RCs, the subject of the coordinated verb phrases is an NP that tends to fill the object position in other sentences (e.g, \"the equation\"). Therefore, many of the sentences in this condition are implausible (e.g., \"The equation reviewed the physicists and challenged the method.\")", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The Supplementary Materials, the templates and and code for all the analyses along with the data can be found on GitHub: https://github.com/grushaprasad/RNN-Priming 8 Unknown words were excluded from this average.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "By lexically matched we mean that all content words were shared between sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Similar patterns were observed for the other constructions in the dataset. See Supplementary Materials.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Sadhwi Srinivas and the members of the CAP lab at JHU for helpful discussions and valuable feedback.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": "8" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Analyzing and interpreting neural networks for NLP: A report on the first BlackboxNLP workshop", |
| "authors": [ |
| { |
| "first": "Afra", |
| "middle": [], |
| "last": "Alishahi", |
| "suffix": "" |
| }, |
| { |
| "first": "Grzegorz", |
| "middle": [], |
| "last": "Chrupa\u0142a", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Journal of Natural Language Engineering", |
| "volume": "25", |
| "issue": "4", |
| "pages": "543--557", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Afra Alishahi, Grzegorz Chrupa\u0142a, and Tal Linzen. 2019. Analyzing and interpreting neural networks for NLP: A report on the first BlackboxNLP work- shop. Journal of Natural Language Engineering, 25(4):543-557.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Analysis methods in neural language processing: A survey", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "7", |
| "issue": "", |
| "pages": "49--72", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00254" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov and James Glass. 2019. Analysis methods in neural language processing: A survey. Transactions of the Association for Computational Linguistics, 7:49-72.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Syntactic persistence in language production", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathryn", |
| "middle": [], |
| "last": "Bock", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Cognitive Psychology", |
| "volume": "18", |
| "issue": "3", |
| "pages": "355--387", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Kathryn Bock. 1986. Syntactic persistence in language production. Cognitive Psychology, 18(3):355-387.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "An experimental approach to linguistic representation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Holly", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [ |
| "J" |
| ], |
| "last": "Branigan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pickering", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Behavioral and Brain Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Holly P. Branigan and Martin J. Pickering. 2017. An experimental approach to linguistic representation. Behavioral and Brain Sciences, 40.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Becoming syntactic", |
| "authors": [ |
| { |
| "first": "Franklin", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Gary", |
| "middle": [ |
| "S" |
| ], |
| "last": "Dell", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathryn", |
| "middle": [], |
| "last": "Bock", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Psychological Review", |
| "volume": "113", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franklin Chang, Gary S. Dell, and Kathryn Bock. 2006. Becoming syntactic. Psychological Review, 113(2):234.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "An LSTM adaptation study of (un)grammaticality", |
| "authors": [ |
| { |
| "first": "Absar", |
| "middle": [], |
| "last": "Shammur", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Chowdhury", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zamparelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "204--212", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shammur Absar Chowdhury and Roberto Zampar- elli. 2019. An LSTM adaptation study of (un)grammaticality. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Inter- preting Neural Networks for NLP, pages 204-212, Florence, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "What you can cram into a single $&!#* vector: Probing sentence embeddings for linguistic properties", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2126--2136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, German Kruszewski, Guillaume Lample, Lo\u00efc Barrault, and Marco Baroni. 2018. What you can cram into a single $&!#* vector: Probing sentence embeddings for linguistic proper- ties. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 2126-2136, Melbourne, Australia. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Recurrent neural network grammars", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adhiguna", |
| "middle": [], |
| "last": "Kuncoro", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "199--209", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/COLI_a_00300" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Dyer, Adhiguna Kuncoro, Miguel Ballesteros, and Noah A. Smith. 2016. Recurrent neural network grammars. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 199-209, San Diego, Califor- nia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Exploring the syntactic abilities of RNNs with multi-task learning", |
| "authors": [ |
| { |
| "first": "Emile", |
| "middle": [], |
| "last": "Enguehard", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 21st Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "3--14", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K17-1003" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emile Enguehard, Yoav Goldberg, and Tal Linzen. 2017. Exploring the syntactic abilities of RNNs with multi-task learning. In Proceedings of the 21st Conference on Computational Natural Lan- guage Learning (CoNLL 2017), pages 3-14, Van- couver, Canada. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Neural language models as psycholinguistic subjects: Representations of syntactic state", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Futrell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ethan", |
| "middle": [], |
| "last": "Wilcox", |
| "suffix": "" |
| }, |
| { |
| "first": "Takashi", |
| "middle": [], |
| "last": "Morita", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "32--42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Futrell, Ethan Wilcox, Takashi Morita, Peng Qian, Miguel Ballesteros, and Roger Levy. 2019. Neural language models as psycholinguistic sub- jects: Representations of syntactic state. In Pro- ceedings of the 2019 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long and Short Papers), pages 32-42, Min- neapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Under the hood: Using diagnostic classifiers to investigate and improve how language models track agreement information", |
| "authors": [ |
| { |
| "first": "Mario", |
| "middle": [], |
| "last": "Giulianelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Jack", |
| "middle": [], |
| "last": "Harding", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Mohnert", |
| "suffix": "" |
| }, |
| { |
| "first": "Dieuwke", |
| "middle": [], |
| "last": "Hupkes", |
| "suffix": "" |
| }, |
| { |
| "first": "Willem", |
| "middle": [], |
| "last": "Zuidema", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "240--248", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mario Giulianelli, Jack Harding, Florian Mohnert, Dieuwke Hupkes, and Willem Zuidema. 2018. Un- der the hood: Using diagnostic classifiers to in- vestigate and improve how language models track agreement information. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and In- terpreting Neural Networks for NLP, pages 240- 248, Brussels, Belgium. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Improving neural language models with a continuous cache", |
| "authors": [ |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Usunier", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Fifth International Conference on Learning Representations. International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edouard Grave, Armand Joulin, and Nicolas Usunier. 2017. Improving neural language models with a continuous cache. In Yoshua Bengio and Yann Le- Cun, editors, Proceedings of the Fifth International Conference on Learning Representations. Interna- tional Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Colorless green recurrent networks dream hierarchically", |
| "authors": [ |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Gulordava", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1195--1205", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1108" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kristina Gulordava, Piotr Bojanowski, Edouard Grave, Tal Linzen, and Marco Baroni. 2018. Colorless green recurrent networks dream hierarchically. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1195-1205, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A probabilistic Earley parser as a psycholinguistic model", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hale", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the second meeting of the North American Chapter of the Association for Computational Linguistics on Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Hale. 2001. A probabilistic Earley parser as a psycholinguistic model. In Proceedings of the sec- ond meeting of the North American Chapter of the Association for Computational Linguistics on Lan- guage Technologies, pages 1-8, Pittsburgh, PA. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Do language models understand anything? on the ability of LSTMs to understand negative polarity items", |
| "authors": [ |
| { |
| "first": "Jaap", |
| "middle": [], |
| "last": "Jumelet", |
| "suffix": "" |
| }, |
| { |
| "first": "Dieuwke", |
| "middle": [], |
| "last": "Hupkes", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop Black-boxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "222--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jaap Jumelet and Dieuwke Hupkes. 2018. Do lan- guage models understand anything? on the ability of LSTMs to understand negative polarity items. In Proceedings of the 2018 EMNLP Workshop Black- boxNLP: Analyzing and Interpreting Neural Net- works for NLP, pages 222-231, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Representation of linguistic form and function in recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Akos", |
| "middle": [], |
| "last": "K\u00e1d\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "Grzegorz", |
| "middle": [], |
| "last": "Chrupa\u0142a", |
| "suffix": "" |
| }, |
| { |
| "first": "Afra", |
| "middle": [], |
| "last": "Alishahi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Computational Linguistics", |
| "volume": "43", |
| "issue": "4", |
| "pages": "761--780", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/COLI_a_00300" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Akos K\u00e1d\u00e1r, Grzegorz Chrupa\u0142a, and Afra Alishahi. 2017. Representation of linguistic form and func- tion in recurrent neural networks. Computational Linguistics, 43(4):761-780.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Structural priming as implicit learning: Cumulative priming effects and individual differences", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kaschak", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [ |
| "J" |
| ], |
| "last": "Kutta", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "L" |
| ], |
| "last": "Jones", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Psychonomic Bulletin & Review", |
| "volume": "18", |
| "issue": "6", |
| "pages": "1133--1139", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael P. Kaschak, Timothy J. Kutta, and John L. Jones. 2011. Structural priming as implicit learn- ing: Cumulative priming effects and individual differences. Psychonomic Bulletin & Review, 18(6):1133-1139.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Dynamic evaluation of neural sequence models", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Krause", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Kahembwe", |
| "suffix": "" |
| }, |
| { |
| "first": "Iain", |
| "middle": [], |
| "last": "Murray", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Renals", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Krause, Emmanuel Kahembwe, Iain Murray, and Steve Renals. 2017. Dynamic evaluation of neural sequence models. Technical report, University of Edinburgh.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Representational similarity analysis-connecting the branches of systems neuroscience", |
| "authors": [ |
| { |
| "first": "Nikolaus", |
| "middle": [], |
| "last": "Kriegeskorte", |
| "suffix": "" |
| }, |
| { |
| "first": "Marieke", |
| "middle": [], |
| "last": "Mur", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "A" |
| ], |
| "last": "Bandettini", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Frontiers in Systems Neuroscience", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolaus Kriegeskorte, Marieke Mur, and Peter A. Bandettini. 2008. Representational similarity analysis-connecting the branches of systems neuro- science. Frontiers in Systems Neuroscience, 2:4.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The emergence of number and syntax units in LSTM language models", |
| "authors": [ |
| { |
| "first": "Yair", |
| "middle": [], |
| "last": "Lakretz", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Theo", |
| "middle": [], |
| "last": "Desbordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Dieuwke", |
| "middle": [], |
| "last": "Hupkes", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanislas", |
| "middle": [], |
| "last": "Dehaene", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "11--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yair Lakretz, German Kruszewski, Theo Desbordes, Dieuwke Hupkes, Stanislas Dehaene, and Marco Baroni. 2019. The emergence of number and syn- tax units in LSTM language models. In Proceed- ings of the 2019 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 11-20, Minneapo- lis, Minnesota. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Expectation-based syntactic comprehension", |
| "authors": [ |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Cognition", |
| "volume": "106", |
| "issue": "", |
| "pages": "1126--1177", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roger Levy. 2008. Expectation-based syntactic com- prehension. Cognition, 106:1126-1177.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Assessing the ability of LSTMs to learn syntax-sensitive dependencies", |
| "authors": [ |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Dupoux", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "4", |
| "issue": "", |
| "pages": "521--535", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00115" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Linzen, Emmanuel Dupoux, and Yoav Goldberg. 2016. Assessing the ability of LSTMs to learn syntax-sensitive dependencies. Transactions of the Association for Computational Linguistics, 4:521- 535.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A meta-analysis of syntactic priming in language production", |
| "authors": [ |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Mahowald", |
| "suffix": "" |
| }, |
| { |
| "first": "Ariel", |
| "middle": [], |
| "last": "James", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Futrell", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Journal of Memory and Language", |
| "volume": "91", |
| "issue": "", |
| "pages": "5--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyle Mahowald, Ariel James, Richard Futrell, and Ed- ward Gibson. 2016. A meta-analysis of syntactic priming in language production. Journal of Memory and Language, 91:5-27.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Targeted syntactic evaluation of language models", |
| "authors": [ |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Marvin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1192--1202", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca Marvin and Tal Linzen. 2018. Targeted syn- tactic evaluation of language models. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1192-1202, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Revisiting the poverty of the stimulus: Hierarchical generalization without a hierarchical bias in recurrent neural networks", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "Thomas" |
| ], |
| "last": "McCoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 40th Annual Conference of the Cognitive Science Society", |
| "volume": "", |
| "issue": "", |
| "pages": "2093--2098", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Thomas McCoy, Robert Frank, and Tal Linzen. 2018. Revisiting the poverty of the stimulus: Hi- erarchical generalization without a hierarchical bias in recurrent neural networks. In Proceedings of the 40th Annual Conference of the Cognitive Science Society, pages 2093-2098, Austin, TX.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "The effectiveness of data augmentation in image classification using deep learning", |
| "authors": [ |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Perez", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1712.04621" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luis Perez and Jason Wang. 2017. The effectiveness of data augmentation in image classification using deep learning. arXiv preprint arXiv:1712.04621.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Mixed-Effects Models in S and S-PLUS", |
| "authors": [ |
| { |
| "first": "Jos\u00e9", |
| "middle": [], |
| "last": "Pinheiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Bates", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jos\u00e9 Pinheiro, Douglas Bates, et al. 2000. Mixed- Effects Models in S and S-PLUS. Springer Science & Business Media.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Analyzing linguistic knowledge in sequential model of sentence", |
| "authors": [ |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "826--835", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1079" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng Qian, Xipeng Qiu, and Xuanjing Huang. 2016. Analyzing linguistic knowledge in sequential model of sentence. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Pro- cessing, pages 826-835, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Frequency of basic English grammatical structures: A corpus analysis", |
| "authors": [ |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Roland", |
| "suffix": "" |
| }, |
| { |
| "first": "Fredric", |
| "middle": [], |
| "last": "Dick", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [ |
| "L" |
| ], |
| "last": "Elman", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Journal of Memory and Language", |
| "volume": "57", |
| "issue": "3", |
| "pages": "348--379", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douglas Roland, Fredric Dick, and Jeffrey L. Elman. 2007. Frequency of basic English grammatical structures: A corpus analysis. Journal of Memory and Language, 57(3):348-379.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "A neural model of adaptation in reading", |
| "authors": [ |
| { |
| "first": "Marten", |
| "middle": [], |
| "last": "Van Schijndel", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4704--4710", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marten van Schijndel and Tal Linzen. 2018. A neural model of adaptation in reading. In Proceedings of the 2018 Conference on Empirical Methods in Nat- ural Language Processing, pages 4704-4710, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Quantity doesn't buy quality syntax with neural language models", |
| "authors": [ |
| { |
| "first": "Marten", |
| "middle": [], |
| "last": "Van Schijndel", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Mueller", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marten van Schijndel, Aaron Mueller, and Tal Linzen. 2019. Quantity doesn't buy quality syntax with neu- ral language models. In Proceedings of the 2019 Conference on Empirical Methods in Natural Lan- guage Processing, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Does string-based neural MT learn source syntax?", |
| "authors": [ |
| { |
| "first": "Xing", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Inkit", |
| "middle": [], |
| "last": "Padhi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1526--1534", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1159" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xing Shi, Inkit Padhi, and Kevin Knight. 2016. Does string-based neural MT learn source syntax? In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, pages 1526- 1534, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Syntactic priming effects in comprehension: A critical review", |
| "authors": [ |
| { |
| "first": "Kristen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Tooley", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "J" |
| ], |
| "last": "Traxler", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Language and Linguistics Compass", |
| "volume": "4", |
| "issue": "10", |
| "pages": "925--937", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kristen M Tooley and Matthew J Traxler. 2010. Syn- tactic priming effects in comprehension: A criti- cal review. Language and Linguistics Compass, 4(10):925-937.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "The fine line between linguistic generalization and failure in Seq2Seq-attention models", |
| "authors": [ |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| }, |
| { |
| "first": "Leena", |
| "middle": [], |
| "last": "Shekhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Niranjan", |
| "middle": [], |
| "last": "Balasubramanian", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Workshop on Generalization in the Age of Deep Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "24--27", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-1004" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noah Weber, Leena Shekhar, and Niranjan Balasubra- manian. 2018. The fine line between linguistic gen- eralization and failure in Seq2Seq-attention models. In Proceedings of the Workshop on Generalization in the Age of Deep Learning, pages 24-27, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Experience and sentence processing: Statistical learning and relative clause comprehension", |
| "authors": [ |
| { |
| "first": "Justine", |
| "middle": [ |
| "B" |
| ], |
| "last": "Wells", |
| "suffix": "" |
| }, |
| { |
| "first": "Morten", |
| "middle": [ |
| "H" |
| ], |
| "last": "Christiansen", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "S" |
| ], |
| "last": "Race", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "J" |
| ], |
| "last": "Acheson", |
| "suffix": "" |
| }, |
| { |
| "first": "Maryellen", |
| "middle": [ |
| "C" |
| ], |
| "last": "MacDonald", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Cognitive Psychology", |
| "volume": "58", |
| "issue": "", |
| "pages": "250--271", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Justine B. Wells, Morten H. Christiansen, David S. Race, Daniel J. Acheson, and Maryellen C. Mac- Donald. 2009. Experience and sentence processing: Statistical learning and relative clause comprehen- sion. Cognitive Psychology, 58:250-271.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "What do RNN language models learn about filler-gap dependencies?", |
| "authors": [ |
| { |
| "first": "Ethan", |
| "middle": [], |
| "last": "Wilcox", |
| "suffix": "" |
| }, |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Takashi", |
| "middle": [], |
| "last": "Morita", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Futrell", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop Black-boxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "211--221", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ethan Wilcox, Roger Levy, Takashi Morita, and Richard Futrell. 2018. What do RNN language models learn about filler-gap dependencies? In Proceedings of the 2018 EMNLP Workshop Black- boxNLP: Analyzing and Interpreting Neural Net- works for NLP, pages 211-221, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "a. The book that was liked by my cousin ... b. The book liked by my cousin ... In an object RC the gap is in the object position of the embedded clause. In English, object RCs can be unreduced (7a) or reduced (7b): (7) a. The book that my cousin liked ... b. The book my cousin liked ...Finally, we also included two additional conditions with verb coordination: one with nearly identical word order and lexical content as active subject RCs ((8); ASRC-matched Coordination), and another with nearly identical word order and lexical content as passive RCs and object RCs ((9); PS/ORC-matched Coordination). 5(8) My cousin liked the book and ..." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "The book liked my cousin and ..." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Figure 1: A schematic for calculating the similarity between two structures S X and S Y in an LM's representation space. X 1 , X 2 and Y 1 , Y 2 are non-lexicallyoverlapping sets of sentences with S X and S Y respectively. Model X and Model Y refer to versions of a fully trained model that have been adapted to either X 1 or Y 1 respectively. Surp X () and Surp Y () are functions that return the surprisal of sentences for Model X and Model Y ." |
| }, |
| "FIGREF3": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "The adaptation effect when models adapted to sentences with reduced and unreduced RCs are tested on sentences that match only in reduction (top right), match only in passivity (bottom right), match in both reduction and passivity (top left) or sentences that match in neither (bottom right)." |
| }, |
| "FIGREF6": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "(b) Agreement prediction accuracy on reduced object RCs and unreduced object RCs as a function of D(RC, \u00acRC)" |
| }, |
| "FIGREF7": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "A schematic of how sentences belonging to different linguistically defined classes are related to each other in the LMs' representation space. Each colour indicates a different level of hierarchy." |
| }, |
| "TABREF0": { |
| "html": null, |
| "content": "<table><tr><td>representations of sentences with S Y .</td></tr><tr><td>2.3 LM adaptation as cumulative priming</td></tr><tr><td>Van Schijndel and Linzen (2018) modeled cu-</td></tr><tr><td>mulative priming in recurrent neural networks</td></tr><tr><td>(RNNs) by adapting fully trained RNN LMs to</td></tr><tr><td>new stimuli -i.e. taking a fully trained RNN LM</td></tr><tr><td>and continuing to train it on a small set of sen-</td></tr><tr><td>tences (cf. Grave et al. 2017; Krause et al. 2017;</td></tr></table>", |
| "type_str": "table", |
| "text": "Examples of sentences generated using templates containing the seven abstract structures we analyzed (optional elements, which only occur in a subset of the examples, are indicated in grey).", |
| "num": null |
| } |
| } |
| } |
| } |