| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:15:04.545262Z" |
| }, |
| "title": "Data-driven Identification of Idioms in Song Lyrics", |
| "authors": [ |
| { |
| "first": "Miriam", |
| "middle": [], |
| "last": "Amin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Leipzig University", |
| "location": { |
| "settlement": "Leipzig", |
| "country": "Germany" |
| } |
| }, |
| "email": "miriam_amin@web.de" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Fankhauser", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "fankhauser|kupietz|schneider@ids-mannheim.de" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Kupietz", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The automatic recognition of idioms poses a challenging problem for NLP applications. Whereas native speakers can intuitively handle multiword expressions whose compositional meanings are hard to trace back to individual word semantics, there is still ample scope for improvement regarding computational approaches. We assume that idiomatic constructions can be characterized by gradual intensities of semantic non-compositionality, formal fixedness, and unusual usage context, and introduce a number of measures for these characteristics, comprising count-based and predictive collocation measures together with measures of context (un)similarity. We evaluate our approach on a manually labelled gold standard, derived from a corpus of German pop lyrics. To this end, we apply a Random Forest classifier to analyze the individual contribution of features for automatically detecting idioms, and study the trade-off between recall and precision. Finally, we evaluate the classifier on an independent dataset of idioms extracted from a list of Wikipedia idioms, achieving state-of-the art accuracy.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The automatic recognition of idioms poses a challenging problem for NLP applications. Whereas native speakers can intuitively handle multiword expressions whose compositional meanings are hard to trace back to individual word semantics, there is still ample scope for improvement regarding computational approaches. We assume that idiomatic constructions can be characterized by gradual intensities of semantic non-compositionality, formal fixedness, and unusual usage context, and introduce a number of measures for these characteristics, comprising count-based and predictive collocation measures together with measures of context (un)similarity. We evaluate our approach on a manually labelled gold standard, derived from a corpus of German pop lyrics. To this end, we apply a Random Forest classifier to analyze the individual contribution of features for automatically detecting idioms, and study the trade-off between recall and precision. Finally, we evaluate the classifier on an independent dataset of idioms extracted from a list of Wikipedia idioms, achieving state-of-the art accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Traditional accounts of idiomaticity distinguish idiomatic use of language from literal use, claiming that idioms are multiword expressions (MWEs) which do not conform to Frege's principle, i.e. whose meaning as a whole cannot fully be derived from the aggregated meaning of their components (Gibbon, 1982) . In other words, the definition refers to non-compositionality and non-transparencyidiomatic MWEs seem semantically opaque; Baldwin and Kim (2010) consider this \"lexical idiomaticity\" to be one of five sub-types of idiomacity. Classifying idioms is not trivial: With reference to recent findings in discourse analysis and psycholinguistics, Wulff (2008) describes idiomati-city as a non-binary, multifactorial concept for a \"continuum ranging from clearly non-idiomatic patterns to core idioms\"; Pradhan et al. (2018) support this observation experimentally. At least core idioms are considered to be (mentally) lexicalized: Schneider et al. (2014) describe them as \"lexicalized combinations of two or more words\" which, though often syntactically diverse, \"are exceptional enough to be considered as individual units in the lexicon\". This corresponds to Sinclair's idiom principle (Sinclair, 1991) , postulating that text is often constructed from ready made phrases. Due to morphological and syntactic variation, the degree of formal fixedness ranges from semi-to fully-fixed. However, idiomaticity should be corpus-based verifiable, as e.g. Gries (2008, p. 22) states that \"researchers interested in phraseologisms use frequencies and other more elaborated statistics\" to identify \"symbolic units and constructions\". Some of these statistics may relate to local contexts, because one can reasonably argue that words that are not used literally will probably be somehow surprising in their context.", |
| "cite_spans": [ |
| { |
| "start": 292, |
| "end": 306, |
| "text": "(Gibbon, 1982)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 649, |
| "end": 661, |
| "text": "Wulff (2008)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 804, |
| "end": 825, |
| "text": "Pradhan et al. (2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 933, |
| "end": 956, |
| "text": "Schneider et al. (2014)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1190, |
| "end": 1206, |
| "text": "(Sinclair, 1991)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 1452, |
| "end": 1471, |
| "text": "Gries (2008, p. 22)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Against this background, we regard idioms as a subcategory of MWEs that are conspicuous in function, form and distribution -and with fuzzy boundaries to other multiword units like metaphors (Stefanowitsch and Gries, 2007) or proverbs. Our objective is to cover idiom characteristics with an innovative set of quantitative features, taking up some ideas described in the subsequent section, and to apply and evaluate machine-learning classifiers for a presumable idiomatically rich specialized corpus.", |
| "cite_spans": [ |
| { |
| "start": 209, |
| "end": 221, |
| "text": "Gries, 2007)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Idioms are a key concern and pose challenging problems for NLP applications such as information extraction, retrieval, summarization and translation, as well as for lexicographical studies or lan-guage learning; see Constant et al. (2017) . Sag et al. (2002) refer to them as \"a pain in the neck for NLP\"; consequently their machine-supported recognition constitutes an ideal testbed for a variety of methodical approaches and is the subject of shared tasks; see, e.g., Markantonatou et al. (2020) . Fazly and Stevenson (2006) propose measures that quantify the degree of lexical and syntactic fixedness. Verma and Vuppuluri (2015) rely on lexical features in order to identify MWEs whose meanings differ from their components' meanings. Sporleder and Li (2009) include the collocational contexts of idiomatic MWEs into their computation; they model semantic relatedness with the help of lexical chains and cohesion graphs, and, based on this, compare supervised with unsupervised approaches for token-based idiom classification. Katz and Giesbrecht (2006) use latent semantic analysis in order to verify whether context word vectorsimilarity between idiomatic MWEs and its constituents helps with the calculation. Muzny and Zettlemoyer (2013) achieve a precision level of 65% for the distinction between idiomatic and literal wiktionary phrases, using lexical and graph-based features in order to quantify the assumption that literal phrases are more likely to have closely related words in their definition clause than idiomatic phrases. Salton et al. (2016) investigate whether Sentential Distributed Semantics of idiomatic verbnoun (VN) combinations show significant differences from non-idiomatic usage, and therefore train Sent2Vec models for sentence-level contexts. Using the same dataset, compute local context differences between word vector matrices on the basis of Frobenius norm. Senaldi et al. 
2019train vector-based models on a gold standard of VN constructions that has been annotated regarding idiomaticity on a 1-7 Likert scale. Hashempour and Villavicencio (2020) use contextualized word embeddings in order to distinguish between literal and idiomatic senses of MWEs that are treated as individual tokens in training and testing, producing average F1-scores of more than 70%.", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 238, |
| "text": "Constant et al. (2017)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 241, |
| "end": 258, |
| "text": "Sag et al. (2002)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 470, |
| "end": 497, |
| "text": "Markantonatou et al. (2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 500, |
| "end": 526, |
| "text": "Fazly and Stevenson (2006)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 605, |
| "end": 631, |
| "text": "Verma and Vuppuluri (2015)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 738, |
| "end": 761, |
| "text": "Sporleder and Li (2009)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1030, |
| "end": 1056, |
| "text": "Katz and Giesbrecht (2006)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1540, |
| "end": 1560, |
| "text": "Salton et al. (2016)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 2047, |
| "end": 2082, |
| "text": "Hashempour and Villavicencio (2020)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We take up the idea of evaluating different context representations, expand corresponding measures with syntagmatic and other statistical features, and analyze how they complement each other to characterize idioms. Furthermore, we broaden the scope by extending the dataset beyond VN combinations, including all kinds of MWEs without morphosyntactic restrictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The aim of this study is to evaluate quantitative features of MWEs with regard to their suitability of detecting idiomatic MWEs in a given text corpus. Contemporary pop song lyrics -a yet sparsely examined register -seem intrinsically promising for two reasons: Firstly, lyrics combine qualities of spoken and written language (Werner, 2012) with wordplay creativity (Kreyer, 2012) and can thus be expected to constitute a valuable source of both well-known and innovative idiomatic constructions. Secondly, on account of their formal structure, catchy and often idiomatic phrases tend to be repeated in choruses, so that there should be good prospects for empirical evidence. We use the freely available Corpus of German Song Lyrics (Schneider, 2020) , covering a period of five decades and a broad range of artists, in order to ensure that our findings can be reproduced and compared by future studies. The general approach should also be applicable to languages other than German.", |
| "cite_spans": [ |
| { |
| "start": 327, |
| "end": 341, |
| "text": "(Werner, 2012)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 367, |
| "end": 381, |
| "text": "(Kreyer, 2012)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 734, |
| "end": 751, |
| "text": "(Schneider, 2020)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Although the corpus comes with XML-coded multi-layer annotations, we mainly work on the raw data and do not rely on linguistic preprocessing like parsing or lemmatization. To avoid reference to lexica or pre-defined syntactic template lists (like V-NP constructions), we include any ngram, spanning a minimum of two word tokens and a maximum of six word tokens within sentence boundaries. This yields a dataset of more than six million ngrams. From these we randomly select a sample of 10,000 ngrams.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This dataset is manually annotated by a native speaker in order to serve as a gold standard. To cope with the abovementioned fact that idiomatic status cannot always be described as either clearly idiomatic or clearly literal, we allow for three categories and mark idiom candidates as either literal, idiomatic, or partly idiomatic, where the latter comprises ngrams with both idiomatic and nonidiomatic content, which are excluded for our analysis, see Table 4 in Section 4, for exact numbers.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 455, |
| "end": 462, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As a starting point for our evaluation, each dataset entry is automatically annotated with a number of features. We distinguish between three main groups of features to characterize idioms, for a detailed break down see Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 220, |
| "end": 227, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Syntagmatic features (SY) measure collocation strength between all word pairs within an idiom candidate. Context features (CO) measure semantic similarity between the words within an idiom can-didate and the words in its left/right context. Finally, other features (O) represent a variety of counts to assess the amount of evidence available, such as number of words in an idiom candidate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "SY_C1 and SY_C2 comprise a number of countbased collocation measures between a word and its neighbours within a window of +/-5 1 (Evert, 2008) . SY_C1 are based on the counts in DeReKo (Kupietz et al., 2010) , whereas SY_C2 are based on the counts in the pop lyrics corpus. These count-based measures all aim at identifying MWEs that occur more often than randomly expected. We expect that idioms, like other MWEs, are characterized by high SY_C.", |
| "cite_spans": [ |
| { |
| "start": 129, |
| "end": 142, |
| "text": "(Evert, 2008)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 185, |
| "end": 207, |
| "text": "(Kupietz et al., 2010)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "SY_W comprises a number of predictive collocation measures. These are all calculated by aggregating the output activations in a three layer neural network using the structured skipgram variant (Ling et al., 2015) of word2vec (Mikolov et al., 2013) , again with a window size of +/-5 2 . As shown by Levy and Goldberg (2014) , these output activations approximate the shifted pointwise mutual information 3 . These predictive measures generalize from actually used collocations by means of dimensionality reduction in the hidden layer and thus can also predict unseen but meaningful collocations. However, due to generalization they are typically biased towards the dominant, usually literal usage. Thus, we expect that idioms, unlike other MWEs, are characterized by low SY_W.", |
| "cite_spans": [ |
| { |
| "start": 193, |
| "end": 212, |
| "text": "(Ling et al., 2015)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 225, |
| "end": 247, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 299, |
| "end": 323, |
| "text": "Levy and Goldberg (2014)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Tables 1 and 2 exemplify the interplay between count-based and predictive collocations. Among the top 10 count-based collocates of 'Kuh' (cow), there are 6 collocates (in bold) stemming from idiomatic use, for example, 'die Kuh vom Eis kriegen' literally for 'getting the cow from the ice' meaning 'working out a situation'. In contrast, the predictive collocates all pertain to the literal meaning of cow as a domestic animal; e.g., 'Eis' does not occur among the top 400 predictive collocates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The count-based and predictive collocates of 'Versuch' ('attempt'), on the other hand, show no such difference. Both refer to the literal meaning of 'Versuch'. However, also here we can observe a bias of the predictive collocates towards 'failed attempts'.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "SY_R comprises non-parametric variants for some collocation measures by means of their ranks to account for the different scales of SY_C1 and SY_W. This includes SY_C1_R, SY_W_R1, SY_W_R2, and the rank difference SY_R_D.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As depicted in Equation 1, for all syntagmatic collocation measures , we take the average over all pairs of words , in an idiom candidate of size | |. Null-values, occurring when there exists no pair with measures from DeReKo, are transformed to min (or max) values appropriate for each feature.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2211 \u2260 ( , )/| |(| | \u2212 1)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The context features CO_VEC and CO_VEC_LEX aim at identifying idioms based on the heuristics that they occur within unusual thematic contexts. Idiomatic ngrams such as 'Perlen vor die S\u00e4ue werfen' ('cast pearls before swine') are often found in local contexts that are thematically rather untypical for non-idiomatic uses of the individual ngram words. The expression can be expected in a theatre review or a political speech, but rather not in texts explicitly dealing with jewellery or livestock. To this end, CO_VEC uses cosine similarity between word vectors, which identifies paradigmatically related words occurring in similar usage contexts, comprising (near) synonyms, but also hyponyms, meronyms, etc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Continuing with the above example, among the most similar words for 'Perle' are words like 'Kostbarkeit' ('preciousness'), 'Schatztruhe' ('treasure chest'), 'Liebeserkl\u00e4rung' ('declaration of love') or 'Brosche' ('brooch'). Close to 'S\u00e4ue', we find 'Rindvieh' ('cattle'), 'Schafe' ('sheep'), 'K\u00f6ter' ('pooch'), 'Hufe' ('hooves') or 'Schlachtbank' ('slaughterhouse'). Assuming that these words appear less likely in the local contexts of our example idiom than in the typical contexts of its constituents, low value for CO_VEC may indicate idiomatic use.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "More specifically, CO_VEC is calculated as the mean cosine similarity between all pairs of words in the idiom candidate of size | | and words in the left/right context of size | | (in the present case we include five context words to the left and right 4 ; see Figure 1 and Equation 2). CO_VEC_LEX is calculated like CO_VEC, but only takes lexical words into account, i.e. nouns, verbs, adverbs and adjectives. If the idiom candidate appears at several places within the corpus, an average is calculated.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 261, |
| "end": 269, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2211 , ( , )/| || |", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The last group O comprises O_GRAM, the number of words in an idiom candidate, O_NSTOPW 5 , the number of non stopwords, and O_DEREKO, the number of words for which a word embedding is available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In summary, the syntagmatic features (SY) analyze idiom candidates for frequent (SY_C), but unusual (SY_W) collocations along the syntagmatic axis to assess their phraseness and non transparency. The context features (CO) analyze their surrounding context for unsimilar words along the paradigmatic axis as a complementary measure of non transparency. Both feature sets utilize the observation that word embeddings are typically biased towards the dominant/transparent meaning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To evaluate our feature set we have trained a Random Forest classifier 6 . Unless stated explicitly otherwise, all results have been obtained using 5-fold cross validation. To avoid overlap between training and test sets, we have removed all duplicates after lower-casing and stopword removal, leaving a dataset with 542 idioms and 8697 non-idioms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Because this dataset is highly unbalanced, we have systematically varied the Random Forest's cutoff hyperparameter (default 0.5). As shown in Figure 2 , a cutoff of 0.3 achieves the best F1-Score of 61.9%, balancing recall and precision around 62%. The best balanced accuracy of 83% is achieved at a much smaller cutoff of about 0.05. This may be a more appropriate cutoff for explorative idiom detection, where sensitivity (recall) is more important than precision.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 142, |
| "end": 150, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To assess the contribution of the individual feature sets, we compare classification performance between using all features, each feature set individually, and subsets of features obtained by excluding individual feature sets. Table 3 summarizes the results 7 : All individual feature sets except O contribute to classification performance. The biggest contribution comes from the collocation features based on DeReKo counts (SY_C1), followed by the collocation features based on the (much smaller) pop lyrics corpus (SY_C2) and the predictive collocation features SY_W. The bottom half of the table analyzes how much performance is lost when excluding a feature set. The relative order is largely consistent with the upper half. In particular, also from this perspective, count-based collocations SY_C1 (including their rank variants) turn out to be most important, i.e., they lead to the largest loss in performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 227, |
| "end": 234, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Interestingly, omitting the other features (O) also decreases performance, even though they do not contribute individually. This may be due to the fact that they do not model intrinsic characteristics of idioms, but just the number of word pairs available for estimating SY and CO feature sets, i.e., essentially the amount of evidence available. Thus they are only useful in combination with other feature sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For SY_R the effect is the other way around. SY_R has a remarkable F1-Score of 29.5% when taken alone, but the overall performance increases, when the classifier is trained on all feature sets but SY_R. The lack of loss in performance may be due to the fact SY_R is highly correlated with SY_C1 and SY_W by construction, and thus does not add information. The slight increase seems to be a random effect. Table 4 details the classification performance for the best feature set (w/o SY_R). Interestingly enough, when inspecting the false positives, we find that our approach identifies full idioms overlooked by the manual dataset annotation, such as 'in meine Fu\u00dfstapfen treten' ('follow in my footsteps') or 'hinter Gitterst\u00e4ben' (lit. 'behind thick bars', meaning: 'in prison'). We also see partly idiomatic MWEs like 's\u00fc\u00dfes Gift' ('sweet poison'), as well as supposedly incomplete idioms like 'nur ein leeres [Versprechen?]' ('only an empty [promise?]'). The automatic classification even detects previously hidden teenage slang idioms such as 'Optik schieben' (lit'\u1e6bo push optics', approximately: 'to be under the influence of hallucinogenic drugs'). Besides, related phenomena like metaphors ('fahren in Richtung Gold', literal: 'drive towards gold') and allegories ('das ganze Leben ist ein Quiz', literal: 'all of life is a quiz') are labelled. Indeed, approximately 8% of the false positives show idiomatic or figurative use.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 405, |
| "end": 412, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In order to better understand the interplay between features, the information gain (*1000), TTest the degree of significance by a Welch two sample t-test for confidence levels 0.95 (*), 0.99 (**), and 0.999 (***), and \u0394 the sign of the difference between the mean of a feature for idioms vs. non-idioms. The context features CO_VEC and CO_VEC_LEX have the highest MDA followed by the other features O and the count-based collocation features estimated from the pop lyrics corpus SY_C2. All collocation (and rank) features estimated from DeReKo are in a similar range. Note however, that MDA tends to be shared among correlated features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "IGain assesses the individual (univariate) contribution of the features for classification. The two estimates of the overall frequency of an idiom candidate O_C2_N and O_C2_SGT have the highest IGain, closely followed by the count-based collocation features SY_C2 and SY_C1. The predictive collocation features SY_W and context features CO have slightly smaller IGain. This largely corroborates the results of the analysis of feature sets above.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "With the exception of CO_VEC and two of the predictive collocation features, the difference between the means of all features in idioms vs. nonidioms is highly significant.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To better understand the contribution of the individual features, it is helpful to look at the difference \u0394 between their means: Compared to all non-idioms, words within idioms have a lower cosine similarity CO_VEC (but still higher CO_VEC_LEX) to their left and right neighbours, i.e., indeed they occur in unusual contexts. On the other hand, they have a higher count-based and predictive collocation strength among each other (SY_C1, SY_C2, SY_W) with some exceptions (SY_C1_LL,SY_W_CON,SY_W_NSUMAF).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Consequently, they also have a smaller rank for these measures (SY_C1_R, SY_W_R1, SY_W_R2), although we would expect larger ranks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "However, non-idioms comprise random ngrams that do not occur more often than expected as well as frequent MWEs with high collocation strength. Thus it is instructive to constrain the comparsion as follows: \u0394 \u2032 gives the sign of the difference between the mean for idioms and all those non-idioms with SY_C1_LD larger than the mean of SY_C1_LD of all non-idioms, i.e., only the non-idiomatic but still frequent MWEs. Incidentally, all these differences are highly significant (at least 0.99), with the exception of CO_VEC. In this comparison, the context features CO and both, the count-based and predictive collocation features estimated from DeReKo (SY_C1 and SY_W, except SY_C1_MI,) are smaller, and accordingly the corresponding rank features are larger for idioms. In particular, the rank difference SY_R_D between count-based and predictive collocation is larger, i.e., co-occuring words in an idiom tend to be less represented by the predictive collocations which are biased towards the dominant meaning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In summary, idioms, like non-idiomatic MWEs, are characterized by high collocation strength in comparison to randomly selected ngrams. However, in comparison with non-idiomatic but frequent MWEs, they are characterized by occurring in unusual contexts (low CO_VEC), and by low predictive collocation strength SY_W; or, put more bluntly, idiomatic MWEs occur frequently but are unusual.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To demonstrate the transferability of our approach, we have applied it to a dataset of German idioms extracted from German Wikipedia 8 . After removing duplicates (72) with our gold standard 9 , and all idioms that consist of less than 2 words after stopword removal, this set comprises 760 idioms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As training set for this out-of-domain scenario, we use a sample of 80% of non-idioms and all idioms of our base data set. The test set consists of the remaining 20% of the non-idioms and the Wikipedia idioms. We train the classifier on the feature ensemble SY_C1 + SY_W + SY_R + O (without the feature O_DEREKO). This is because the feature sets SY_C2 and CO are calculated based on", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods and results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "MDA IGain TTest \u0394 \u0394 \u2032 Description SY_C1_LD 9.8 30.4 *** + -logdice (Rychl\u00fd, 2008 Figure 3 shows the trade-off curves of the predictions on the Wikipedia dataset for a range of cut-off thresholds. The obtained results are rather convincing. With a cutoff threshold of 0.05, the classifier achieves an F1-Score of 71.0% and a recall of 80.3%, which means that the classifier is able to detect the majority of the unknown Wikipedia idioms. While not directly comparable due to different datasets and classification tasks, these results are in the same ballpark as e.g. Hashempour and Villavicencio (2020) who report F1-Scores of 70%. Table 6 gives the confusion matrix of the prediction on the unknown idioms.", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 80, |
| "text": "(Rychl\u00fd, 2008", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 566, |
| "end": 601, |
| "text": "Hashempour and Villavicencio (2020)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 81, |
| "end": 89, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 631, |
| "end": 638, |
| "text": "Table 6", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature", |
| "sec_num": null |
| }, |
| { |
| "text": "The aim of this study was to model well-studied idiom characteristics with quantitative features and to evaluate them on suitable datasets. Our evaluations show that count-based collocation measures indeed characterize idioms' frequent usage and stable occurrence, i.e. phraseness. The predictive collocation measures and the context features on the other hand are able to model uncommon usage, that is, non-transparency. By applying our model, trained on an annotated dataset that was sampled from a pop lyrics corpus, to an out-of-domain dataset of idioms crawled from Wikipedia, we demonstrated the generalizability of our approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The introduced features do not require sophisticated or knowledge-intensive preprocessing, and need only minimal context. Even when no context is available, as for the out-of-domain dataset, we achieve state-of-the-art classification performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "However, the feature set also has limitations. For idioms that consist of only one content word, possibly with some stopwords, the collocation measures do not produce very meaningful results. In this case we need to entirely rely on the context features. In a similar vein, count based collocation strength obviously does not apply to novel idioms. Moreover, when idiomatic use constitutes the overwhelmingly dominant use, such as 'kenne meine Pappenheimer' (literal: 'know my Pappenheimers', roughly: 'know the weak people (in my team)'), neither CO nor SY_W features can contribute.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "But in sum, all evaluation results -and the detailed analysis of how the count-based and predictive features complement each other for discriminating between idioms and non-idioms -shed an additional empirical light on the linguistically intricate and multifaceted phenomenon of idiomaticity. Waiving limitations on morphosyntactic templates (like, e.g., VN constructions), our approach should work well for any potentially idiomatic MWEs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For future work, we intend to apply the approach to bigger datasets; attractive candidates might be the corpora of the PARSEME (PARsing and Multiword Expressions) network Savary et al. (2018) or the COLF-VID dataset of verbal idioms Ehren et al. (2020) . We will also experiment with additional features, in particular to better capture fixedness of idiomaticity and cope with non transparent compound idiomatic words.", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 191, |
| "text": "Savary et al. (2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 233, |
| "end": 252, |
| "text": "Ehren et al. (2020)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "All data and source code is publicly available under a Creative Commons license at http:// songkorpus.de/data/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "All measures with autofocus (AF) select those neighbours in the window which maximize the measure.2 DeReKoVecs (Fankhauser and Kupietz, 2019, http://corpora.ids-mannheim.de/openlab/derekovecs, accessed 2021-04-23) has been trained on DeReKo. 3 $SY\\_W(w, n) = \\log\\left(\\frac{p(w, n)}{p(w)\\,p(n)}\\right) - \\log(k)$, with $k$ the number of negative samples used during training, and $p(w)$, $p(n)$, $p(w, n)$ the individual and joint relative frequencies of a word and its neighbour", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Similar measures, applied to context words within sentence boundaries, have been used in K\u00f6per and Schulte im Walde (2017) or Kurfal\u0131 and \u00d6stling (2020) for the detection of non-literal meaning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "SY_C1 and SY_W features are calculated on the idiom candidate after stopword removal.6 Support Vector Machines yield similar accuracies and scores.7 Standard deviation of Balanced Accuracy, measured over 10 5 x cross validations with different seeds is around 0.5 for all feature combinations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://de.wikipedia.org/wiki/Liste_deutscher_Redewendungen, accessed February 22, 2021.9 All these duplicates have been independently annotated correctly as idioms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Multiword expressions", |
| "authors": [ |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Su", |
| "middle": [ |
| "Nam" |
| ], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Handbook of Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "267--292", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy Baldwin and Su Nam Kim. 2010. Multiword expressions. In Nitin Indurkhya and Fred J. Dam- erau, editors, Handbook of Natural Language Pro- cessing, Second Edition, pages 267-292. CRC Press, Boca Raton.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Survey: Multiword expression processing: A Survey", |
| "authors": [ |
| { |
| "first": "Mathieu", |
| "middle": [], |
| "last": "Constant", |
| "suffix": "" |
| }, |
| { |
| "first": "G\u00fcl\u015fen", |
| "middle": [], |
| "last": "Eryi\u01e7it", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanna", |
| "middle": [], |
| "last": "Monti", |
| "suffix": "" |
| }, |
| { |
| "first": "Lonneke", |
| "middle": [], |
| "last": "Van Der", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Plas", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Amalia", |
| "middle": [], |
| "last": "Rosner", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Todirascu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Computational Linguistics", |
| "volume": "43", |
| "issue": "4", |
| "pages": "837--892", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/COLI_a_00302" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mathieu Constant, G\u00fcl\u015fen Eryi\u01e7it, Johanna Monti, Lon- neke van der Plas, Carlos Ramisch, Michael Rosner, and Amalia Todirascu. 2017. Survey: Multiword ex- pression processing: A Survey. Computational Lin- guistics, 43(4):837-892.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Approche mixte pour l'extraction de terminologie: statistique lexicale et filtres linguistiques", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "B\u00e9atrice Daille", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B\u00e9atrice Daille. 1994. Approche mixte pour l'extrac- tion de terminologie: statistique lexicale et filtres lin- guistiques. Ph.D. thesis, Paris 7.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Gravity counts for the boundaries of collocations", |
| "authors": [ |
| { |
| "first": "Vidas", |
| "middle": [], |
| "last": "Daudaravi\u010dius", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u016bta", |
| "middle": [], |
| "last": "Marcinkevi\u010dien\u0117", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "International Journal of Corpus Linguistics", |
| "volume": "9", |
| "issue": "2", |
| "pages": "321--348", |
| "other_ids": { |
| "DOI": [ |
| "10.1075/ijcl.9.2.08dau" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vidas Daudaravi\u010dius and R\u016bta Marcinkevi\u010dien\u0117. 2004. Gravity counts for the boundaries of colloca- tions. International Journal of Corpus Linguistics, 9(2):321-348.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Supervised disambiguation of German verbal idioms with a BiLSTM architecture", |
| "authors": [ |
| { |
| "first": "Rafael", |
| "middle": [], |
| "last": "Ehren", |
| "suffix": "" |
| }, |
| { |
| "first": "Timm", |
| "middle": [], |
| "last": "Lichte", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Kallmeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakub", |
| "middle": [], |
| "last": "Waszczuk", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Second Workshop on Figurative Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.figlang-1.29" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rafael Ehren, Timm Lichte, Laura Kallmeyer, and Jakub Waszczuk. 2020. Supervised disambiguation of German verbal idioms with a BiLSTM architec- ture. In Proceedings of the Second Workshop on Fig- urative Language Processing, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Corpora and collocations", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [ |
| "Evert" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Corpus Linguistics. An International Handbook, chapter 58", |
| "volume": "", |
| "issue": "", |
| "pages": "1212--1248", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan Evert. 2008. Corpora and collocations. In Anke L\u00fcdeling and Merja Kyt\u00f6, editors, Corpus Linguist- ics. An International Handbook, chapter 58, pages 1212-1248. Mouton de Gruyter, Berlin, Germany.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Analyzing domain specific word embeddings for a large corpus of contemporary German", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Fankhauser", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Kupietz", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Corpus Linguistics Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.14618/ids-pub-9117" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Fankhauser and Marc Kupietz. 2019. Analyzing domain specific word embeddings for a large corpus of contemporary German. In International Corpus Linguistics Conference, Cardiff, Wales, UK, July 22- 26, 2019, Mannheim. Leibniz-Institut f\u00fcr Deutsche Sprache (IDS).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Automatically constructing a lexicon of verb phrase idiomatic combinations", |
| "authors": [ |
| { |
| "first": "Afsaneh", |
| "middle": [], |
| "last": "Fazly", |
| "suffix": "" |
| }, |
| { |
| "first": "Suzanne", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 11st Conference of the European Chapter of the Association for Computational Linguistics (EACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "337--344", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Afsaneh Fazly and Suzanne Stevenson. 2006. Automat- ically constructing a lexicon of verb phrase idiomatic combinations. In Proceedings of the 11st Confer- ence of the European Chapter of the Association for Computational Linguistics (EACL), pages 337-344.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Violations of frege's principle and their significance for contrastive semantics. Papers and Studies in Contrastive Linguistics", |
| "authors": [ |
| { |
| "first": "Dafydd", |
| "middle": [], |
| "last": "Gibbon", |
| "suffix": "" |
| } |
| ], |
| "year": 1982, |
| "venue": "", |
| "volume": "14", |
| "issue": "", |
| "pages": "5--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dafydd Gibbon. 1982. Violations of frege's principle and their significance for contrastive semantics. Pa- pers and Studies in Contrastive Linguistics, 14:5-24.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Phraseology and linguistic theory: A brief survey", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Gries", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Phraseology: An interdisciplinary perspective", |
| "volume": "", |
| "issue": "", |
| "pages": "3--25", |
| "other_ids": { |
| "DOI": [ |
| "10.1075/z.139.06gri" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan Gries. 2008. Phraseology and linguistic the- ory: A brief survey. In Sylviane Granger and Fanny Meunier, editors, Phraseology: An interdisciplin- ary perspective, pages 3-25. Amsterdam: John Ben- jamins.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Lexical gravity across varieties of english: An ice-based study of n-grams in asian englishes", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Gries", |
| "suffix": "" |
| }, |
| { |
| "first": "Joybrato", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "International Journal of Corpus Linguistics", |
| "volume": "15", |
| "issue": "", |
| "pages": "520--548", |
| "other_ids": { |
| "DOI": [ |
| "10.1075/ijcl.15.4.04gri" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan Gries and Joybrato Mukherjee. 2010. Lexical gravity across varieties of english: An ice-based study of n-grams in asian englishes. International Journal of Corpus Linguistics, 15:520-548.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Leveraging contextual embeddings and idiom principle for detecting idiomaticity in potentially idiomatic expressions", |
| "authors": [ |
| { |
| "first": "Reyhaneh", |
| "middle": [], |
| "last": "Hashempour", |
| "suffix": "" |
| }, |
| { |
| "first": "Aline", |
| "middle": [], |
| "last": "Villavicencio", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Workshop on the Cognitive Aspects of the Lexicon", |
| "volume": "", |
| "issue": "", |
| "pages": "72--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reyhaneh Hashempour and Aline Villavicencio. 2020. Leveraging contextual embeddings and idiom prin- ciple for detecting idiomaticity in potentially idio- matic expressions. In Proceedings of the Workshop on the Cognitive Aspects of the Lexicon, pages 72-80. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Automatic identification of non-compositional multiword expressions using latent semantic analysis", |
| "authors": [ |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Katz", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugenie", |
| "middle": [], |
| "last": "Giesbrecht", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Workshop on Multiword Expressions: Identifying and Exploiting Underlying Properties", |
| "volume": "", |
| "issue": "", |
| "pages": "12--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Graham Katz and Eugenie Giesbrecht. 2006. Auto- matic identification of non-compositional multi- word expressions using latent semantic analysis. In Proceedings of the Workshop on Multiword Expres- sions: Identifying and Exploiting Underlying Prop- erties, pages 12-19, Sydney, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Love is like a stove -it burns you when it's hot\": A corpus-linguistic view on the (non-)creative use of love-related metaphors in pop songs", |
| "authors": [ |
| { |
| "first": "Rolf", |
| "middle": [], |
| "last": "Kreyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Language and Computers", |
| "volume": "", |
| "issue": "", |
| "pages": "103--115", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rolf Kreyer. 2012. \"Love is like a stove -it burns you when it's hot\": A corpus-linguistic view on the (non- )creative use of love-related metaphors in pop songs. Language and Computers, pages 103-115.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "The German Reference Corpus DeReKo: A Primordial Sample for Linguistic Research", |
| "authors": [ |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Kupietz", |
| "suffix": "" |
| }, |
| { |
| "first": "Cyril", |
| "middle": [], |
| "last": "Belica", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Keibel", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Witt", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Seventh International Conference On Language Resources And Evaluation (LREC'10)", |
| "volume": "", |
| "issue": "", |
| "pages": "1848--1854", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc Kupietz, Cyril Belica, Holger Keibel, and An- dreas Witt. 2010. The German Reference Cor- pus DeReKo: A Primordial Sample for Linguistic Research. In Proceedings of the Seventh Interna- tional Conference On Language Resources And Eval- uation (LREC'10), page 1848-1854, Valletta / Paris. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Disambiguation of potentially idiomatic expressions with contextual embeddings", |
| "authors": [ |
| { |
| "first": "Murathan", |
| "middle": [], |
| "last": "Kurfal\u0131", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "\u00d6stling", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Joint Workshop on Multiword Expressions and Electronic Lexicons", |
| "volume": "", |
| "issue": "", |
| "pages": "85--94", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Murathan Kurfal\u0131 and Robert \u00d6stling. 2020. Disam- biguation of potentially idiomatic expressions with contextual embeddings. In Proceedings of the Joint Workshop on Multiword Expressions and Electronic Lexicons, pages 85-94, online. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Applying multi-sense embeddings for German verbs to determine semantic relatedness and to detect nonliteral language", |
| "authors": [ |
| { |
| "first": "Maximilian", |
| "middle": [], |
| "last": "K\u00f6per", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabine", |
| "middle": [], |
| "last": "Schulte Im Walde", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter", |
| "volume": "2", |
| "issue": "", |
| "pages": "535--542", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maximilian K\u00f6per and Sabine Schulte im Walde. 2017. Applying multi-sense embeddings for German verbs to determine semantic relatedness and to detect non- literal language. In Proceedings of the 15th Confer- ence of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 535-542.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Neural word embedding as implicit matrix factorization", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy and Yoav Goldberg. 2014. Neural word embedding as implicit matrix factorization. In Ad- vances in Neural Information Processing Systems (NIPS).", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Two/too simple adaptations of Word2Vec for syntax problems", |
| "authors": [ |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabel", |
| "middle": [], |
| "last": "Trancoso", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1299--1304", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/N15-1142" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang Ling, Chris Dyer, Alan W. Black, and Isabel Trancoso. 2015. Two/too simple adaptations of Word2Vec for syntax problems. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1299-1304, Denver, Colorado. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Ashwini Vaidya, Petya Osenova, and Agata Savary", |
| "authors": [ |
| { |
| "first": "Stella", |
| "middle": [], |
| "last": "Markantonatou", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| }, |
| { |
| "first": "Jelena", |
| "middle": [], |
| "last": "Mitrovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Carole", |
| "middle": [], |
| "last": "Tiberius", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "2020. Proceedings of the Joint Workshop on Multiword Expressions and Electronic Lexicons", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stella Markantonatou, John McCrae, Jelena Mitrovi\u0107, Carole Tiberius, Carlos Ramisch, Ashwini Vaidya, Petya Osenova, and Agata Savary, editors. 2020. Proceedings of the Joint Workshop on Multiword Ex- pressions and Electronic Lexicons. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "26", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Automatic idiom identification in Wiktionary", |
| "authors": [ |
| { |
| "first": "Grace", |
| "middle": [], |
| "last": "Muzny", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1417--1421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Grace Muzny and Luke Zettlemoyer. 2013. Automatic idiom identification in Wiktionary. In Proceed- ings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1417-1421, Seattle, Washington, USA. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A distributional semantics model for idiom detection -the case of english and russian", |
| "authors": [ |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Katsiaryna", |
| "middle": [], |
| "last": "Aharodnik", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Feldman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ICAART", |
| "volume": "", |
| "issue": "", |
| "pages": "675--682", |
| "other_ids": { |
| "DOI": [ |
| "10.5220/0006733806750682" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jing Peng, Katsiaryna Aharodnik, and Anna Feldman. 2018. A distributional semantics model for idiom detection -the case of english and russian. ICAART, pages 675-682.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Idioms: Humans or machines, it's all about context", |
| "authors": [ |
| { |
| "first": "Manali", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Feldman", |
| "suffix": "" |
| }, |
| { |
| "first": "Bianca", |
| "middle": [], |
| "last": "Wright", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Computational Linguistics and Intelligent Text Processing -18th International Conference, CICLing 2017, Revised Selected Papers, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)", |
| "volume": "", |
| "issue": "", |
| "pages": "291--304", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-319-77113-7_23" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manali Pradhan, Jing Peng, Anna Feldman, and Bianca Wright. 2018. Idioms: Humans or machines, it's all about context. In Computational Linguistics and In- telligent Text Processing -18th International Confer- ence, CICLing 2017, Revised Selected Papers, Lec- ture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 291-304. Springer.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A lexicographer-friendly association score", |
| "authors": [ |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Rychl\u00fd", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 2nd Workshop on Recent Advances in Slavonic Natural Languages Processing, RASLAN", |
| "volume": "", |
| "issue": "", |
| "pages": "6--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pavel Rychl\u00fd. 2008. A lexicographer-friendly associ- ation score. Proceedings of the 2nd Workshop on Recent Advances in Slavonic Natural Languages Pro- cessing, RASLAN, pages 6-9.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Multiword expressions: A pain in the neck for nlp", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ivan", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Sag", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Bond", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Copestake", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Flickinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Computational Linguistics and Intelligent Text Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1--15", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan A. Sag, Timothy Baldwin, Francis Bond, Ann Copestake, and Dan Flickinger. 2002. Multiword ex- pressions: A pain in the neck for nlp. In Compu- tational Linguistics and Intelligent Text Processing, pages 1-15, Berlin, Heidelberg. Springer Berlin Heidelberg.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Idiom token classification using sentential distributed semantics", |
| "authors": [ |
| { |
| "first": "Giancarlo", |
| "middle": [], |
| "last": "Salton", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Kelleher", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Ross", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "194--204", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1019" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giancarlo Salton, John Kelleher, and Robert Ross. 2016. Idiom token classification using sentential distrib- uted semantics. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 194-204.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Parseme multilingual corpus of verbal multiword expressions", |
| "authors": [ |
| { |
| "first": "Agata", |
| "middle": [], |
| "last": "Savary", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie", |
| "middle": [], |
| "last": "Candito", |
| "suffix": "" |
| }, |
| { |
| "first": "Verginica", |
| "middle": [], |
| "last": "Mititelu", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Bej\u010dek", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabienne", |
| "middle": [], |
| "last": "Cap", |
| "suffix": "" |
| }, |
| { |
| "first": "Slavom\u00edr", |
| "middle": [], |
| "last": "\u010c\u00e9pl\u00f6", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvio", |
| "middle": [], |
| "last": "Cordeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "G\u00fcl\u015fen", |
| "middle": [], |
| "last": "Eryi\u011fit", |
| "suffix": "" |
| }, |
| { |
| "first": "Voula", |
| "middle": [], |
| "last": "Giouli", |
| "suffix": "" |
| }, |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Van Gompel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaakov", |
| "middle": [], |
| "last": "Hacohen-Kerner", |
| "suffix": "" |
| }, |
| { |
| "first": "Jolanta", |
| "middle": [], |
| "last": "Kovalevskait\u0117", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Krek", |
| "suffix": "" |
| }, |
| { |
| "first": "Chaya", |
| "middle": [], |
| "last": "Liebeskind", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanna", |
| "middle": [], |
| "last": "Monti", |
| "suffix": "" |
| }, |
| { |
| "first": "Carla", |
| "middle": [], |
| "last": "Parra Escart\u00edn", |
| "suffix": "" |
| }, |
| { |
| "first": "Lonneke", |
| "middle": [], |
| "last": "Van Der Plas", |
| "suffix": "" |
| }, |
| { |
| "first": "Behrang", |
| "middle": [], |
| "last": "Qasemi Zadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Ramisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Veronika", |
| "middle": [], |
| "last": "Vincze", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Multiword expressions at length and in depth: Extended papers from the MWE 2017 workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "87--147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Agata Savary, Marie Candito, Verginica Mititelu, Eduard Bej\u010dek, Fabienne Cap, Slavom\u00edr \u010c\u00e9pl\u00f6, Silvio Cordeiro, G\u00fcl\u015fen Eryi\u011fit, Voula Giouli, Maarten Van Gompel, Yaakov HaCohen-Kerner, Jolanta Kovalevskait\u0117, Simon Krek, Chaya Liebes- kind, Johanna Monti, Carla Parra Escart\u00edn, Lon- neke Der, Behrang Qasemi Zadeh, Carlos Ramisch, and Veronika Vincze. 2018. Parseme multilingual corpus of verbal multiword expressions. In Stella Markantonatou, Carlos Ramisch, Agata Savary, and Veronika Vincze, editors, Multiword expressions at length and in depth: Extended papers from the MWE 2017 workshop, pages 87-147.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Discriminative lexical semantic segmentation with gaps: Running the mwe gamut", |
| "authors": [ |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Danchik", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "193--206", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00176" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathan Schneider, Emily Danchik, Chris Dyer, and Noah Smith. 2014. Discriminative lexical semantic segmentation with gaps: Running the mwe gamut. Transactions of the Association for Computational Linguistics, 2:193-206.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "A corpus linguistic perspective on contemporary german pop lyrics with the multi-layer annotated \"songkorpus", |
| "authors": [ |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "2020", |
| "issue": "", |
| "pages": "842--848", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roman Schneider. 2020. A corpus linguistic perspect- ive on contemporary german pop lyrics with the multi-layer annotated \"songkorpus\". In Proceed- ings of The 12th Language Resources and Evaluation Conference, LREC 2020, Marseille, France, May 11- 16, 2020, pages 842-848. European Language Re- sources Association.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "What do neural networks actually learn, when they learn to identify idioms?", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [ |
| "S", |
| "G" |
| ], |
| "last": "Senaldi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuri", |
| "middle": [], |
| "last": "Bizzoni", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lenci", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Society for Computation in Linguistics (SCiL)", |
| "volume": "2", |
| "issue": "", |
| "pages": "310--313", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco S. G. Senaldi, Yuri Bizzoni, and A. Lenci. 2019. What do neural networks actually learn, when they learn to identify idioms? In Proceedings of the Soci- ety for Computation in Linguistics (SCiL), volume 2, pages 310-313.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Corpus, concordance, collocation", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Sinclair", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Sinclair. 1991. Corpus, concordance, collocation. University Press, Oxford.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Unsupervised recognition of literal and non-literal use of idiomatic expressions", |
| "authors": [ |
| { |
| "first": "Caroline", |
| "middle": [], |
| "last": "Sporleder", |
| "suffix": "" |
| }, |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 12th Conference of the European Chapter", |
| "volume": "", |
| "issue": "", |
| "pages": "754--762", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1609067.1609151" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Caroline Sporleder and Linlin Li. 2009. Unsupervised recognition of literal and non-literal use of idiomatic expressions. In Proceedings of the 12th Conference of the European Chapter of the ACL (EACL 2009), pages 754-762.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Corpus-Based Approaches to Metaphor and Metonymy", |
| "authors": [], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1515/9783110199895" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anatol Stefanowitsch and Stefan Th. Gries, editors. 2007. Corpus-Based Approaches to Metaphor and Metonymy. De Gruyter Mouton.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "A new approach for idiom identification using meanings and the web", |
| "authors": [ |
| { |
| "first": "Rakesh", |
| "middle": [], |
| "last": "Verma", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasanthi", |
| "middle": [], |
| "last": "Vuppuluri", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of Recent Advances in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "681--687", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rakesh Verma and Vasanthi Vuppuluri. 2015. A new approach for idiom identification using meanings and the web. In Proceedings of Recent Advances in Natural Language Processing, pages 681-687, His- sar, Bulgaria.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Love is all around: A corpusbased study of pop lyrics", |
| "authors": [ |
| { |
| "first": "Valentin", |
| "middle": [], |
| "last": "Werner", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Corpora", |
| "volume": "7", |
| "issue": "", |
| "pages": "19--50", |
| "other_ids": { |
| "DOI": [ |
| "10.3366/cor.2012.0016" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valentin Werner. 2012. Love is all around: A corpus- based study of pop lyrics. Corpora, 7:19-50.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Rethinking Idiomaticity: A Usage-based Approach", |
| "authors": [ |
| { |
| "first": "Stefanie", |
| "middle": [], |
| "last": "Wulff", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Studies in Corpus and Discourse. Continuum", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefanie Wulff. 2008. Rethinking Idiomaticity: A Usage-based Approach. Studies in Corpus and Dis- course. Continuum, London, New York.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "text": "Local context of ngrams", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "text": "Trade-off curves for Random Forest cut-off", |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "num": null, |
| "text": "Trade-off curves for Random Forest cut-off on the", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"3\">: Count-based and predictive collocates for Kuh</td></tr><tr><td>(cow)</td><td/><td/></tr><tr><td colspan=\"2\">Versuch German</td><td>English</td></tr><tr><td>Count</td><td>unternommen</td><td>made failed in</td></tr><tr><td/><td>gescheitert Beim</td><td>second failed</td></tr><tr><td/><td>zweiten geschei-</td><td>worth third star-</td></tr><tr><td/><td>terten wert drit-</td><td>ted make failed</td></tr><tr><td/><td>ten gestartet</td><td/></tr><tr><td/><td>unternehmen</td><td/></tr><tr><td/><td>scheiterte</td><td/></tr><tr><td>Pred</td><td>untauglicher</td><td>unsuitable futile</td></tr><tr><td/><td>vergeblicher</td><td>failed made con-</td></tr><tr><td/><td>missgl\u00fcckter</td><td>vulsive failed</td></tr><tr><td/><td>unternommene</td><td>failed desper-</td></tr><tr><td/><td>krampfhaften</td><td>ate unsuitable</td></tr><tr><td/><td>fehlgeschlagener</td><td>desperate</td></tr><tr><td/><td>(\u2026)</td><td/></tr></table>" |
| }, |
| "TABREF2": { |
| "text": "Count-based and predictive collocates for Versuch (attempt)", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>: Performance of different feature sets in a Ran-</td></tr><tr><td>dom Forest with cutoff=0.3. SY_C1: Count-based col-</td></tr><tr><td>location measures based on DeReKo. SY_C2: Count-</td></tr><tr><td>based collocation measures based on pop lyric corpus.</td></tr><tr><td>SY_W: Predictive collocation measures. SY_R: Rank-</td></tr><tr><td>based collocation measures. SY_C1_R: SY_C1+SY_R,</td></tr><tr><td>SY_W_R: SY_W+SY_R. CO: Context features. O:</td></tr><tr><td>Other</td></tr></table>" |
| }, |
| "TABREF5": { |
| "text": "analyzes the contributions of the individual features for the classification task. MDA gives the random forest's estimate of the mean decrease in accuracy per feature, IGain", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"3\">prediction outcome</td><td/></tr><tr><td/><td>idiom</td><td colspan=\"2\">no idiom total</td></tr><tr><td>idiom</td><td>327</td><td>215</td><td>542</td></tr><tr><td>actual</td><td/><td/><td/></tr><tr><td>value</td><td/><td/><td/></tr><tr><td>no idiom</td><td>191</td><td colspan=\"2\">8506 8697</td></tr><tr><td>total</td><td>518</td><td>8721</td><td/></tr></table>" |
| }, |
| "TABREF6": { |
| "text": "Confusion Matrix for prediction with the best feature set", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF8": { |
| "text": "Features", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF9": { |
| "text": "Confusion Matrix for prediction on idioms from Wikipedia with cut-off=0.05 the ngram context within the pop lyrics corpus and are consequently not available for out-of-domain data.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |