| { |
| "paper_id": "P16-1018", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:58:57.634743Z" |
| }, |
| "title": "Literal and Metaphorical Senses in Compositional Distributional Semantic Models", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Dar\u00edo Guti\u00e9rrez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Tyler", |
| "middle": [], |
| "last": "Marghetis", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Indiana University", |
| "location": { |
| "settlement": "Bloomington" |
| } |
| }, |
| "email": "tmarghet@cogsci.ucsd.edu" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [ |
| "K" |
| ], |
| "last": "Bergen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "bkbergen@ucsd.edu" |
| }, |
| { |
| "first": "San", |
| "middle": [], |
| "last": "Diego", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Metaphorical expressions are pervasive in natural language and pose a substantial challenge for computational semantics. The inherent compositionality of metaphor makes it an important test case for compositional distributional semantic models (CDSMs). This paper is the first to investigate whether metaphorical composition warrants a distinct treatment in the CDSM framework. We propose a method to learn metaphors as linear transformations in a vector space and find that, across a variety of semantic domains, explicitly modeling metaphor improves the resulting semantic representations. We then use these representations in a metaphor identification task, achieving a high performance of 0.82 in terms of F-score.", |
| "pdf_parse": { |
| "paper_id": "P16-1018", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Metaphorical expressions are pervasive in natural language and pose a substantial challenge for computational semantics. The inherent compositionality of metaphor makes it an important test case for compositional distributional semantic models (CDSMs). This paper is the first to investigate whether metaphorical composition warrants a distinct treatment in the CDSM framework. We propose a method to learn metaphors as linear transformations in a vector space and find that, across a variety of semantic domains, explicitly modeling metaphor improves the resulting semantic representations. We then use these representations in a metaphor identification task, achieving a high performance of 0.82 in terms of F-score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "An extensive body of behavioral and corpuslinguistic studies suggests that metaphors are pervasive in everyday language (Cameron, 2003; Steen et al., 2010) and play an important role in how humans define and understand the world. According to Conceptual Metaphor Theory (CMT) (Lakoff and Johnson, 1981) , individual metaphorical expressions, or linguistic metaphors (LMs), are instantiations of broader generalizations referred to as conceptual metaphors (CMs). For example, the phrases half-baked idea, food for thought, and spoon-fed information are LMs that instantiate the CM IDEAS ARE FOOD. These phrases reflect a mapping from the source domain of FOOD to the target domain of IDEAS (Lakoff, 1989) . Two central claims of the CMT are that this mapping is systematic, in the sense that it consists of a fixed set of ontological correspondences, such as thinking is preparing, communication is feeding, understanding is digestion; and that this mapping can be productively extended to produce novel LMs that obey these correspondences.", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 135, |
| "text": "(Cameron, 2003;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 136, |
| "end": 155, |
| "text": "Steen et al., 2010)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 276, |
| "end": 302, |
| "text": "(Lakoff and Johnson, 1981)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 689, |
| "end": 703, |
| "text": "(Lakoff, 1989)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent years have seen the rise of statistical techniques for metaphor detection. Several of these techniques leverage distributional statistics and vector-space models of meaning to classify utterances as literal or metaphorical (Utsumi, 2006; Shutova et al., 2010; Hovy et al., 2013; Tsvetkov et al., 2014) . An important insight of these studies is that metaphorical meaning is not merely a property of individual words, but rather arises through cross-domain composition. The meaning of sweet, for instance, is not intrinsically metaphorical. Yet this word may exhibit a range of metaphorical meanings-e.g., sweet dreams, sweet person, sweet victory-that are created through the interplay of source and target domains. If metaphor is compositional, how do we represent it, and how can we use it in a compositional framework for meaning?", |
| "cite_spans": [ |
| { |
| "start": 230, |
| "end": 244, |
| "text": "(Utsumi, 2006;", |
| "ref_id": "BIBREF61" |
| }, |
| { |
| "start": 245, |
| "end": 266, |
| "text": "Shutova et al., 2010;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 267, |
| "end": 285, |
| "text": "Hovy et al., 2013;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 286, |
| "end": 308, |
| "text": "Tsvetkov et al., 2014)", |
| "ref_id": "BIBREF58" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Compositional distributional semantic models (CDSMs) provide a compact model of compositionality that produces vector representations of phrases while avoiding the sparsity and storage issues associated with storing vectors for each phrase in a language explicitly. One of the most popular CDSM frameworks (Baroni and Zamparelli, 2010; Guevara, 2010; Coecke et al., 2010) represents nouns as vectors, adjectives as matrices that act on the noun vectors, and transitive verbs as third-order tensors that act on noun or noun phrase vectors. The meaning of a phrase is then derived by composing these lexical representations. The vast majority of such models build a single representation for all senses of a word, collapsing distinct senses together. One exception is the work of Kartsaklis and Sadrzadeh (2013a) , who investigated homonymy, in which lexical items have identical form but unrelated meanings (e.g., bank). They found that deriving verb tensors from all instances of a homonymous form (as compared to training a separate tensor for each distinct sense) loses information and degrades the resultant phrase vector representations. To the best of our knowledge, there has not yet been a study of regular polysemy (i.e. metaphorical or metonymic sense distinctions) in the context of compositional distributional semantics. Yet, due to systematicity in metaphorical cross-domain mappings, there are likely to be systematic contextual sense distinctions that can be captured by a CDSM, improving the resulting semantic representations.", |
| "cite_spans": [ |
| { |
| "start": 306, |
| "end": 335, |
| "text": "(Baroni and Zamparelli, 2010;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 336, |
| "end": 350, |
| "text": "Guevara, 2010;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 351, |
| "end": 371, |
| "text": "Coecke et al., 2010)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 778, |
| "end": 810, |
| "text": "Kartsaklis and Sadrzadeh (2013a)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we investigate whether metaphor, as a case of regular polysemy, warrants distinct treatment under a compositional distributional semantic framework. We propose a new approach to CDSMs, in which metaphorical meanings are distinct but structurally related to literal meanings. We then extend the generalizability of our approach by proposing a method to automatically learn metaphorical mappings as linear transformations in a CDSM. We focus on modeling adjective senses and evaluate our methods on a new data set of 8592 adjective-noun pairs annotated for metaphoricity, which we will make publicly available. Finally, we apply our models to classify unseen adjective-noun (AN) phrases as literal or metaphorical and obtain state-of-the-art performance in the metaphor identification task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Metaphors as Morphisms. The idea of metaphor as a systematic mapping has been formalized in the framework of category theory (Goguen, 1999; Kuhn and Frank, 1991) . In category theory, morphisms are transformations from one object to another that preserve some essential structure of the original object. Category theory provides a general formalism for analyzing relationships as morphisms in a wide range of systems (see Spivak (2014) ). Category theory has been used to formalize the CM hypothesis with applications to user interfaces, poetry, and information visualization (Kuhn and Frank, 1991; Goguen and Harrell, 2010; Goguen and Harrell, 2005) . Although these formal treatments of metaphors as morphisms are rigorous and wellformalized, they have been applied at a relatively limited scale. This is because this work does not suggest a straightforward and data-driven way to quantify semantic domains or morphisms, but rather focuses on the transformations and relations between semantic domains and morphisms, assuming some appropriate quantification has already been established. In contrast, our methods can learn representations of source-target domain mappings from corpus data, and so are inherently more scalable.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 139, |
| "text": "(Goguen, 1999;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 140, |
| "end": 161, |
| "text": "Kuhn and Frank, 1991)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 422, |
| "end": 435, |
| "text": "Spivak (2014)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 576, |
| "end": 598, |
| "text": "(Kuhn and Frank, 1991;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 599, |
| "end": 624, |
| "text": "Goguen and Harrell, 2010;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 625, |
| "end": 650, |
| "text": "Goguen and Harrell, 2005)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background & Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Compositional DSMs. Similar issues arose in modeling compositional semantics. Formal semantics has dealt with compositional meaning for decades, by using mathematical structures from abstract algebra, logic, and category theory (Montague, 1970; Partee, 1994; Lambek, 1999) . However, formal semantics requires manual crafting of features. The central insight of CDSMs is to model the composition of words as algebraic operations on their vector representations, as provided by a conventional DSM (Mitchell and Lapata, 2008) . Guevara (2010) and Baroni and Zamparelli (2010) were the first to treat adjectives and verbs differently from nouns. In their models, adjectives are represented by matrices that act on noun vectors. Adjective matrices can be learned using regression techniques. Other CDSMs have also been proposed and successfully applied to tasks such as sentiment analysis and paraphrase (Socher et al., 2011; Socher et al., 2012; Tsubaki et al., 2013; Turney, 2013) .", |
| "cite_spans": [ |
| { |
| "start": 228, |
| "end": 244, |
| "text": "(Montague, 1970;", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 245, |
| "end": 258, |
| "text": "Partee, 1994;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 259, |
| "end": 272, |
| "text": "Lambek, 1999)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 496, |
| "end": 523, |
| "text": "(Mitchell and Lapata, 2008)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 526, |
| "end": 540, |
| "text": "Guevara (2010)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 545, |
| "end": 573, |
| "text": "Baroni and Zamparelli (2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 900, |
| "end": 921, |
| "text": "(Socher et al., 2011;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 922, |
| "end": 942, |
| "text": "Socher et al., 2012;", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 943, |
| "end": 964, |
| "text": "Tsubaki et al., 2013;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 965, |
| "end": 978, |
| "text": "Turney, 2013)", |
| "ref_id": "BIBREF60" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background & Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Handling Polysemy in CDSMs. Several researchers argue that terms with ambiguous senses can be handled by DSMs without any recourse to additional disambiguation steps, as long as contextual information is available (Boleda et al., 2012; Erk and Pad\u00f3, 2010; Pantel and Lin, 2002; Sch\u00fctze, 1998; Tsubaki et al., 2013) . conjecture that CDSMs might largely avoid problems handling adjectives with multiple senses because the matrices for adjectives implicitly incorporate contextual information. However, they do draw a distinction between two ways in which the meaning of a term can vary. Continuous polysemy-the subtle and continuous variations in meaning resulting from the different contexts in which a word appears-is relatively tractable, in their opinion. This contrasts with discrete homonymy-the association of a single term with completely independent meanings (e.g., light house vs. light work). Baroni et al. concede that homonymy is more difficult to handle in CDSMs. Unfortunately, they do not propose a definite way to determine whether any given variation in meaning is polysemy or homonymy, and offer no account of regular polysemy (i.e., metaphor and metonymy) or whether it would pose similar problems as homonymy for CDSMs.", |
| "cite_spans": [ |
| { |
| "start": 214, |
| "end": 235, |
| "text": "(Boleda et al., 2012;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 236, |
| "end": 255, |
| "text": "Erk and Pad\u00f3, 2010;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 256, |
| "end": 277, |
| "text": "Pantel and Lin, 2002;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 278, |
| "end": 292, |
| "text": "Sch\u00fctze, 1998;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 293, |
| "end": 314, |
| "text": "Tsubaki et al., 2013)", |
| "ref_id": "BIBREF56" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background & Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To handle the problematic case of homonymy, Kartsaklis and Sadrzadeh (2013b) adapt a clustering technique to disambiguate the senses of verbs, and then train separate tensors for each sense, using the previously mentioned CDSM framework of Coecke et al. (2010) . They found that prior disambiguation resulted in semantic similarity measures that correlated more closely with human judgments.", |
| "cite_spans": [ |
| { |
| "start": 240, |
| "end": 260, |
| "text": "Coecke et al. (2010)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background & Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In principle, metaphor, as a type of regular polysemy, is different from the sort of semantic ambiguity described above. General ambiguity or vagueness in meaning (e.g. bright light vs bright color) is generally context-dependent in an unsystematic manner. In contrast, in regular polysemy meaning transfer happens in a systematic way (e.g. bright light vs. bright idea), which can be explicitly modeled within a CDSM. The above CDSMs provide no account of such systematic polysemy, which is the gap this paper aims to fill.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background & Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Computational Work on Metaphor. There is now an extensive literature on statistical approaches to metaphor detection. The investigated methods include clustering (Birke and Sarkar, 2006; Shutova et al., 2010; ; topic modeling (Bethard et al., 2009; Heintz et al., 2013) ; topical structure and imageability analysis (Strzalkowski et al., 2013) ; semantic similarity graphs , and feature-based classifiers (Gedigian et al., 2006; Li and Sporleder, 2009; Turney et al., 2011; Dunn, 2013a; Dunn, 2013b; Hovy et al., 2013; Mohler et al., 2013; Neuman et al., 2013; Tsvetkov et al., 2013; Tsvetkov et al., 2014) . We refer readers to the survey by Shutova (2015) for a more thorough review.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 186, |
| "text": "(Birke and Sarkar, 2006;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 187, |
| "end": 208, |
| "text": "Shutova et al., 2010;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 226, |
| "end": 248, |
| "text": "(Bethard et al., 2009;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 249, |
| "end": 269, |
| "text": "Heintz et al., 2013)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 316, |
| "end": 343, |
| "text": "(Strzalkowski et al., 2013)", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 405, |
| "end": 428, |
| "text": "(Gedigian et al., 2006;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 429, |
| "end": 452, |
| "text": "Li and Sporleder, 2009;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 453, |
| "end": 473, |
| "text": "Turney et al., 2011;", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 474, |
| "end": 486, |
| "text": "Dunn, 2013a;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 487, |
| "end": 499, |
| "text": "Dunn, 2013b;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 500, |
| "end": 518, |
| "text": "Hovy et al., 2013;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 519, |
| "end": 539, |
| "text": "Mohler et al., 2013;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 540, |
| "end": 560, |
| "text": "Neuman et al., 2013;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 561, |
| "end": 583, |
| "text": "Tsvetkov et al., 2013;", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 584, |
| "end": 606, |
| "text": "Tsvetkov et al., 2014)", |
| "ref_id": "BIBREF58" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background & Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Most relevant to the present work are approaches that attempt to identify whether adjective-noun phrases are metaphorical or literal. Krishnakumaran and Zhu (2007) use AN co-occurrence counts and WordNet hyponym/hypernym relations for this task. If the noun and its hyponyms/hypernyms do not occur frequently with the given adjective, then the AN phrase is labeled as metaphorical. Krishnaku-maran and Zhu's system achieves a precision of 0.67. Turney et al. (2011) classify verb and adjective phrases based on their level of concreteness or abstractness in relation to the noun they appear with. They learn concreteness rankings for words automatically (starting from a set of examples) and then search for expressions where a concrete adjective or verb is used with an abstract noun (e.g., dark humor is tagged as a metaphor; dark hair is not). They measure performance on a set of 100 phrases involving one of five adjectives, attaining an average accuracy of 0.79. Tsvetkov et al. (2014) train a random-forest classifier using several features, including abstractness and imageability rankings, WordNet supersenses, and DSM vectors. They report an accuracy of 0.81 on the Turney et al. 2011AN phrase set. They also introduce a new set of 200 AN phrases, on which they measure an F-score of 0.85.", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 163, |
| "text": "Krishnakumaran and Zhu (2007)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 969, |
| "end": 991, |
| "text": "Tsvetkov et al. (2014)", |
| "ref_id": "BIBREF58" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background & Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Corpus. We trained our DSMs from a corpus of 4.58 billion tokens. Our corpus construction procedure is modeled on that of Baroni and Zamparelli (2010) . The corpus consisted of a 2011 dump of English Wikipedia, the UKWaC (Baroni et al., 2009) , the BNC (BNC Consortium, 2007) , and the English Gigaword corpus (Graff et al., 2003) . The corpus was tokenized, lemmatized, and POStagged using the NLTK toolkit (Bird and Loper, 2004) for Python.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 150, |
| "text": "Baroni and Zamparelli (2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 221, |
| "end": 242, |
| "text": "(Baroni et al., 2009)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 253, |
| "end": 275, |
| "text": "(BNC Consortium, 2007)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 310, |
| "end": 330, |
| "text": "(Graff et al., 2003)", |
| "ref_id": null |
| }, |
| { |
| "start": 408, |
| "end": 430, |
| "text": "(Bird and Loper, 2004)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Metaphor Annotations. We created an annotated dataset of 8592 AN phrases (3991 literal, 4601 metaphorical). Our choice of adjectives was inspired by the test set of Tsvetkov et al. (2014) , though our annotated dataset is considerably larger. We focused on 23 adjectives that can have both metaphorical and literal senses, and which function as source-domain words in relatively productive CMs: TEMPERATURE (cold, heated, icy, warm), LIGHT (bright, brilliant, dim), TEXTURE (rough, smooth, soft); SUBSTANCE (dense, heavy, solid), CLARITY (clean, clear, murky), TASTE (bitter, sour, sweet), STRENGTH (strong, weak), and DEPTH (deep, shallow). We extracted all AN phrases involving these adjectives that occur in our corpus at least 10 times. We filtered out all phrases that require wider context to establish their meaning or metaphoricity-e.g., bright side, weak point.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 187, |
| "text": "Tsvetkov et al. (2014)", |
| "ref_id": "BIBREF58" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The remaining phrases were annotated using a procedure based on Shutova et al. (2010) . Annotators were encouraged to rely on their own intuition of metaphor, but were provided with the following guidance:", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 85, |
| "text": "Shutova et al. (2010)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 For each phrase, establish the meaning of the adjective in the context of the phrase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 Try to imagine a more basic meaning of this adjective in other contexts. Basic meanings tend to be: more concrete; related to embodied actions/perceptions/sensations; more precise; historically older/more \"original\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 If you can establish a basic meaning distinct from the meaning of the adjective in this context, it is likely to be used metaphorically.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "If requested, a randomly sampled sentence from the corpus that contained the phrase in question was also provided. The annotation was performed by one of the authors. The author's annotations were compared against those of a university graduate native English-speaking volunteer who was not involved in the research, on a sample of 500 phrases. Interannotator reliability (Cohen, 1960; Fleiss et al., 1969) was \u03ba = 0.80 (SE = .02). Our annotated data set is publicly available at http: //bit.ly/1TQ5czN", |
| "cite_spans": [ |
| { |
| "start": 372, |
| "end": 385, |
| "text": "(Cohen, 1960;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 386, |
| "end": 406, |
| "text": "Fleiss et al., 1969)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "4 Representing Metaphorical Senses in a Compositional DSM", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section we test whether separate treatment of literal and metaphorical senses is justified in a CDSM framework. In that case, training adjective matrix representations on literal and metaphorical subsets separately may result in systematically improved phrase vector representations, despite each matrix making use of fewer training examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our goal is to learn accurate vector representations for unseen adjective-noun (AN) phrases, where adjectives can take on metaphorical or literal senses. Our models build off the CDSM framework of Baroni and Zamparelli (2010) , as extended by Li et al. (2014) . Each adjective a is treated as a linear map from nouns to AN phrases:", |
| "cite_spans": [ |
| { |
| "start": 197, |
| "end": 225, |
| "text": "Baroni and Zamparelli (2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 243, |
| "end": 259, |
| "text": "Li et al. (2014)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "p = A a n,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where p is a vector for the phrase, n is a vector for the noun, and A a is a matrix for the adjective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Contextual Variation Model. The traditional representations do not account for the differences in meaning of an adjective in literal vs metaphorical phrases. Their assumption is that the contextual variations in meaning that are encoded by literal and metaphorical senses may be subtle enough that they can be handled by a single catchall matrix per adjective, A BOTH(a) . In this model, every phrase i can be represented by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "p i = A BOTH(a) n i (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "regardless of whether a is used metaphorically or literally in i. This model has the advantage of simplicity and requires no information about whether an adjective is being used literally or metaphorically. In fact, to our knowledge, all previous literature has handled metaphor in this way.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Discrete Polysemy Model Alternatively, the metaphorical and literal senses of an adjective may be distinct enough that averaging the two senses together in a single adjective matrix produces representations that are not well-suited for either metaphorical or literal phrases. Thus, the literal-metaphorical distinction could be problematic for CDSMs in the way that suggested that homonyms are. Just as Kartsaklis and Sadrzadeh (2013a) solve this problem by representing each sense of a homonym by a different adjective matrix, we represent literal and metaphorical senses by different adjective matrices. Each literal phrase i is represented by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p i = A LIT(a) n i ,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where A LIT(a) is the literal matrix for adjective a.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Likewise, a metaphorical phrase is represented by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p i = A MET(a) n i ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where A MET(a) is the metaphorical matrix for a.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Learning. Given a data set of noun and phrase vectors D", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(a) = {(n i , p i )} N i=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "for AN phrases involving adjective a extracted using a conventional DSM, our goal is to learn A D(a) . This can be treated as an optimization problem, of learning an estimate\u00c2 D(a) that minimizes a specified loss function. In the case of the squared error loss,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "L(A D(a) ) = i\u2208D(a) p i \u2212 A D(a) n i 2 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": ", the optimal solution can be found precisely using ordinary least-squares regression. However, this may result in overfitting because of the large number of parameters relative to the number of samples (i.e., phrases). Regularization parameters \u03bb = (\u03bb 1 , \u03bb 2 ) can be introduced to keep\u00c2 D(a) small:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "i\u2208D(a) p i \u2212\u00c2 D(a) n i 2 2 + R(\u03bb;\u00c2 D(a) ), where R(\u03bb;\u00c2 D ) = \u03bb 1 \u00c2 D 1 + \u03bb 2 \u00c2 D 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "This approach, known as elastic-net regression (Zou and Hastie, 2005) , produces better adjective matrices than unregularized regression (Li et al., 2014) . Note that the same procedure can be used to learn the adjective representations in both the Contextual Variation model and the Discrete Polysemy model by varying what phrases are included in the training set D(a). In the Contextual Variation model D(a) includes both metaphorical and literal phrases, while in the Discrete Polysemy model it includes only metaphorical phrases when learnin\u011d A MET(a) and testing on metaphorical phrases (and only literal phrases when learning\u00c2 LIT(a) and testing on literal phrases).", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 69, |
| "text": "(Zou and Hastie, 2005)", |
| "ref_id": "BIBREF63" |
| }, |
| { |
| "start": 137, |
| "end": 154, |
| "text": "(Li et al., 2014)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Extracting Noun & Phrase Vectors. Our approach for constructing term vector representations is similar to that of Dinu et al. (2013) . We first selected the 10K most frequent nouns, adjectives, and verbs to serve as context terms. We then constructed a co-occurrence matrix that recorded term-context co-occurrence within a symmetric 5-word context window of the 50K most frequent POS-tagged terms in the corpus. We then used these co-occurrences to compute the positive pointwise mutual information (PPMI) between every pair of terms, and collected these into a termterm matrix. Next, we reduced the dimensionality of this matrix to 100 dimensions using singularvalue decomposition. Additionally, we computed \"ground truth\" distributional vectors for all the annotated AN phrases in our data set by treating the phrases as single terms and computing their PPMI with the 50K single-word terms, and then projecting them onto the same 100-dimensional basis. Training Adjective Matrices. For each adjective a that we are testing, we split the phrases involving that adjective into two subsets, the literal (LIT) subset and the metaphorical (MET) subset. We then split the subsets into 10 folds, so that we do not train and test any matrices on the same phrases. For each fold k, we train three adjective matrices:\u00c2 MET(a) using all phrases from the MET set not in fold k;\u00c2 LIT(a) using all phrases from the LIT set not in fold k; and\u00c2 BOTH(a) using all the phrases from either subset not in fold k. Within each fold, we use nested cross-validation as out- Figure 1 : Reduction in error from training on targeted subset (MET/LIT) rather than on all phrases. lined in Li et al. (2014) to determine the regularization parameters for each regression problem.", |
| "cite_spans": [ |
| { |
| "start": 114, |
| "end": 132, |
| "text": "Dinu et al. (2013)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1663, |
| "end": 1679, |
| "text": "Li et al. (2014)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1553, |
| "end": 1561, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Evaluation. Our goal is to produce a vector prediction of each phrase that will be close to its ground truth distributional vector. Phrase vectors directly extracted from the corpus by treating the phrase as a single term are the gold standard for predicting human judgment and producing paraphrases (Dinu et al., 2013) , so we use these as our ground truth. The quality of the vector prediction for phrase i is measured using the cosine distance between the phrase's ground truth vector p i and the vector predictionp i :", |
| "cite_spans": [ |
| { |
| "start": 300, |
| "end": 319, |
| "text": "(Dinu et al., 2013)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "err(\\hat{p}_i) = 1 - \\cos(p_i, \\hat{p}_i).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We then analyze the benefit of training on a reduced subset by calculating a \"subset improvement\" (SI) score for the MET and LIT subsets of each adjective a. We define the SI for each subset D(a) \u2208 {LIT(a), MET(a)} as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "SI(D(a)) = 1 \u2212 i\u2208D(a) err(\u00c2 D(a) n i )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "i\u2208D(a) err(\u00c2 BOTH(a) n i ) Positive values of SI thus indicate improved performance when trained on a reduced subset compared to the full set of phrases. For example SI LIT(a) = 5% tells us that predicting the phrase vectors for LIT phrases of adjective a using the LIT matrix resulted in a 5% reduction in mean cosine error compared to predicting the phrase vectors using the BOTH matrix. Results. The results are summarized in Fig. 1 . Each point indicates the SI for a single adjective and for a single subset. Adjectives are grouped by source domain along the y-axis. Overall, almost every item shows a subset improvement; and, for every source domain, the majority of adjectives show a subset improvement.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 429, |
| "end": 435, |
| "text": "Fig. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We analyzed per-adjective SI by fitting a linear mixed-effects model, with a fixed intercept, a fixed effect of test subset (MET vs. LIT), a random effect of source domain, and the maximal converging random effects structure (uncorrelated random intercepts and slopes) (Barr et al., 2013) . Training on a targeted subset improved performance by 4.4% \u00b1 0.009(SE) (p = .002). There was no evidence that this differed by test subset (i.e., metaphorical vs. literal senses, p = .35). The positive SI from training on a targeted subset suggests that metaphorical and literal uses of the same adjective are semantically distinct.", |
| "cite_spans": [ |
| { |
| "start": 269, |
| "end": 288, |
| "text": "(Barr et al., 2013)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Method. The results of the previous section suggest a straightforward classification rule: classify unseen phrase i involving adjective a as metaphorical if cos(p i ,\u00c2 MET(a) n i ) < cos(p i ,\u00c2 LIT(a) n i ). Otherwise, we classify it as literal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metaphor Classification", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Evaluation. We test this method on our data set of 8593 annotated AN phrases using 10-fold cross validation. It is possible that our method's classification performance is not due to the compositional aspect of the model, but rather to some semantic coherence property among the nouns in the AN phrases that we are testing. To control for this possibility, we compare the performance of our method against four baselines. The first baseline, NOUN-NN, measures the cosine distance between the vector for the noun of the AN phrase being tested and the noun vectors of the nouns participating in an AN phrase in the training folds. The test phrase is then assigned the label of the AN phrase whose noun vector is nearest. PHRASE-NN proceeds similarly, but using the ground-truth phrase vectors for the test phrase and the training phrases. The test phrase is then assigned the label of the AN phrase whose vector is nearest. The baseline NOUN-CENT first computes the centroid of the noun vectors of the training phrases that are literal, and the centroid of the noun vectors of the training phrases that are metaphorical. It then assigns the test phrase the label of the centroid whose cosine distance from the test phrase's noun vector is smallest. PHRASE-CENT, proceeds similarly, but using phrase vectors. We measure performance against the manual annotations. Table 1 : Performance of the method of \u00a74.4 (MET-LIT) against various baselines.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1361, |
| "end": 1368, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Metaphor Classification", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "sults were superior to those of the baselines (Table 1) . These results are competitive with the state of the art and demonstrate the importance of compositionality in metaphor identification.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 46, |
| "end": 56, |
| "text": "(Table 1)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Metaphor Classification", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "One of the principal claims of the CM hypothesis is that CMs are productive: A CM (i.e., mapping) can generate endless new LMs (i.e., linguistic expressions). Cases where the LMs involve an adjective that has already been used metaphorically and for which we have annotated metaphorical and literal examples can be handled by the methods of \u00a74, but when the novel LM involves an adjective that has only been observed in literal usage, we need a more elaborate model. According to the CM hypothesis, an adjective's metaphorical meaning is a result of the action of a sourceto-target CM mapping on the adjective's literal sense. If so, then given an appropriate representation of this mapping it should be possible to infer the metaphorical sense of an adjective without ever seeing metaphorical exemplars-that is, using only the adjective's literal sense. Our next experiments seek to determine whether it is possible to represent and learn CM mappings as linear maps in distributional vector space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metaphors as Linear Transformations", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We model each CM mapping M from source to target domain as a linear transformation C M :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "A MET(a) n i \u2248 C M A LIT(a) n i", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We can apply a two-step regression to learn C M . First we apply elastic-net regression to learn the literal adjective matrix\u00c2 LIT(a) as in \u00a74.2. Then we can substitute this estimate into Eq. (4), and apply elastic-net regression to learn the\u0108 M that minimizes the regularized squared error loss:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\\sum_{a \\in M} \\sum_{i \\in D(a)} \\| p_i - \\hat{C}_M \\hat{A}_{LIT(a_i)} n_i \\|_2^2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "To learn C M in this regression problem, we can pool together and train on phrases from many different adjectives that participate in M.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We used a cross-validation scheme where we treated each adjective in a source domain as a fold in training the domain's metaphor transformation matrix. The nested cross-validation procedure we use to set regularization parameters \u03bb and evaluate performance requires at least 3 adjectives in a source domain, so we evaluate on the 6 source domain classes containing at least 3 adjectives. The total number of phrases for these 19 adjectives is 6987 (3659 metaphorical, 3328 literal).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Evaluation. We wish to test whether CM mappings learned from one set of adjectives are transferable to new adjectives for which metaphorical phrases are unseen. As in \u00a74, models were evaluated using cosine error compared to the ground truth phrase vector representation. Since our goal is to improve the vector representation of metaphorical phrases given no metaphorical annotations, we measure performance on the MET phrase subset for each adjective. We compare the performance of the transformed LIT matrix C M A LIT(a) against the performance of the original LIT matrix A LIT(a) by defining the metaphor transformation improvement (MTI) as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "MTI(a) = 1 - \\frac{\\sum_{i \\in MET} err(\\hat{C}_M \\hat{A}_{LIT(a)} n_i)}{\\sum_{i \\in MET} err(\\hat{A}_{LIT(a)} n_i)}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": ".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Results. Per-adjective MTI was analyzed with a linear mixed-effects model, with a fixed intercept, a random effect of source domain, and random intercepts. Transforming the LIT matrix using the CM mapping matrix improved performance by 11.5% \u00b1 0.023(SE) (p < .001). On average, performance improved for 18 of 19 adjectives and for every source domain (p = .03, binomial test; Fig. 2 ). Thus, mapping structure is indeed shared across adjectives participating in the same CM.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 376, |
| "end": 382, |
| "text": "Fig. 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluating Vector Representations", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Method. Once again our results suggest a procedure for metaphor classification. This procedure can classify phrases involving adjectives without seeing any metaphorical annotations. For any unseen phrase i involving an adjective a i , we classify the phrase as metaphorical if", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metaphor Classification", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "cos(p i ,\u0108 M\u00c2LIT(a i ) n i ) < cos(p i ,\u00c2 LIT(a i ) n i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metaphor Classification", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Otherwise, we classify it as literal. We used the same procedure as in \u00a74.2 to learn\u00c2 LIT(a i ) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metaphor Classification", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Results. Our method achieved an F-score of 0.793 on the classification of phrases involving unseen adjectives. On this same set of phrases, the method of \u00a74.4 achieved an F-score of 0.838. Once again, the performance of our method was superior to the performance of the baselines (Table 2; the MET-LIT figures in Table 2 differ slightly from those in Table 1 because only 19 of 23 adjectives are tested). For comparison, we also include the classification performance using the MET-LIT method of \u00a74.4. While MET-LIT slightly outperforms TRANS-LIT, the latter has the benefit of not needing annotations for metaphorical phrases for the test adjective. Hence, our approach is generalizable to cases where such annotations are unavailable with only slight performance reduction.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 313, |
| "end": 320, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 351, |
| "end": 358, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Metaphor Classification", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Overall, our results show that taking metaphor into account has the potential to improve CDSMs and expand their domain of applicability. The findings of \u00a74 suggest that collapsing across metaphorical and literal uses may hurt accuracy of vector rep-resentations in CDSMs. While the method in \u00a74 depends on explicit annotations of metaphorical and literal senses, the method in \u00a75 provides a way to generalize these representations to adjectives for which metaphorical training data is unavailable, by showing that metaphorical mappings are transferable across adjectives from the same source domain. Note that an accurate matrix representation of the literal sense of each adjective is still required in the experimental setup of \u00a75. This particular choice of setup allowed a proof of concept of the hypothesis that metaphors function as cross-domain transformations, but in principle it would be desirable to learn transformations from a general BOTH matrix representation for any adjective in a source domain to its MET matrix representation. This would enable improved vector representations of metaphorical AN phrases without annotation for unseen adjectives.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The success of our models on the metaphor classification tasks demonstrates that there is information about metaphoricity of a phrase inherent in the composition of the meanings of its components. Notably, our results show that this metaphorical compositionality can be captured from corpus-derived distributional statistics. We also noticed some trends at the level of individual phrases. In particular, classification performance and vector accuracy tended to be lower for metaphorical phrases whose nouns are distributionally similar to nouns that tend to participate in literal phrases (e.g., reception is similar to foyer and refreshment in our corpus; warm reception is metaphorical while warm foyer is literal). Another area where classification accuracy is low is in phrases with low corpus occurrence frequency. The ground truth vectors for these phrases exhibit high sample variance and sparsity. Many such phrases sound paradoxical (e.g., bitter sweetness).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our results could also inform debates within cognitive science. First, cognitive scientists debate whether words that are used both literally and figuratively (e.g., long road, long meeting) are best understood as having a single, abstract meaning that varies with context or two distinct but related meanings. For instance, some argue that domains like space, time, and number operate over a shared, generalized magnitude system, yet others maintain that our mental representation of time and number is distinct from our mental representation of space, yet inherited metaphorically from it (Winter et al., 2015) . Our results suggest that figurative and literal senses involve quite different patterns of use. This is statistical evidence that adjectives that are used metaphorically have distinct related senses, not a single abstract sense.", |
| "cite_spans": [ |
| { |
| "start": 591, |
| "end": 612, |
| "text": "(Winter et al., 2015)", |
| "ref_id": "BIBREF62" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Second, the Conceptual Metaphor Theory account hypothesizes that LMs are an outgrowth of metaphorical thought, which is in turn an outgrowth of embodied experiences that conflate source and target domains-experience structures thought, and thought structures language (Lakoff, 1993) . However, recent critics have argued for the opposite causal direction: Linguistic regularities may drive the mental mapping between source and target domains (Hutchinson and Louwerse, 2013; Casasanto, 2014; Hutchinson and Louwerse, 2014) . Our results show that, at least for AN pairs, the semantic structure of a source domain and its mapping to a metaphorical target domain are available in the distributional statistics of language itself. There may be no need, therefore, to invoke embodied experience to explain the prevalence of metaphorical thought in adult language users. A lifetime of experience with literal and metaphorical language may suffice.", |
| "cite_spans": [ |
| { |
| "start": 268, |
| "end": 282, |
| "text": "(Lakoff, 1993)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 443, |
| "end": 474, |
| "text": "(Hutchinson and Louwerse, 2013;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 475, |
| "end": 491, |
| "text": "Casasanto, 2014;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 492, |
| "end": 522, |
| "text": "Hutchinson and Louwerse, 2014)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We have shown that modeling metaphor explicitly within a CDSM can improve the resulting vector representations. According to our results, the systematicity of metaphor can be exploited to learn linear transformations that represent the action of metaphorical mappings across many different adjectives in the same semantic domain. Our classification results suggest that the compositional distributional semantics of a phrase can inform classification of the phrase for metaphoricity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Beyond improvements to the applications we presented, the principles underlying our methods also show potential for other tasks. For instance, the LIT and MET adjective matrices and the CM mapping matrix learned with our methods could be applied to improve automated paraphrasing of AN phrases. Our work is also directly extendable to other syntactic constructions. In the CDSM framework we apply, verbs would be represented as third-order tensors. Tractable and efficient methods for estimating these verb tensors are now available (Fried et al., 2015) . It may also be possible to extend the coverage of our system by using automated word-sense disambiguation to bootstrap annotations and therefore construct LIT and MET matrices in a minimally supervised fashion (Kartsaklis et al., 2013b) . Finally, it would be interesting to investigate modeling metaphorical mappings as nonlinear mappings within the deep learning framework.", |
| "cite_spans": [ |
| { |
| "start": 533, |
| "end": 553, |
| "text": "(Fried et al., 2015)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 766, |
| "end": 792, |
| "text": "(Kartsaklis et al., 2013b)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "+ R(\u03bb;\u0108 M ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work used the Extreme Science and Engineering Discovery Environment (XSEDE), which is supported by National Science Foundation grant number ACI-1053575. Ekaterina Shutova's research is supported by the Leverhulme Trust Early Career Fellowship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Nouns are vectors, adjectives are matrices: Representing adjective-noun constructions in semantic space", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Zamparelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1183--1193", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Baroni and Roberto Zamparelli. 2010. Nouns are vectors, adjectives are matrices: Representing adjective-noun constructions in semantic space. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing, pages 1183-1193. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "The WaCky wide web: A collection of very large linguistically processed web-crawled corpora", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Bernardini", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ferraresi", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Zanchetta", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Language Resources and Evaluation", |
| "volume": "43", |
| "issue": "3", |
| "pages": "209--226", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Baroni, S. Bernardini, A. Ferraresi, and E. Zanchetta. 2009. The WaCky wide web: A collection of very large linguistically processed web-crawled corpora. Language Resources and Evaluation, 43(3):209-226.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Frege in space: A program of compositional distributional semantics. Linguistic Issues in Language Technology", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaela", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Zamparelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Baroni, Raffaela Bernardi, and Roberto Zam- parelli. 2014. Frege in space: A program of compo- sitional distributional semantics. Linguistic Issues in Language Technology, 9.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Random effects structure for confirmatory hypothesis testing: Keep it maximal", |
| "authors": [ |
| { |
| "first": "Dale", |
| "middle": [ |
| "J" |
| ], |
| "last": "Barr", |
| "suffix": "" |
| }, |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Scheepers", |
| "suffix": "" |
| }, |
| { |
| "first": "Harry", |
| "middle": [ |
| "J" |
| ], |
| "last": "Tily", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Journal of Memory and Language", |
| "volume": "68", |
| "issue": "3", |
| "pages": "255--278", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dale J. Barr, Roger Levy, Christoph Scheepers, and Harry J. Tily. 2013. Random effects structure for confirmatory hypothesis testing: Keep it maximal. Journal of Memory and Language, 68(3):255-278.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Topic model analysis of metaphor frequency for psycholinguistic stimuli", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "Vicky", |
| "middle": [ |
| "Tzuyin" |
| ], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "H" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Workshop on Computational Approaches to Linguistic Creativity", |
| "volume": "", |
| "issue": "", |
| "pages": "9--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bethard, Vicky Tzuyin Lai, and James H. Mar- tin. 2009. Topic model analysis of metaphor fre- quency for psycholinguistic stimuli. In Proceedings of the Workshop on Computational Approaches to Linguistic Creativity, pages 9-16. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "NLTK: The natural language toolkit", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Loper", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1--4", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bird and Edward Loper. 2004. NLTK: The nat- ural language toolkit. In Proceedings of the 42nd Annual Meeting of the Association for Computa- tional Linguistics, pages 1-4.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A clustering approach for nearly unsupervised recognition of nonliteral language", |
| "authors": [ |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Birke", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Sarkar", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 11th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "329--336", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julia Birke and Anoop Sarkar. 2006. A clustering ap- proach for nearly unsupervised recognition of non- literal language. In Proceedings of the 11th Confer- ence of the European Chapter of the Association for Computational Linguistics, pages 329-336.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "British National Corpus, Version 3 BNC XML edition", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bnc Consortium", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "BNC Consortium. 2007. British National Corpus, Ver- sion 3 BNC XML edition.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "First-order vs. higherorder modification in distributional semantics", |
| "authors": [ |
| { |
| "first": "Gemma", |
| "middle": [], |
| "last": "Boleda", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Vecchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Miquel", |
| "middle": [], |
| "last": "Cornudella", |
| "suffix": "" |
| }, |
| { |
| "first": "Louise", |
| "middle": [], |
| "last": "Mcnally", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1223--1233", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gemma Boleda, Eva Maria Vecchi, Miquel Cornudella, and Louise McNally. 2012. First-order vs. higher- order modification in distributional semantics. In Proceedings of the 2012 Joint Conference on Empir- ical Methods in Natural Language Processing and Computational Natural Language Learning, pages 1223-1233. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Metaphor in Educational Discourse. A&C Black", |
| "authors": [ |
| { |
| "first": "Lynne", |
| "middle": [], |
| "last": "Cameron", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lynne Cameron. 2003. Metaphor in Educational Dis- course. A&C Black, London.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Development of metaphorical thinking: The role of language", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Casasanto", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Language and the Creative Mind", |
| "volume": "", |
| "issue": "", |
| "pages": "3--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Casasanto. 2014. Development of metaphori- cal thinking: The role of language. In Mike Borkent, Barbara Dancygier, and Jennifer Hinnell, editors, Language and the Creative Mind, pages 3-18. CSLI Publications, Stanford.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Mathematical foundations for a compositional distributional model of meaning", |
| "authors": [ |
| { |
| "first": "Bob", |
| "middle": [], |
| "last": "Coecke", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehrnoosh", |
| "middle": [], |
| "last": "Sadrzadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Linguistic Analysis (Lambek Festschrift)", |
| "volume": "", |
| "issue": "", |
| "pages": "345--384", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bob Coecke, Mehrnoosh Sadrzadeh, and Stephen Clark. 2010. Mathematical foundations for a com- positional distributional model of meaning. In Lin- guistic Analysis (Lambek Festschrift), pages 345- 384.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A coefficient of agreement for nominal scales. educational and psychosocial measurement", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 1960, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Cohen. 1960. A coefficient of agreement for nominal scales. educational and psychosocial mea- surement.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "General estimation and evaluation of compositional distributional semantic models", |
| "authors": [ |
| { |
| "first": "Georgiana", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| }, |
| { |
| "first": "Nghia", |
| "middle": [ |
| "The" |
| ], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the ACL 2013 Workshop on Continuous Vector Space Models and their Compositionality (CVSC 2013)", |
| "volume": "", |
| "issue": "", |
| "pages": "50--58", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Georgiana Dinu, Nghia The Pham, and Marco Baroni. 2013. General estimation and evaluation of compo- sitional distributional semantic models. In Proceed- ings of the ACL 2013 Workshop on Continuous Vec- tor Space Models and their Compositionality (CVSC 2013), pages 50-58, East Stroudsburg, Pennsylva- nia. ACL.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Evaluating the premises and results of four metaphor identification systems", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Dunn", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Linguistics and Intelligent Text Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "471--486", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Dunn. 2013a. Evaluating the premises and results of four metaphor identification systems. In Computational Linguistics and Intelligent Text Pro- cessing, pages 471-486. Springer.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "What metaphor identification systems can tell us about metaphor-in-language", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Dunn", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the First Workshop on Metaphor in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Dunn. 2013b. What metaphor identification systems can tell us about metaphor-in-language. In Proceedings of the First Workshop on Metaphor in NLP, pages 1-10.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Exemplar-based models for word meaning in context", |
| "authors": [ |
| { |
| "first": "Katrin", |
| "middle": [], |
| "last": "Erk", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Pad\u00f3", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the ACL 2010 Conference Short Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "92--97", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katrin Erk and Sebastian Pad\u00f3. 2010. Exemplar-based models for word meaning in context. In Proceedings of the ACL 2010 Conference Short Papers, pages 92-97. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Large sample standard errors of kappa and weighted kappa", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [ |
| "L" |
| ], |
| "last": "Fleiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [ |
| "S" |
| ], |
| "last": "Everitt", |
| "suffix": "" |
| } |
| ], |
| "year": 1969, |
| "venue": "Psychological Bulletin", |
| "volume": "72", |
| "issue": "5", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph L. Fleiss, Jacob Cohen, and B.S. Everitt. 1969. Large sample standard errors of kappa and weighted kappa. Psychological Bulletin, 72(5):323.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Low-rank tensors for verbs in compositional distributional semantics", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Fried", |
| "suffix": "" |
| }, |
| { |
| "first": "Tamara", |
| "middle": [], |
| "last": "Polajnar", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Fried, Tamara Polajnar, and Stephen Clark. 2015. Low-rank tensors for verbs in compositional distributional semantics. In Proceedings of the 53nd Annual Meeting of the Association for Computa- tional Linguistics, Beijing.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Catching metaphors", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gedigian", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bryant", |
| "suffix": "" |
| }, |
| { |
| "first": "Srini", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Branimir", |
| "middle": [], |
| "last": "Ciric", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Third Workshop on Scalable Natural Language Understanding", |
| "volume": "", |
| "issue": "", |
| "pages": "41--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Gedigian, John Bryant, Srini Narayanan, and Bra- nimir Ciric. 2006. Catching metaphors. In Pro- ceedings of the Third Workshop on Scalable Natural Language Understanding, pages 41-48, New York. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "7 information visualisation and semiotic morphisms. Studies in Multidisciplinarity", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [ |
| "A" |
| ], |
| "last": "Goguen", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "Fox" |
| ], |
| "last": "Harrell", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "83--97", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph A. Goguen and D. Fox Harrell. 2005. 7 infor- mation visualisation and semiotic morphisms. Stud- ies in Multidisciplinarity, 2:83-97.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Style: A computational and conceptual blending-based approach", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [ |
| "A" |
| ], |
| "last": "Goguen", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "Fox" |
| ], |
| "last": "Harrell", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "The Structure of Style", |
| "volume": "", |
| "issue": "", |
| "pages": "291--316", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph A. Goguen and D. Fox Harrell. 2010. Style: A computational and conceptual blending-based ap- proach. In The Structure of Style, pages 291-316. Springer, New York.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "An introduction to algebraic semiotics, with application to user interface design", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Goguen", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Computation for metaphors, analogy, and agents", |
| "volume": "", |
| "issue": "", |
| "pages": "242--291", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Goguen. 1999. An introduction to algebraic semiotics, with application to user interface design. In Computation for metaphors, analogy, and agents, pages 242-291. Springer.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A regression model of adjective-noun compositionality in distributional semantics", |
| "authors": [ |
| { |
| "first": "Emiliano", |
| "middle": [], |
| "last": "Guevara", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Workshop on GEometrical Models of Natural Language Semantics", |
| "volume": "", |
| "issue": "", |
| "pages": "33--37", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emiliano Guevara. 2010. A regression model of adjective-noun compositionality in distributional se- mantics. In Proceedings of the 2010 Workshop on GEometrical Models of Natural Language Seman- tics, pages 33-37. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Automatic extraction of linguistic metaphor with lda topic modeling", |
| "authors": [ |
| { |
| "first": "Ilana", |
| "middle": [], |
| "last": "Heintz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Gabbard", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahesh", |
| "middle": [], |
| "last": "Srinivasan", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Barner", |
| "suffix": "" |
| }, |
| { |
| "first": "Donald", |
| "middle": [ |
| "S" |
| ], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjorie", |
| "middle": [], |
| "last": "Freedman", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the First Workshop on Metaphor in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "58--66", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilana Heintz, Ryan Gabbard, Mahesh Srinivasan, David Barner, Donald S Black, Marjorie Freedman, and Ralph Weischedel. 2013. Automatic extraction of linguistic metaphor with lda topic modeling. In Pro- ceedings of the First Workshop on Metaphor in NLP, pages 58-66.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Identifying metaphorical word use with tree kernels", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashank", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujay", |
| "middle": [], |
| "last": "Kumar Jauhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Mrinmaya", |
| "middle": [], |
| "last": "Sachan", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartik", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Huiying", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Whitney", |
| "middle": [], |
| "last": "Sanders", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the First Workshop on Metaphor in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "52--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dirk Hovy, Shashank Srivastava, Sujay Kumar Jauhar, Mrinmaya Sachan, Kartik Goyal, Huiying Li, Whit- ney Sanders, and Eduard Hovy. 2013. Identifying metaphorical word use with tree kernels. In Pro- ceedings of the First Workshop on Metaphor in NLP, pages 52-57.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Language statistics and individual differences in processing primary metaphors", |
| "authors": [ |
| { |
| "first": "Sterling", |
| "middle": [], |
| "last": "Hutchinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Louwerse", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Cognitive Linguistics", |
| "volume": "24", |
| "issue": "4", |
| "pages": "667--687", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sterling Hutchinson and Max Louwerse. 2013. Lan- guage statistics and individual differences in pro- cessing primary metaphors. Cognitive Linguistics, 24(4):667-687.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Language statistics explain the spatial-numerical association of response codes", |
| "authors": [ |
| { |
| "first": "Sterling", |
| "middle": [], |
| "last": "Hutchinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [ |
| "M" |
| ], |
| "last": "Louwerse", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Psychonomic Bulletin & Review", |
| "volume": "21", |
| "issue": "2", |
| "pages": "470--478", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sterling Hutchinson and Max M. Louwerse. 2014. Language statistics explain the spatial-numerical as- sociation of response codes. Psychonomic Bulletin & Review, 21(2):470-478.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Prior disambiguation of word tensors for constructing sentence vectors", |
| "authors": [ |
| { |
| "first": "Dimitri", |
| "middle": [], |
| "last": "Kartsaklis", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehrnoosh", |
| "middle": [], |
| "last": "Sadrzadeh", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1590--1601", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitri Kartsaklis, Mehrnoosh Sadrzadeh, et al. 2013a. Prior disambiguation of word tensors for constructing sentence vectors. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1590-1601.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Separating disambiguation from composition in distributional semantics", |
| "authors": [ |
| { |
| "first": "Dimitri", |
| "middle": [], |
| "last": "Kartsaklis", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehrnoosh", |
| "middle": [], |
| "last": "Sadrzadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Pulman", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "114--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitri Kartsaklis, Mehrnoosh Sadrzadeh, and Stephen Pulman. 2013b. Separating disambiguation from composition in distributional semantics. In Pro- ceedings of the 2013 Conference on Computational Natural Language Learning, pages 114-123.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Hunting elusive metaphors using lexical resources", |
| "authors": [ |
| { |
| "first": "Saisuresh", |
| "middle": [], |
| "last": "Krishnakumaran", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaojin", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Workshop on Computational approaches to Figurative Language", |
| "volume": "", |
| "issue": "", |
| "pages": "13--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saisuresh Krishnakumaran and Xiaojin Zhu. 2007. Hunting elusive metaphors using lexical resources. In Proceedings of the Workshop on Computational approaches to Figurative Language, pages 13-20. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A formalization of metaphors and image-schemas in user interfaces. In Cognitive and linguistic aspects of geographic space", |
| "authors": [ |
| { |
| "first": "Werner", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew U", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "419--434", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Werner Kuhn and Andrew U Frank. 1991. A formal- ization of metaphors and image-schemas in user in- terfaces. In Cognitive and linguistic aspects of geo- graphic space, pages 419-434. Springer.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Metaphors we live by", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Lakoff", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 1981, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Lakoff and Mark Johnson. 1981. Metaphors we live by. University of Chicago Press, Chicago.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Some empirical results about the nature of concepts", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Lakoff", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "Mind & Language", |
| "volume": "4", |
| "issue": "1-2", |
| "pages": "103--129", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Lakoff. 1989. Some empirical results about the nature of concepts. Mind & Language, 4(1- 2):103-129.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "The contemporary theory of metaphor", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Lakoff", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Metaphor and Thought", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Lakoff. 1993. The contemporary theory of metaphor. In Andrew Ortony, editor, Metaphor and Thought. Cambridge University Press, Cambridge.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Type grammar revisited", |
| "authors": [ |
| { |
| "first": "Joachim", |
| "middle": [], |
| "last": "Lambek", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Logical aspects of computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joachim Lambek. 1999. Type grammar revisited. In Logical aspects of computational linguistics, pages 1-27. Springer, Berlin.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Classifier combination for contextual idiom detection without labelled data", |
| "authors": [ |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Caroline", |
| "middle": [], |
| "last": "Sporleder", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "315--323", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Linlin Li and Caroline Sporleder. 2009. Classifier combination for contextual idiom detection without labelled data. In Proceedings of the 2009 Confer- ence on Empirical Methods in Natural Language Processing: Volume 1-Volume 1, pages 315-323. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Using Gaussian mixture models to detect figurative language in context", |
| "authors": [ |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Caroline", |
| "middle": [], |
| "last": "Sporleder", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "297--300", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Linlin Li and Caroline Sporleder. 2010. Using Gaus- sian mixture models to detect figurative language in context. In Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Lin- guistics, pages 297-300. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Topic models for word sense disambiguation and token-based idiom detection", |
| "authors": [ |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "Caroline", |
| "middle": [], |
| "last": "Sporleder", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1138--1147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Linlin Li, Benjamin Roth, and Caroline Sporleder. 2010. Topic models for word sense disambiguation and token-based idiom detection. In Proceedings of the 48th Annual Meeting of the Association for Com- putational Linguistics, pages 1138-1147. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Improving the lexical function composition model with pathwise optimized elastic-net regression", |
| "authors": [ |
| { |
| "first": "Jiming", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgiana", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "434--442", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiming Li, Marco Baroni, and Georgiana Dinu. 2014. Improving the lexical function composition model with pathwise optimized elastic-net regression. In Proceedings of the 14th Conference of the European Chapter of the Association for Computational Lin- guistics, pages 434-442.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Vector-based models of semantic composition", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "236--244", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Mitchell and Mirella Lapata. 2008. Vector-based models of semantic composition. In ACL-08: HLT, pages 236-244.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Semantic signatures for example-based linguistic metaphor detection", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Mohler", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Bracewell", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Hinote", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Tomlinson", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the First Workshop on Metaphor in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "27--35", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Mohler, David Bracewell, David Hinote, and Marc Tomlinson. 2013. Semantic signatures for example-based linguistic metaphor detection. In Proceedings of the First Workshop on Metaphor in NLP, pages 27-35.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "English as a formal language", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Montague", |
| "suffix": "" |
| } |
| ], |
| "year": 1970, |
| "venue": "Linguaggi nella Societ\u00e0 e nella Tecnica. Edizioni di Comunit\u00e1", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Montague. 1970. English as a formal lan- guage. In B Visentini and et al, editors, Linguaggi nella Societ\u00e0 e nella Tecnica. Edizioni di Comunit\u00e1, Milan.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Metaphor identification in large texts corpora", |
| "authors": [ |
| { |
| "first": "Yair", |
| "middle": [], |
| "last": "Neuman", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Assaf", |
| "suffix": "" |
| }, |
| { |
| "first": "Yohai", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Last", |
| "suffix": "" |
| }, |
| { |
| "first": "Shlomo", |
| "middle": [], |
| "last": "Argamon", |
| "suffix": "" |
| }, |
| { |
| "first": "Newton", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| }, |
| { |
| "first": "Ophir", |
| "middle": [], |
| "last": "Frieder", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "PLoS ONE", |
| "volume": "8", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yair Neuman, Dan Assaf, Yohai Cohen, Mark Last, Shlomo Argamon, Newton Howard, and Ophir Frieder. 2013. Metaphor identification in large texts corpora. PLoS ONE, 8:e62343.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Discovering word senses from text", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Pantel", |
| "suffix": "" |
| }, |
| { |
| "first": "Dekang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Eighth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "613--619", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Pantel and Dekang Lin. 2002. Discovering word senses from text. In Proceedings of the Eighth ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining, pages 613-619. ACM.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Lexical semantics and compositionality", |
| "authors": [ |
| { |
| "first": "Barbara", |
| "middle": [ |
| "H" |
| ], |
| "last": "Partee", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Invitation to Cognitive Science 2nd Edition, Part I: Language", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barbara H. Partee. 1994. Lexical semantics and com- positionality. In Lila Gleitman and Mark Liberman, editors, Invitation to Cognitive Science 2nd Edition, Part I: Language. MIT Press, Cambridge, Mass., USA.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Automatic word sense discrimination", |
| "authors": [ |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Computational Linguistics", |
| "volume": "24", |
| "issue": "1", |
| "pages": "97--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hinrich Sch\u00fctze. 1998. Automatic word sense dis- crimination. Computational Linguistics, 24(1):97- 123.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Metaphor identification using verb and noun clustering", |
| "authors": [ |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| }, |
| { |
| "first": "Lin", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1002--1010", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekaterina Shutova, Lin Sun, and Anna Korhonen. 2010. Metaphor identification using verb and noun clustering. In Proceedings of the 23rd International Conference on Computational Linguistics, pages 1002-1010. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Design and evaluation of metaphor processing systems", |
| "authors": [ |
| { |
| "first": "Ekatrina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekatrina Shutova. 2015. Design and evaluation of metaphor processing systems. volume Forthcoming.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Semi-supervised recursive autoencoders for predicting sentiment distributions", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "H" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "151--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Jeffrey Pennington, Eric H. Huang, Andrew Y. Ng, and Christopher D. Manning. 2011. Semi-supervised recursive autoencoders for predict- ing sentiment distributions. In Proceedings of the Conference on Empirical Methods in Natural Lan- guage Processing, pages 151-161. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Semantic compositionality through recursive matrix-vector spaces", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Brody", |
| "middle": [], |
| "last": "Huval", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1201--1211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Brody Huval, Christopher D. Man- ning, and Andrew Y. Ng. 2012. Semantic composi- tionality through recursive matrix-vector spaces. In Proceedings of the 2012 Joint Conference on Empir- ical Methods in Natural Language Processing and Computational Natural Language Learning, pages 1201-1211. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Category Theory for the Sciences", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "I" |
| ], |
| "last": "Spivak", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David I. Spivak. 2014. Category Theory for the Sci- ences. MIT Press, Cambridge, Mass., USA.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Unsupervised recognition of literal and non-literal use of idiomatic expressions", |
| "authors": [ |
| { |
| "first": "Caroline", |
| "middle": [], |
| "last": "Sporleder", |
| "suffix": "" |
| }, |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 12th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "754--762", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Caroline Sporleder and Linlin Li. 2009. Unsupervised recognition of literal and non-literal use of idiomatic expressions. In Proceedings of the 12th Conference of the European Chapter of the Association for Com- putational Linguistics, pages 754-762. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "A method for linguistic metaphor identification: From MIP to MIPVU", |
| "authors": [ |
| { |
| "first": "Gerard", |
| "middle": [ |
| "J" |
| ], |
| "last": "Steen", |
| "suffix": "" |
| }, |
| { |
| "first": "Aletta", |
| "middle": [ |
| "G" |
| ], |
| "last": "Dorst", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "Berenike" |
| ], |
| "last": "Herrmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Kaal", |
| "suffix": "" |
| }, |
| { |
| "first": "Tina", |
| "middle": [], |
| "last": "Krennmayr", |
| "suffix": "" |
| }, |
| { |
| "first": "Trijntje", |
| "middle": [], |
| "last": "Pasma", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "14", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gerard J. Steen, Aletta G. Dorst, J. Berenike Herrmann, Anna Kaal, Tina Krennmayr, and Trijntje Pasma. 2010. A method for linguistic metaphor identifica- tion: From MIP to MIPVU, volume 14. John Ben- jamins Publishing, Amsterdam/Philadelphia.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Robust extraction of metaphors from novel data", |
| "authors": [ |
| { |
| "first": "Tomek", |
| "middle": [], |
| "last": "Strzalkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [ |
| "A" |
| ], |
| "last": "Broadwell", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurie", |
| "middle": [], |
| "last": "Feldman", |
| "suffix": "" |
| }, |
| { |
| "first": "Boris", |
| "middle": [], |
| "last": "Yamrom", |
| "suffix": "" |
| }, |
| { |
| "first": "Samira", |
| "middle": [], |
| "last": "Shaikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kit", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Umit", |
| "middle": [], |
| "last": "Boz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ignacio", |
| "middle": [], |
| "last": "Cases", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Elliot", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the First Workshop on Metaphor in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "67--76", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomek Strzalkowski, George A. Broadwell, Sarah Tay- lor, Laurie Feldman, Boris Yamrom, Samira Shaikh, Ting Liu, Kit Cho, Umit Boz, Ignacio Cases, and Kyle Elliot. 2013. Robust extraction of metaphors from novel data. In Proceedings of the First Work- shop on Metaphor in NLP, pages 67-76, Atlanta, Georgia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Modeling and learning semantic co-compositionality through prototype projections and neural networks", |
| "authors": [ |
| { |
| "first": "Masashi", |
| "middle": [], |
| "last": "Tsubaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Masashi", |
| "middle": [], |
| "last": "Shimbo", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuji", |
| "middle": [], |
| "last": "Matsumoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "The 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "130--140", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Masashi Tsubaki, Kevin Duh, Masashi Shimbo, and Yuji Matsumoto. 2013. Modeling and learning se- mantic co-compositionality through prototype pro- jections and neural networks. In The 2013 Con- ference on Empirical Methods in Natural Language Processing, pages 130-140.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Cross-lingual metaphor detection using common semantic features", |
| "authors": [ |
| { |
| "first": "Yulia", |
| "middle": [], |
| "last": "Tsvetkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Mukomel", |
| "suffix": "" |
| }, |
| { |
| "first": "Anatole", |
| "middle": [], |
| "last": "Gershman", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yulia Tsvetkov, Elena Mukomel, and Anatole Gersh- man. 2013. Cross-lingual metaphor detection using common semantic features.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Metaphor detection with cross-lingual model transfer", |
| "authors": [ |
| { |
| "first": "Yulia", |
| "middle": [], |
| "last": "Tsvetkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonid", |
| "middle": [], |
| "last": "Boytsov", |
| "suffix": "" |
| }, |
| { |
| "first": "Anatole", |
| "middle": [], |
| "last": "Gershman", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Nyberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yulia Tsvetkov, Leonid Boytsov, Anatole Gershman, Eric Nyberg, and Chris Dyer. 2014. Metaphor de- tection with cross-lingual model transfer. In Pro- ceedings of the Annual Meeting of the Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Literal and metaphorical sense identification through concrete and abstract context", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Yair", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Neuman", |
| "suffix": "" |
| }, |
| { |
| "first": "Yohai", |
| "middle": [], |
| "last": "Assaf", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on the Empirical Methods in Natural Language Processing, EMNLP '11", |
| "volume": "", |
| "issue": "", |
| "pages": "680--690", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter D. Turney, Yair Neuman, Dan Assaf, and Yohai Cohen. 2011. Literal and metaphorical sense identification through concrete and abstract con- text. In Proceedings of the 2011 Conference on the Empirical Methods in Natural Language Process- ing, EMNLP '11, pages 680-690, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Distributional semantics beyond words: supervised learning of analogy and paraphrase", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Transactions of the Association for Computational Linguistics (TACL)", |
| "volume": "1", |
| "issue": "", |
| "pages": "353--366", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter D. Turney. 2013. Distributional semantics be- yond words: supervised learning of analogy and paraphrase. Transactions of the Association for Computational Linguistics (TACL), 1:353-366.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Computational exploration of metaphor comprehension processes", |
| "authors": [ |
| { |
| "first": "Akira", |
| "middle": [], |
| "last": "Utsumi", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 28th Annual Meeting of the Cognitive Science Society (CogSci2006)", |
| "volume": "", |
| "issue": "", |
| "pages": "2281--2286", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Akira Utsumi. 2006. Computational exploration of metaphor comprehension processes. In Proceedings of the 28th Annual Meeting of the Cognitive Science Society (CogSci2006), pages 2281-2286.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "Of magnitudes and metaphors: Explaining cognitive interactions between space, time, and number", |
| "authors": [ |
| { |
| "first": "Bodo", |
| "middle": [], |
| "last": "Winter", |
| "suffix": "" |
| }, |
| { |
| "first": "Tyler", |
| "middle": [], |
| "last": "Marghetis", |
| "suffix": "" |
| }, |
| { |
| "first": "Teenie", |
| "middle": [], |
| "last": "Matlock", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Cortex", |
| "volume": "64", |
| "issue": "", |
| "pages": "209--224", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bodo Winter, Tyler Marghetis, and Teenie Matlock. 2015. Of magnitudes and metaphors: Explain- ing cognitive interactions between space, time, and number. Cortex, 64:209-224.", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "Regularization and variable selection via the elastic net", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Zou", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Hastie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Journal of the Royal Statistical Society: Series B (Statistical Methodology)", |
| "volume": "67", |
| "issue": "2", |
| "pages": "301--320", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Zou and Trevor Hastie. 2005. Regularization and variable selection via the elastic net. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 67(2):301-320.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Results. Our classification method achieved a held-out F-score of 0.817, recall of 0.793, precision of 0.842, and accuracy of 0.809. These re-", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF1": { |
| "text": "", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>: Performance of method of \u00a75.4 (TRANS-</td></tr><tr><td>LIT) against method of \u00a74.4 (MET-LIT) and vari-</td></tr><tr><td>ous baselines.</td></tr></table>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |