| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:42:55.770565Z" |
| }, |
| "title": "ALBERT-BiLSTM for Sequential Metaphor Detection", |
| "authors": [ |
| { |
| "first": "Shuqun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Information Retrieval Laboratory", |
| "institution": "Dalian University of Technology", |
| "location": { |
| "addrLine": "{1397023717,jjwind" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jingjie", |
| "middle": [], |
| "last": "Zeng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Information Retrieval Laboratory", |
| "institution": "Dalian University of Technology", |
| "location": { |
| "addrLine": "{1397023717,jjwind" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jinhui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Information Retrieval Laboratory", |
| "institution": "Dalian University of Technology", |
| "location": { |
| "addrLine": "{1397023717,jjwind" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Information Retrieval Laboratory", |
| "institution": "Dalian University of Technology", |
| "location": { |
| "addrLine": "{1397023717,jjwind" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Information Retrieval Laboratory", |
| "institution": "Dalian University of Technology", |
| "location": { |
| "addrLine": "{1397023717,jjwind" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hongfei", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Information Retrieval Laboratory", |
| "institution": "Dalian University of Technology", |
| "location": { |
| "addrLine": "{1397023717,jjwind" |
| } |
| }, |
| "email": "hflin@dlut.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In our daily life, metaphor is a common way of expression. To understand the meaning of a metaphor, we should recognize the metaphor words which play important roles. In the metaphor detection task, we design a sequence labeling model based on ALBERT-LSTM-softmax. By applying this model, we carry out a lot of experiments and compare the experimental results with different processing methods, such as with different input sentences and tokens, or the methods with CRF and softmax. Then, some tricks are adopted to improve the experimental results. Finally, our model achieves a 0.707 F1-score for the all POS subtask and a 0.728 F1-score for the verb subtask on the TOEFL dataset.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In our daily life, metaphor is a common way of expression. To understand the meaning of a metaphor, we should recognize the metaphor words which play important roles. In the metaphor detection task, we design a sequence labeling model based on ALBERT-LSTM-softmax. By applying this model, we carry out a lot of experiments and compare the experimental results with different processing methods, such as with different input sentences and tokens, or the methods with CRF and softmax. Then, some tricks are adopted to improve the experimental results. Finally, our model achieves a 0.707 F1-score for the all POS subtask and a 0.728 F1-score for the verb subtask on the TOEFL dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "As a common rhetorical device, we often use metaphors to express our feelings and ideas vividly and concisely in our daily life. Detecting metaphors in texts is of great significance for analyzing the meaning and polarity of sentences. It can also be used to generate sentences that are more suitable for human expression, and promote the development of chat robots, machine translation and other fields.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Metaphor detection generally recognizes metaphorical words or phrases from the metaphorical sentence, such as \"she is a woman with a stone heart\", in which \"stone\" is a metaphorical word modified \"heart\". However, the task of metaphor detection is very challenging. Firstly, metaphor detection is a sequence labeling task, and every word in a sentence needs to be classified. Secondly, the boundaries between metaphors and non metaphors are sometimes vague. Moreover, due to the different identities of authors, some metaphorical words involve knowledge in specific fields and are difficult to recognize directly (Tsvetkov et al., 2014) . The traditional lexicon based method cannot cover all possible words occurred in metaphors. It is difficult to recognize metaphors when certain words are outof-vocabulary. Although the traditional machine learning method needs to extract features manually (Heintz et al., 2013) , its performance is still insufficient.While with the further development of language model, different kinds of end-to-end pre-trained models almost dominate the field of natural language processing, and also improve the prediction accuracy of various tasks to a higher level. Hence, in this paper we use pre-trained models to deal with metaphor detection task.", |
| "cite_spans": [ |
| { |
| "start": 613, |
| "end": 636, |
| "text": "(Tsvetkov et al., 2014)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 895, |
| "end": 916, |
| "text": "(Heintz et al., 2013)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The purpose of this metaphorical shared task is to identify the whole words and verbs in given sentences. In this paper, we design an ALBERT-BiLSTM structure to recognize metaphorical words in TOEFL dataset. Firstly, we conduct an experimental comparison on the form of input sentence, and then select the form of inputting the single sentence directly. Secondly, we compare the application of BERT on this sequence labeling problem, and extract the input form of the first part after the BPE word segmentation of BERT. Finally, the effect of conditional random field (CRF) and softmax with class weights in the output layer is compared and the result shows that softmax with class weights is better. At the same time, we also adopt some tricks in the training process, including semantic merge and loss with class weight. The final result in the test set achieves a 0.707 F1-score for the all POS subtask, and a 0.728 F1-score for the verb subtask on TOEFL dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "At present, researchers in the field of natural language processing have made a lot of effort in metaphor detection task. used unsupervised learning to detect metaphors, and applied the syntactically perceived distribution word vectors. Gong et al. (2017) used metaphorical language detection as a method to explore the composition of word vectors, and calculated cosine distance to distinguish metaphor from non-metaphor: words that are out of context in sentences may be metaphorical. Gao et al. (2018) proposed a model to connect the expression of Glove and Elmo for solving the sequence labeling task, which is also transferred to the following metaphor task. Guti\u00e9rrez et al. (2016) used the flexibility of word vectors to study metaphor and its possibility of modeling in semantic space. Mao et al. (2019) designed an end-to-end model based on Glove and Elmo, which could identify metaphors conveniently.", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 255, |
| "text": "Gong et al. (2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 487, |
| "end": 504, |
| "text": "Gao et al. (2018)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 664, |
| "end": 687, |
| "text": "Guti\u00e9rrez et al. (2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 794, |
| "end": 811, |
| "text": "Mao et al. (2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For metaphor often contains emotions, some researchers tended to carry on emotion analysis on metaphors. Veale (2012) constructed an lexicon based model for analyzing emotions of metaphors. Kozareva (2013) proposed a new method, which integrated the trigger factors of cognition, emotion and perception.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 117, |
| "text": "Veale (2012)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 190, |
| "end": 205, |
| "text": "Kozareva (2013)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Verb metaphor recognition is also an important subtask of metaphor recognition. Jia and Yu (2008) used conditional random fields(CRF) model and maximum entropy(ME) model to recognize verb metaphor, and they pointed out that there were no mature syntactic and semantic tools for metaphor analysis in Chinese. Beigman Klebanov et al. (2016) studied the effectiveness of semantic generalization and classification in capturing the rules of verb behavior, and tried to analyze their metaphors from the orthographic words unigrams.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 97, |
| "text": "Jia and Yu (2008)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "These studies also provided some guidance to our work. For example, the word vector concatenation in LSTM is similar to RNN_HG (Mao et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 127, |
| "end": 145, |
| "text": "(Mao et al., 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The dataset of this metaphorical shared task includes two kinds: VUA (Steen, 2010) and TOEFL (Klebanov et al., 2018) . This paper mainly conducts experiments on TOEFL data. TOEFL dataset contains 180 articles written by non-native English speakers in the TOEFL test, and 60 articles in the test set. Each article is divided into several parts by sentence. At the same time, the corresponding examination questions of each article are provided, and there are 8 kinds of questions. The details of the dataset are as follows in table 1.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 116, |
| "text": "(Klebanov et al., 2018)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task definition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We make statistics on the sentence length distribution in the data set, and the following is shown by the box chart. It can be seen that the sentence length of training set is longer than that of test set, but most of them are distributed between 0 and 100, no more than 350 tokens. It is suitable for BERT model, because the maximum sentence length that BERT can support is 512.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task definition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This shared task subtask is divided into all POS recognition and verb recognition. It also provides some tokens' ID in sentences of the test set, and finally submits the recognition result corresponding to token ID. Final ranking and results are reported by Leong et al. (2020) .", |
| "cite_spans": [ |
| { |
| "start": 258, |
| "end": 277, |
| "text": "Leong et al. (2020)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task definition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section, we aim to introduce the method conducted on TOEFL dataset in this shared task. We use 'BERT+BiLSTM+softmax' as the baseline model, in which BERT (Devlin et al., 2019) is a pretrained language model proposed by Google in 2018, and BiLSTM is a bidirectional Recurrent Neural Network. The details of our method are described below. ", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 183, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our data preprocessing method mainly includes two parts: data alignment and data augmentation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data processing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In data alignment part, we associate each word with its label in the sentence to transform the task into a sequential labeling task. In data augmentation part, we introduce context information and topic information of the sentences to expand the training data. There are three forms of our processed data:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data processing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(1) single sentence;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data processing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(2) the form of \"sentence pair\": considering that some metaphors are related to the context, we process the sentence into \"sentence pair\" form;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data processing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(3) the form of \"sentence-prompt pair\": similar to form (2), we convert sentences into sentence pair, but we use prompt information instead of context information.The specific form is shown in table 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data processing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Input Label single sentence B L sentence pair A+B L sentence-prompt pair B+P L Table 2 : Three different data input format. B is a sentence to be recognized and L is the label of B; A is the previous sentence of B in the text; P is the prompt of the text to which the sentence B belongs given in the TOEFL dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 79, |
| "end": 86, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Format", |
| "sec_num": null |
| }, |
| { |
| "text": "The reason for this is that we believe the prompt information of the sentence will influence the prediction results of the model. In order to find the best form of data, we train the baseline model on three forms of data respectively. The results are shown in table 3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Format", |
| "sec_num": null |
| }, |
| { |
| "text": "F1 single sentence 0.687 sentence pair 0.673 sentence-prompt pair 0.665 The results show that the data format (1) performs best. After observing the dataset, we believe that the poor performance of data format (2) is due to the fact that the metaphor contained in the second sentence is not closely related to the first sentence. Additional input leads to the increased difficulty in model training. And the reason for the poor performance of data format (3) is that the sentence is less related to the given prompt. In conclusion, metaphors are more related to the local information in the sentence, and we use data format (1) as the input of our method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Format", |
| "sec_num": null |
| }, |
| { |
| "text": "In addition, we find that some sentences in TOEFL data are mainly written by people from non-native English speaking countries, and there are many spelling errors. So we try to use the SpellChecker package of Python to correct the spelling of words, the F1-scores of the whole tokens in cross-validation before and after correction are 0.687 and 0.681 respectively. We initially thought word correction may be a useful method. However, the results show that the corrected data is not as good as expected, so we skip this step.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Format", |
| "sec_num": null |
| }, |
| { |
| "text": "The target of this evaluation is to identify metaphorical words in sentences, and we regard this task as a sequence labeling task. Our model consists of three layers: the pre-trained model layer, the contextual layer and the classification layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In pre-trained model layer, we use BERT for sequence labeling task. We find that the word segmentation algorithm BPE will divide the input words into smaller semantic units, i.e. subword, which leads to that the length of output sequence is greater than the length of input. To keep the length of the input and output in the same way, we propose three model input structures: (1) only the first subword of word is taken as input; (2) the input is unchanged, and only the first embedding of each word in output is taken as the representation of the current word;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "(3) the input is unchanged, and the embeddings of a word in output are merged into one embedding by convolutional neural networks which is taken as the representation of the current word. The results are as shown in table 4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The results show that the structure (1) performances best, so we use only the first subword as the input of each word. We think the reason for the poor performance of structures (2) and (3) is Input F1 First segmentation 0.687 First vector 0.674 Aggregate vector 0.681 Table 4 : The result of the three different word vector representation methods, where we use softmax as the classification layer.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 269, |
| "end": 276, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Our Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "that the provided TOEFL dataset is small and easy to be affected by input noise. The first subword is often the main part of a word, which can better express the semantics of the word compared to the rest subwords. Non-first subwords preserved by structure (2) and structure (3) will increase the input length of BERT, which brings noise while training, and makes it more difficult to learn from a small dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In contextual layer, we use BiLSTM to get the context representation of the word based on the output embedding of BERT. In classification layer, we compare the performance of CRF and softmax. The cross-validation F1-scores of the whole tokens are 0.671 for CRF and 0.687 for softmax. The results show that the softmax is better than the CRF model. We believe the reason is that there is no hard relation between the metaphor words and other words, so the constraint of CRF does not work well.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Finally, we adopt ALBERT+BiLSTM+softmax as our model. As a new pre-trained model released by Google, the performance of ALBERT-xxLarge-v1(ALBERT for short) (Lan et al., 2019) on natural language understanding task is better than BERT. Since the output dimension of ALBERT model is as high as 4096 dimensions, we just concatenate the first 300 dimension output embedding of ALBERT with the output embedding of BiLSTM. Then let the merged representation go through a full connection layer to get the probability distribution. Finally, the probability distribution is classified in the softmax layer. The reason for concatenating two parts of embedding is that we hope our model can predict by combining the context meaning and the word meaning. Table 5 shows that the concatenation method used performs better.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 743, |
| "end": 750, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Our Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In this subsection, we will introduce some useful tricks used in this evaluation. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tricks", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "ALBERT has 12 layers in total. It is generally believed that each layer of language model learns different features of the text, so we make two attempts on the representation vectors for different layers: (1) we concatenate the average output of the last four layers as the final output;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic merge", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "(2) we weighted sum the output of all 12 layers as the final output. The final online results show that method (1) is better. We believe that this is because the lower level of the language model is more inclined to learn the syntactic features of the text, while the higher level is more inclined to learn the semantic features of the text (Jawahar et al., 2019) . The task of metaphor recognition is more challenging for the proposed model to understand the semantics. The addition of lower level feature representation will introduce noise information instead.", |
| "cite_spans": [ |
| { |
| "start": 341, |
| "end": 363, |
| "text": "(Jawahar et al., 2019)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic merge", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "Due to the small proportion of metaphorical words in sentences, we consider to increase the loss value of positive metaphorical samples to balance the number difference between positive and negative samples. We try the weight value of positive sample loss between 0.8 and 4, based on the results we find that when weight value of positive samples is 2, we can get the best result. The specific hyper-parameters of the model are as follows: ALBERT's learning rate is 1e-5, and weight decay is 0.01; BiLSTM has one layer, and the learning rate is 2e-3, hidden units are 256, dropout rate is 0.5; the optimizer is Adam, batch size is 2, early stopping is used. The loss weights corresponding to the positive and negative classes are set to 2 and 1 respectively. The results of our final model on the test sets are as shown in Table 6 shows that our model performs well on the TOEFL dataset, and we also tests the results of the model on the VUA dataset. The results show that the proposed model in this paper can achieve good results on both datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 823, |
| "end": 830, |
| "text": "Table 6", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Loss with class weight", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "In this paper, we propose a method with AL-BERT+BiLSTM+softmax to identify metaphor words in the sentence. We extract text features through ALBERT's learning ability, and use BiL-STM to get contextual representation, then get the final prediction results with softmax layers. We also try several data preprocessing methods and utilize three tricks to improve the performance of our proposed model. Besides, we analyze and explain the results of each method according to the characteristics of the metaphor detection task. The experimental results show the effectiveness of our method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Semantic classifications for detection of verb metaphors", |
| "authors": [ |
| { |
| "first": "Chee Wee", |
| "middle": [], |
| "last": "Beata Beigman Klebanov", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "Dario" |
| ], |
| "last": "Leong", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Gutierrez", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Flor", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "101--106", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-2017" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Beata Beigman Klebanov, Chee Wee Leong, E. Dario Gutierrez, Ekaterina Shutova, and Michael Flor. 2016. Semantic classifications for detection of verb metaphors. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 101-106, Berlin, Germany. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural metaphor detection in context", |
| "authors": [ |
| { |
| "first": "Ge", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunsol", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "607--613", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1060" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ge Gao, Eunsol Choi, Yejin Choi, and Luke Zettle- moyer. 2018. Neural metaphor detection in context. In Proceedings of the 2018 Conference on Empiri- cal Methods in Natural Language Processing, pages 607-613, Brussels, Belgium. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Geometry of compositionality", |
| "authors": [ |
| { |
| "first": "Hongyu", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| }, |
| { |
| "first": "Suma", |
| "middle": [], |
| "last": "Bhat", |
| "suffix": "" |
| }, |
| { |
| "first": "Pramod", |
| "middle": [], |
| "last": "Viswanath", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "3202--3208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongyu Gong, Suma Bhat, and Pramod Viswanath. 2017. Geometry of compositionality. In Proceed- ings of the Thirty-First AAAI Conference on Artifi- cial Intelligence, February 4-9, 2017, San Francisco, California, USA, pages 3202-3208. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Literal and metaphorical senses in compositional distributional semantic models", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Guti\u00e9rrez", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| }, |
| { |
| "first": "Tyler", |
| "middle": [], |
| "last": "Marghetis", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Bergen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "183--193", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1018" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "E.Dario Guti\u00e9rrez, Ekaterina Shutova, Tyler Marghetis, and Benjamin Bergen. 2016. Literal and metaphor- ical senses in compositional distributional semantic models. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 183-193, Berlin, Germany. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Automatic extraction of linguistic metaphors with lda topic modeling", |
| "authors": [ |
| { |
| "first": "Ilana", |
| "middle": [], |
| "last": "Heintz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Gabbard", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahesh", |
| "middle": [ |
| "P" |
| ], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Dave", |
| "middle": [], |
| "last": "Barner", |
| "suffix": "" |
| }, |
| { |
| "first": "Donald", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Majorie", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [ |
| "M" |
| ], |
| "last": "Weischedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilana Heintz, Ryan Gabbard, Mahesh P. Srivastava, Dave Barner, Donald Black, Majorie Friedman, and Ralph M. Weischedel. 2013. Automatic extraction of linguistic metaphors with lda topic modeling.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "What does BERT learn about the structure of language?", |
| "authors": [ |
| { |
| "first": "Ganesh", |
| "middle": [], |
| "last": "Jawahar", |
| "suffix": "" |
| }, |
| { |
| "first": "Beno\u00eet", |
| "middle": [], |
| "last": "Sagot", |
| "suffix": "" |
| }, |
| { |
| "first": "Djam\u00e9", |
| "middle": [], |
| "last": "Seddah", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019", |
| "volume": "1", |
| "issue": "", |
| "pages": "3651--3657", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/p19-1356" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ganesh Jawahar, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2019. What does BERT learn about the structure of language? In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Vol- ume 1: Long Papers, pages 3651-3657. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Unsupervised Chinese verb metaphor recognition based on selectional preferences", |
| "authors": [ |
| { |
| "first": "Yuxiang", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiwen", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 22nd Pacific Asia Conference on Language, Information and Computation", |
| "volume": "", |
| "issue": "", |
| "pages": "207--214", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuxiang Jia and Shiwen Yu. 2008. Unsupervised Chi- nese verb metaphor recognition based on selectional preferences. In Proceedings of the 22nd Pacific Asia Conference on Language, Information and Compu- tation, pages 207-214, The University of the Philip- pines Visayas Cebu College, Cebu City, Philippines. De La Salle University, Manila, Philippines.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A corpus of non-native written english annotated for metaphor", |
| "authors": [ |
| { |
| "first": "Chee Wee", |
| "middle": [], |
| "last": "Beata Beigman Klebanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Leong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Flor", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/n18-2014" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Beata Beigman Klebanov, Chee Wee Leong, and Michael Flor. 2018. A corpus of non-native writ- ten english annotated for metaphor. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT, New", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Association for Computational Linguistics", |
| "authors": [ |
| { |
| "first": "Louisiana", |
| "middle": [], |
| "last": "Orleans", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Usa", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "86--91", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Orleans, Louisiana, USA, June 1-6, 2018, Volume 2 (Short Papers), pages 86-91. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Multilingual affect polarity and valence prediction in metaphor-rich texts", |
| "authors": [ |
| { |
| "first": "Zornitsa", |
| "middle": [], |
| "last": "Kozareva", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "682--691", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zornitsa Kozareva. 2013. Multilingual affect polarity and valence prediction in metaphor-rich texts. In Proceedings of the 51st Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 682-691, Sofia, Bulgaria. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Albert: A lite bert for self-supervised learning of language representations", |
| "authors": [ |
| { |
| "first": "Zhenzhong", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learn- ing of language representations.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A report on the 2020 vua and toefl metaphor detection shared task", |
| "authors": [ |
| { |
| "first": "Beata", |
| "middle": [ |
| "Beigman" |
| ], |
| "last": "Chee Wee Leong", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Klebanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Egon", |
| "middle": [], |
| "last": "Hamill", |
| "suffix": "" |
| }, |
| { |
| "first": "Rutuja", |
| "middle": [], |
| "last": "Stemle", |
| "suffix": "" |
| }, |
| { |
| "first": "Xianyang", |
| "middle": [], |
| "last": "Ubale", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Second Workshop on Figurative Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chee Wee Leong, Beata Beigman Klebanov, Chris Hamill, Egon Stemle, Rutuja Ubale, and Xianyang Chen. 2020. A report on the 2020 vua and toefl metaphor detection shared task. In Proceedings of the Second Workshop on Figurative Language Pro- cessing, Seattle, WA.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Endto-end sequential metaphor identification inspired by linguistic theories", |
| "authors": [ |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenghua", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Guerin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3888--3898", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1378" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rui Mao, Chenghua Lin, and Frank Guerin. 2019. End- to-end sequential metaphor identification inspired by linguistic theories. In Proceedings of the 57th An- nual Meeting of the Association for Computational Linguistics, pages 3888-3898, Florence, Italy. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Black holes and white rabbits: Metaphor identification with visual features", |
| "authors": [ |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Maillard", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "160--170", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1020" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekaterina Shutova, Douwe Kiela, and Jean Maillard. 2016. Black holes and white rabbits: Metaphor iden- tification with visual features. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 160-170, San Diego, California. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A method for linguistic metaphor identification: From mip to mipvu", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gerard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Steen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gerard J. Steen. 2010. A method for linguistic metaphor identification: From mip to mipvu.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Metaphor detection with cross-lingual model transfer", |
| "authors": [ |
| { |
| "first": "Yulia", |
| "middle": [], |
| "last": "Tsvetkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonid", |
| "middle": [], |
| "last": "Boytsov", |
| "suffix": "" |
| }, |
| { |
| "first": "Anatole", |
| "middle": [], |
| "last": "Gershman", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Nyberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, ACL 2014", |
| "volume": "1", |
| "issue": "", |
| "pages": "248--258", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/p14-1024" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yulia Tsvetkov, Leonid Boytsov, Anatole Gershman, Eric Nyberg, and Chris Dyer. 2014. Metaphor detec- tion with cross-lingual model transfer. In Proceed- ings of the 52nd Annual Meeting of the Association for Computational Linguistics, ACL 2014, June 22- 27, 2014, Baltimore, MD, USA, Volume 1: Long Pa- pers, pages 248-258. The Association for Computer Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A context-sensitive, multi-faceted model of lexico-conceptual affect", |
| "authors": [ |
| { |
| "first": "Tony", |
| "middle": [], |
| "last": "Veale", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "75--79", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tony Veale. 2012. A context-sensitive, multi-faceted model of lexico-conceptual affect. In Proceedings of the 50th Annual Meeting of the Association for Com- putational Linguistics (Volume 2: Short Papers), pages 75-79, Jeju Island, Korea. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Sentence length distribution of train set." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Sentence length distribution of test set." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "The architecture of our method." |
| }, |
| "TABREF0": { |
| "html": null, |
| "content": "<table><tr><td>Train</td><td>180</td><td>2741</td><td>45.5</td><td>0.03356</td></tr><tr><td>Test</td><td>60</td><td>968</td><td>22.9</td><td>/</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "text": "Number of articles Total number of sentences Average sentence length Proportion of positive samples" |
| }, |
| "TABREF1": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "The details of the data set." |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "Three different data input format preprocessing methods with baseline model." |
| }, |
| "TABREF4": { |
| "html": null, |
| "content": "<table><tr><td>: The result of the three different concatenation</td></tr><tr><td>method. 300-d means concatenating LSTM output and</td></tr><tr><td>the first 300 dimensions of ALBERT output as linear</td></tr><tr><td>layer input; 0-d means taking only LSTM output as lin-</td></tr><tr><td>ear layer input; Linear mapping means mapping AL-</td></tr><tr><td>BERT output through a linear layer to 300 dimensions</td></tr><tr><td>and concatenating it with LSTM output as linear layer</td></tr><tr><td>input.</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "text": "" |
| }, |
| "TABREF5": { |
| "html": null, |
| "content": "<table><tr><td/><td/><td>:</td></tr><tr><td/><td colspan=\"2\">ALLPOS VERB</td></tr><tr><td>TOEFL</td><td>0.707</td><td>0.728</td></tr><tr><td>VUA</td><td>0.712</td><td>0.755</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "text": "" |
| }, |
| "TABREF6": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "The F1-score of final model on TOEFL and VUA test sets." |
| } |
| } |
| } |
| } |