| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:15:50.620630Z" |
| }, |
| "title": "Buhscitu at SemEval-2020 Task 7: Assessing Humour in Edited News Headlines using Hand-Crafted Features and Online Knowledge Bases", |
| "authors": [ |
| { |
| "first": "Kristian", |
| "middle": [ |
| "N\u00f8rgaard" |
| ], |
| "last": "Jensen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IT University of Copenhagen", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Nicolaj", |
| "middle": [ |
| "Filrup" |
| ], |
| "last": "Rasmussen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IT University of Copenhagen", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Thai", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IT University of Copenhagen", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Placenti", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IT University of Copenhagen", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Plank", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IT University of Copenhagen", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes our system to assess humour intensity in edited news headlines as part of a participation in the 7th task of SemEval-2020 on \"Humor, Emphasis and Sentiment\". Various factors need to be accounted for in order to assess the funniness of an edited headline. We propose an architecture that uses hand-crafted features, knowledge bases and a language model to understand humour, and combines them in a regression model. Our system outperforms two baselines. In general, automatic humour assessment remains a difficult task.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes our system to assess humour intensity in edited news headlines as part of a participation in the 7th task of SemEval-2020 on \"Humor, Emphasis and Sentiment\". Various factors need to be accounted for in order to assess the funniness of an edited headline. We propose an architecture that uses hand-crafted features, knowledge bases and a language model to understand humour, and combines them in a regression model. Our system outperforms two baselines. In general, automatic humour assessment remains a difficult task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Humour aims at generating amusement and laughter and can for this reason be considered one of the features enabling the creation of relationships in the interactions between humans. Understanding humour requires factual knowledge, context comprehension and-arguably-intelligence. Multiple factors play a role in the definition of humour, such as geographical location, culture, level of education and many others. This obviously makes the task of humour detection very hard for machines and artificially intelligent systems. In recent years, researchers operating in the field of computational linguistics have started to look into the topic, and a lot of progress has been made since the seminal paper by Mihalcea and Strapparava (2005) . However, the quality of data sets leaves many questions unanswered, mainly because they are made of single punchlines or because sentences are divided into binary categories. Hossain et al. (2019) makes a remarkable effort on creating a data set of edited headlines where each headline is assigned a score representing the intensity of humour of that headline. This innovative data set enables researchers to conduct studies on a more granular level and may unlock novel techniques to get closer to a more efficient and successful computational model of humour. In this paper we propose an architecture that accounts for multiple factors that we believe play an important role in detecting the intensity of humour in a headline. In order to analyse the sentences, we include hand-crafted features extracted from the sentence itself and enable the system to look for the meaning of unknown objects using NELL (Never-Ending Language Learning) (Mitchell et al., 2015) . This paper is a description of a system providing a solution to the SemEval2020 Task 7 (Hossain et al., 2020) , which ranked 22 nd out of 49 teams.", |
| "cite_spans": [ |
| { |
| "start": 706, |
| "end": 737, |
| "text": "Mihalcea and Strapparava (2005)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 915, |
| "end": 936, |
| "text": "Hossain et al. (2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1681, |
| "end": 1704, |
| "text": "(Mitchell et al., 2015)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1794, |
| "end": 1816, |
| "text": "(Hossain et al., 2020)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The task is comprised of two sub-tasks. The first task is a regression task aimed at predicting the humour intensity of an edited headline. The second one is a classification task in which it is required to select the funnier headline out of the two provided. Our main focus was on the first sub-task, as predicting the humour intensity of the two headlines would imply establishing which of the two has the higher score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Due to its complexity and the prerequisite for a deep understanding of humour, previous research contributions towards automatic humour recognition have been made in selected aspects of humour. Mihalcea and Strapparava (2005) introduced a binary classification task in humour recognition, using humour-specific features such as alliteration, antonyms and adult slang in conjunction with traditional text classification models: Naive Bayes and SVM. Due to feasibility, the work focused solely on short sentences, one-liners, news headlines and proverbs. In recent years with the emergence of Deep Learning, the usage of Convolutional Neural Networks and Highway Networks for humour classification tasks with a similar scope focused on puns, one-liners and short jokes was presented in Chen and Soo (2018) . Another way to find jokes is on different social media platforms. Weller and Seppi (2019) used data scraped from Reddit to assess whether a joke is funny or not. They demonstrated the effectiveness of the transformer architecture for humour classification. Purandare and Litman (2006) classified the spoken turns of the TV-show FRIENDS into humour and non-humour classes. They did so by employing the ADTree algorithm on lexical, prosody and speaker features.", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 225, |
| "text": "Mihalcea and Strapparava (2005)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 784, |
| "end": 803, |
| "text": "Chen and Soo (2018)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 872, |
| "end": 895, |
| "text": "Weller and Seppi (2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1063, |
| "end": 1090, |
| "text": "Purandare and Litman (2006)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Another related work dedicated to the effort to assimilate external knowledge is the study reported in Yang and Mitchell (2017) . The work introduced an approach to leverage external knowledge bases, such as NELL (Mitchell et al., 2015) and WordNet (Miller, 1995) (a lexical database), in order to integrate the background knowledge and enhance the learning on LSTM.", |
| "cite_spans": [ |
| { |
| "start": 103, |
| "end": 127, |
| "text": "Yang and Mitchell (2017)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 213, |
| "end": 236, |
| "text": "(Mitchell et al., 2015)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Finally, with the extensive work done in Hossain et al. (2019) on humour generation, the goal of the study was to generate a carefully curated dataset of news headlines with simple edits, based on robust generation strategies that emphasise free form over traditional jokes with a strong template. This facilitates further research into the shared tasks described and performed in this report.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 62, |
| "text": "Hossain et al. (2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The data set consists of micro-edits on headlines: one word has been replaced by another word, e.g. \"How Trump Just Made America (Pilates) Less Safe\". Five Mechanical Turks are asked to assign a score between 0 \u2212 3 to each headline (0: not funny, 1: slightly funny, 2: moderately funny, to 3: funny) (Hossain et al., 2019) . The overall score of the headline is then the average of those five scores. Similarly to Hossain et al. 2019, we find the scores to have a correlation with the headline length -measured as number of tokens present in it -and the relative position of the replaced word within the headline. The humour increases if the edit happens toward the end of the headline, as can be seen in ", |
| "cite_spans": [ |
| { |
| "start": 300, |
| "end": 322, |
| "text": "(Hossain et al., 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Analysis", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section we outline the structure of our system and go into details on the different components. The proposed system consists of three encoders which handle three different types of inputs. Sections 4.1 to 4.3 explain the inputs and how they are handled in each of the three encoders. Section 4.4 outlines how the results from each of the three encoders are combined and processed. Section 4.5 goes through the training parameters and extra tricks we use to get more performance from our model. The overall structure of the model is shown in Figure 2 . Section 4.6 explores further improvements we have developed after the official submission deadline.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 549, |
| "end": 557, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The word encoder handles representations of both the replaced and the replacement words in the edited headline. The encoder first encodes each of the words using a pre-trained neural probabilistic language model (NNLM) (Bengio et al., 2003) . For each of the two words it processes the representation using a Feed Forward Neural Network (FFNN) that consists of three layers (See appendix A). The NNLM and the FFNN weights are the same for each of the two words, and thus it works as a simple Siamese network (Chopra et al., 2005) . After both of the words have been processed the representations are concatenated before proceeding in the neural network.", |
| "cite_spans": [ |
| { |
| "start": 219, |
| "end": 240, |
| "text": "(Bengio et al., 2003)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 508, |
| "end": 529, |
| "text": "(Chopra et al., 2005)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Encoder", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Feature Encoder Knowledge Encoder The feature encoder takes four features that encode humour specific information from the headlines. Each feature helps the model to better understand the concepts behind humour and helps outline the strategies used by the annotators. The features are processed using a 2 layer FFNN (See appendix A).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Encoder", |
| "sec_num": null |
| }, |
| { |
| "text": "y[0, 3] w replaced w replacement", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Encoder", |
| "sec_num": null |
| }, |
| { |
| "text": "The first feature encodes the relative position of the replaced word. The position index is normalised by the maximum index to provide a number between 0 \u2212 1. It informs the system of whether the headline functions as a punchline or not.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Relative Position", |
| "sec_num": null |
| }, |
| { |
| "text": "The second feature encodes the length of the headline, as shown in fig. 1 . The length is normalised by the maximum length in the data set, thus providing a number between 0 \u2212 1. Hossain et al. (2019) uncovered a relation between the length of the headline and the score, showing that the longer headlines had the possibility of also scoring higher. This makes it a promising feature to include.", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 200, |
| "text": "Hossain et al. (2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 67, |
| "end": 73, |
| "text": "fig. 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence Length", |
| "sec_num": null |
| }, |
| { |
| "text": "Phonetic Distance For the third feature the replacement and the replaced words are transcribed into phonemes and the Levenshtein distance between them is calculated, as shown in table 1. The distance is normalised by the maximum phoneme length. This feature is used to encode information regarding the strategy uncovered by Hossain et al. (2019) , about connections between the replaced and the replacement word. Here the annotators often replaced a word with either a similar sounding word or a semantically different word.", |
| "cite_spans": [ |
| { |
| "start": 324, |
| "end": 345, |
| "text": "Hossain et al. (2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Length", |
| "sec_num": null |
| }, |
| { |
| "text": "Relative Distance The fourth and last feature encodes the cosine distance between the replaced and replacement word embeddings. FastText embeddings trained on Wikipedia 2017, UMBC webbase corpus and statmt.org news data (Mikolov et al., 2018) are used. Another of the strategies found by Hossain et al. (2019) is the insertion of incongruity. We hypothesise that finding the similarity between the two words (replaced and replacement) is to some degree related to incongruity. ", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 242, |
| "text": "(Mikolov et al., 2018)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 288, |
| "end": 309, |
| "text": "Hossain et al. (2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Length", |
| "sec_num": null |
| }, |
| { |
| "text": "Levenshtein distance 'Syria' \u2192 'S IH1 R IY0 AH0' 'cereal' \u2192 'S IH1 R IY0 AH0 L' 0.1176 'coup' \u2192 'K UW1' 'ignorance' \u2192 'IH1 G N ER0 AH0 N S' 0.9474", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Replaced word Replacement word", |
| "sec_num": null |
| }, |
| { |
| "text": "The knowledge encoder is searching the headline for any known entities occurring in the NELL database or hypernym in WordNet. Table 3 lists some example headlines that contain entities such as named entities that we have highlighted in blue, which we believe would benefit from relations and its implication through their common parent defined by NELL. In contrast to a lexical database, NELL features entities that are obtained by reading the web, thus filling the gap in comprehension of concepts that are time- and event-based. Even though NELL is a large network, it alone is insufficient in covering a significant part of each headline (see table 2). Each noun is converted to an IS-A relation by adding its first occurring hypernym as its generalisation. With the integration of WordNet into NELL, our coverage of entities in each headline improves significantly (see table 2).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 133, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Knowledge Encoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For each entity in the NELL-WordNet vocabulary, we have created an embedded representation using a Neural Association Model (NAM) presented by Liu et al. (2016) 1 . The model looks up each word in the headline and checks for an occurrence in the NELL-WordNet vocabulary. If it does exist, it will be represented by the corresponding embedding, and if it does not exist it is represented by a zero vector. The found entity embeddings and zero vectors are then summed together before they are processed in a 2-layer FFNN (See appendix A).", |
| "cite_spans": [ |
| { |
| "start": 143, |
| "end": 160, |
| "text": "Liu et al. (2016)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge Encoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Breitbart News 29th Most Trafficked Site in America , overtakes PornHub and ESPN. combines Barack Obama threatens to upstage Donald Trump 's Europe trip as he visits Germany. acid Delhi smog chokes India capital with air pollution 10 times worse than Beijing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Original Headline Substitute", |
| "sec_num": null |
| }, |
| { |
| "text": "curry Elon Musk has just blasted the world 's most powerful rocket into space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Original Headline Substitute", |
| "sec_num": null |
| }, |
| { |
| "text": "wall Table 3 : Example of headlines from the training data that would not have turned out as fun without the necessary background knowledge. Red denotes a replaced word, and blue denotes a named entity that would benefit from the integration of a knowledge base like NELL.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 12, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Original Headline Substitute", |
| "sec_num": null |
| }, |
| { |
| "text": "A simple linear regression is applied to the concatenation of the three encoders output described above. It predicts an output in the range [0, 3] . Several output layer configurations were tested but none outperformed this simple regression.", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 143, |
| "text": "[0,", |
| "ref_id": null |
| }, |
| { |
| "start": 144, |
| "end": 146, |
| "text": "3]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We used Keras (Chollet and others, 2015) with the TensorFlow backend (Abadi et al., 2015) . The pretrained models, NNLM 2 and Albert 3 , were provided by the TensorFlow Hub module. For the phonetic feature we used the \"g2p: English Grapheme To Phoneme Conversion\" (Park and Kim, 2019) library.", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 40, |
| "text": "(Chollet and others, 2015)", |
| "ref_id": null |
| }, |
| { |
| "start": 69, |
| "end": 89, |
| "text": "(Abadi et al., 2015)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental setup", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "The Adam optimiser (Kingma and Ba, 2015) was used with a step decay learning rate schedule. The learning rate was initialised to 0.005 and drops by a factor of 2 every 10 epochs. The model used for the official submission was trained for 25 epochs where it converges. For the subsequent hyperparameter tuning we used the newly created Keras-Tuner library, which is built specifically for Keras.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental setup", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "After the official submission, the model has been further improved in two ways. First, we dedicated time for hyperparameter tuning. The Hyperband optimisation method (Li et al., 2018) was used for hyperparameter tuning. Hyperband is a Bandit-based approach to the hyperparameter tuning problem. The algorithm extends the SuccessiveHalving algorithm by using it as a subroutine. It does so to automatically select the number of configurations to try given a finite budget. The resulting model can be seen in appendix B. The tuning was done over all parameters in the network, and ran for 8 Hyperband iterations. We tested multiple layers in each of the encoders, different layer sizes, the amount of dropout and the activation function. It was found that adding extra layers to the output layer did not result in an increase in performance, thus the output was kept as is. The resulting score can be seen in table 4.", |
| "cite_spans": [ |
| { |
| "start": 166, |
| "end": 183, |
| "text": "(Li et al., 2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Improvements after official submission", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "In the official model configuration only the word to be replaced and the replacement word is used as context of the headline itself. The original idea was to use the NNLM part of the word encoder to encode the entire sentence. However, it was found during preliminary experiments that this did not improve performance compared to encoding just the words. In order to address this an extended configuration is made with a separate context encoder based on an Albert model (Lan et al., 2019) . The encoder takes the entire headline except the replaced word and creates context embeddings for it. The contextual embeddings are created by running the headline through the Albert model and extracting the pooled output. The embeddings created by the Albert model are processed using a 2 layer FFNN to scale down the representation and let the model process it before concatenating it with the other encoder results. The new model architecture can be seen in fig. 4 .", |
| "cite_spans": [ |
| { |
| "start": 471, |
| "end": 489, |
| "text": "(Lan et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 953, |
| "end": 959, |
| "text": "fig. 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Improvements after official submission", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "In this section we outline the results gathered from experiments with both the official model and the alternative model created after the official submission deadline. As main evaluation metric we use Root Mean Square Error (RMSE), which is defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "$\\sqrt{\\sum_{i=1}^{n} (\\hat{y}_i - y_i)^2 / n}$ (1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Test Score", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": null |
| }, |
| { |
| "text": "Official Baseline (Mean) 0.57471 Linear Regression 0.57361 Our official submission 0.55115 HP Tuned official model 0.54376 Model w/ Albert context 0.54341 Table 4 : Scores on the test set", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 155, |
| "end": 162, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": null |
| }, |
| { |
| "text": "Official Results The proposed model achieves an RMSE of 0.5511 on the test set. This is also our final and official score in the competition. It gives a ranking of 22 out of 49 teams. The official baseline given for the task is the overall mean funniness grade in the training set, as reported in table 4. To create our own baseline we have set up a Linear Regression model that uses our hand-crafted features. As shown in table 4 the linear regression model reaches only around baseline performance. Our official full model presented in this paper outperforms both baselines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": null |
| }, |
| { |
| "text": "Improvements We present two additional runs as introduced in section 4.6. Results on the test set are shown in the two last rows in table 4. We note that both tuning the system on dev and the integration of context from the headline (the context encoder with Albert embeddings) pushes performance further (after the official submission), thereby confirming our hypothesis that more headline context is helpful.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section we show results of an ablation study of our official model and discuss limitations of it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Ablation Studies Figure 3 shows the performance on the training and development sets for each of the ablated features of the submitted full model. For a more detailed overview see Appendix D. Word identity of the micro-edits turns out to be the most important feature. A clear decrease in performance (higher RMSE) can be observed on both training and development set when the Word Encoder (WE) is removed. Likewise, removing one of the two word inputs to the Word Encoder causes an increase in RMSE on the training set. Excluding the Knowledge Base (KB) tells a similar story, causing an increase of median RMSE on the training set (however, not in mean score, as shown in the appendix).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 17, |
| "end": 25, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Unfortunately, our hand-crafted features alone cause no detectable difference on either the training or the development set. Neither the feature encoder nor the knowledge encoder cause an increase in development error when removed individually. Interestingly, when both encoders are removed simultaneously (KF) an increased training error can be observed, albeit the difference is negligible on the development data. When removing the Word Encoder in combination with one of the two other encoders it performs notably worse, as expected. It is interesting to note that the combined word and feature encoder model results in the highest drop (see appendix D), but it is also the most unstable model with the highest variation as the box plot in Figure 3 reveals. This points at the importance of investigating both mean and median scores. From the single hand-crafted features we notice that contrary to expectations, the phoneme-based feature hurts performance; leaving it out improves overall RMSE, which is disappointing. Similarly, the position or length-related features of the headline itself are not helpful either.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 744, |
| "end": 752, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Limitations An underlying assumption of the proposed architecture is that some knowledge of broader context is required in order to accurately understand humour. This is also noted by Hossain et al. (2019) , who state that understanding humour often requires real world-knowledge and common sense. Successfully exploiting such knowledge in a neural model is still very difficult.", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 205, |
| "text": "Hossain et al. (2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We propose one way to integrate knowledge-base information via the Knowledge Encoder. However, we are unable to show any significant improvement in model performance by integrating the knowledge in the manner proposed. A possible reason for this is that the NELL and WordNet databases do not encode the necessary information for this particular task. It is also possible that the way it is employed in the model is not appropriate for the type of data it is based on, or our CBOW aggregation is too simplistic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Another limitation is the set of hand-crafted features. The phoneme-based feature using Levenshtein distance surprisingly hurts performance. Future work could study other ways of leveraging knowledge bases, integrating hand-crafted features and contextualised word representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We proposed a simple neural model which uses three encoders to model humour intensity of edited headlines. Our official submission obtained an RMSE of 0.55115 (top scoring team: 0.49725). Our ablation study shows that the most important information is word identity of the micro-edits, followed by knowledge base representations. However, we note that the way that we implement the knowledge encoder and phonemic information is somewhat ineffective in capturing the information we hoped it would, which leaves room for future work on this challenging task. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The embeddings can be found in the project repo: https://github.com/bachelorbois/HumorHeadlines 2 NNLM: https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1 3 Albert: https://tfhub.dev/tensorflow/albert_en_base/1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the HPC support at ITU, especially Frey Alfredsson, for support for the computational resources used in this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Tensor-Flow: Large-scale machine learning on heterogeneous systems. Software available from tensorflow.org", |
| "authors": [ |
| { |
| "first": "Mart\u00edn", |
| "middle": [], |
| "last": "Abadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Barham", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Brevdo", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Craig", |
| "middle": [], |
| "last": "Citro", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Davis", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Devin", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjay", |
| "middle": [], |
| "last": "Ghemawat", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Goodfellow", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Harp", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Irving", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Isard", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangqing", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Rafal", |
| "middle": [], |
| "last": "Jozefowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Manjunath", |
| "middle": [], |
| "last": "Kudlur", |
| "suffix": "" |
| }, |
| { |
| "first": "Josh", |
| "middle": [], |
| "last": "Levenberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mart\u00edn Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S. Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, Geoffrey Irv- ing, Michael Isard, Yangqing Jia, Rafal Jozefowicz, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dan- delion Man\u00e9, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Mike Schuster, Jonathon Shlens, Benoit Steiner, Ilya Sutskever, Kunal Talwar, Paul Tucker, Vincent Vanhoucke, Vijay Vasudevan, Fernanda Vi\u00e9gas, Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. 2015. Tensor- Flow: Large-scale machine learning on heterogeneous systems. Software available from tensorflow.org.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A neural probabilistic language model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Jauvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of machine learning research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Jauvin. 2003. A neural probabilistic language model. Journal of machine learning research, 3(Feb):1137-1155.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Humor recognition using deep learning", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Von-Wun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Soo", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "113--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng-Yu Chen and Von-Wun Soo. 2018. Humor recognition using deep learning. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 113-117.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Learning a similarity metric discriminatively, with application to face verification", |
| "authors": [ |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Raia", |
| "middle": [], |
| "last": "Hadsell", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)", |
| "volume": "1", |
| "issue": "", |
| "pages": "539--546", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sumit Chopra, Raia Hadsell, and Yann LeCun. 2005. Learning a similarity metric discriminatively, with ap- plication to face verification. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), volume 1, pages 539-546. IEEE.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "president vows to cut <taxes> hair\": Dataset and analysis of creative text editing for humorous headlines", |
| "authors": [ |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Krumm", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "133--142", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabil Hossain, John Krumm, and Michael Gamon. 2019. \"president vows to cut <taxes> hair\": Dataset and analysis of creative text editing for humorous headlines. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 133-142, Minneapolis, Minnesota, June.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Semeval-2020 Task 7: Assessing humor in edited news headlines", |
| "authors": [ |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Krumm", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| }, |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Kautz", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2020)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabil Hossain, John Krumm, Michael Gamon, and Henry Kautz. 2020. Semeval-2020 Task 7: Assessing humor in edited news headlines. In Proceedings of International Workshop on Semantic Evaluation (SemEval-2020), Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun, editors, 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Albert: A lite bert for self-supervised learning of language representations", |
| "authors": [ |
| { |
| "first": "Zhenzhong", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learning of language representations.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Hyperband: A novel bandit-based approach to hyperparameter optimization", |
| "authors": [ |
| { |
| "first": "Lisha", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Jamieson", |
| "suffix": "" |
| }, |
| { |
| "first": "Giulia", |
| "middle": [], |
| "last": "Desalvo", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "18", |
| "issue": "185", |
| "pages": "1--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lisha Li, Kevin Jamieson, Giulia DeSalvo, Afshin Rostamizadeh, and Ameet Talwalkar. 2018. Hyperband: A novel bandit-based approach to hyperparameter optimization. Journal of Machine Learning Research, 18(185):1-52.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Probabilistic reasoning via deep learning: Neural association models", |
| "authors": [ |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Evdokimov", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1603.07704" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quan Liu, Hui Jiang, Andrew Evdokimov, Zhen-Hua Ling, Xiaodan Zhu, Si Wei, and Yu Hu. 2016. Probabilistic reasoning via deep learning: Neural association models. arXiv preprint arXiv:1603.07704.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Making computers laugh: Investigations in automatic humor recognition", |
| "authors": [ |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlo", |
| "middle": [], |
| "last": "Strapparava", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Conference on Human Language Technology and Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "531--538", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rada Mihalcea and Carlo Strapparava. 2005. Making computers laugh: Investigations in automatic humor recog- nition. In Proceedings of the Conference on Human Language Technology and Empirical Methods in Natural Language Processing, pages 531-538. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Advances in pre-training distributed word representations", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Puhrsch", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Edouard Grave, Piotr Bojanowski, Christian Puhrsch, and Armand Joulin. 2018. Advances in pre-training distributed word representations. In Proceedings of the International Conference on Language Resources and Evaluation (LREC 2018).", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Wordnet: a lexical database for english", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "George", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Communications of the ACM", |
| "volume": "38", |
| "issue": "11", |
| "pages": "39--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A Miller. 1995. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Never-ending learning", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hruschka", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Talukdar", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Betteridge", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Carlson", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Kisiel", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Krishnamurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Lao", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Mazaitis", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Nakashole", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Platanios", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Samadi", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Settles", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wijaya", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Saparov", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Greaves", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence (AAAI-15)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Mitchell, W. Cohen, E. Hruschka, P. Talukdar, J. Betteridge, A. Carlson, B. Dalvi, M. Gardner, B. Kisiel, J. Kr- ishnamurthy, N. Lao, K. Mazaitis, T. Mohamed, N. Nakashole, E. Platanios, A. Ritter, M. Samadi, B. Settles, R. Wang, D. Wijaya, A. Gupta, X. Chen, A. Saparov, M. Greaves, and J. Welling. 2015. Never-ending learning. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence (AAAI-15).", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Humor: Prosody analysis and automatic recognition for f* r* i* e* n* d* s", |
| "authors": [ |
| { |
| "first": "Amruta", |
| "middle": [], |
| "last": "Purandare", |
| "suffix": "" |
| }, |
| { |
| "first": "Diane", |
| "middle": [], |
| "last": "Litman", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 2006 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "208--215", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amruta Purandare and Diane Litman. 2006. Humor: Prosody analysis and automatic recognition for f* r* i* e* n* d* s. In Proceedings of the 2006 Conference on Empirical Methods in Natural Language Processing, pages 208-215.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Humor detection: A transformer gets the last laugh", |
| "authors": [ |
| { |
| "first": "Orion", |
| "middle": [], |
| "last": "Weller", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Seppi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3612--3616", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Orion Weller and Kevin Seppi. 2019. Humor detection: A transformer gets the last laugh. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3612-3616.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Leveraging knowledge bases in LSTMs for improving machine reading", |
| "authors": [ |
| { |
| "first": "Bishan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1436--1446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bishan Yang and Tom Mitchell. 2017. Leveraging knowledge bases in LSTMs for improving machine reading. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1436-1446, Vancouver, Canada, July. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Figure 1.", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Distributions of headline length and edit relative position", |
| "num": null |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Our proposed model architecture", |
| "num": null |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Ablation Study Results. RMSE: Lower is better.", |
| "num": null |
| }, |
| "TABREF0": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "Example of phonetic distance feature showing transcription from grapheme to phoneme." |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "Number of entities found in training data and NELL dictionary" |
| }, |
| "TABREF4": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "Ablation Study Results; F = Full Model; EW = Edited Word; OW = Original Word; WE = Word Encoder; KB = Knowledge-Base Encoder; WD = Words Distance; WP = Words Position; SL = Sentence" |
| } |
| } |
| } |
| } |