| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:18:31.466214Z" |
| }, |
| "title": "ELMo-NB at SemEval-2020 Task 7: Assessing Sense of Humor in Edited News Headlines Using ELMo and NB", |
| "authors": [ |
| { |
| "first": "Enas", |
| "middle": [], |
| "last": "Khwaileh", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Jordan University of Science and Technology-Irbid", |
| "location": { |
| "country": "Jordan" |
| } |
| }, |
| "email": "ekhwaileh18@cit.just.edu.jo" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper, we present our submission for SemEval-2020 competition subtask 1 in Task 7 (Hossain et al., 2020a): Assessing Humor in Edited News Headlines. The task consists of estimating the hilariousness of news headlines that have been modified manually by humans using micro-edit changes to make them funny. Our approach is constructed to improve on a couple of aspects; preprocessing with an emphasis on humor sense detection, using embeddings from state-of-the-art language model (ELMo), and ensembling the results came up with using machine learning model Na\u00efve Bayes (NB) with a deep learning pretrained models. ELMo-NB participation has scored (0.5642) on the competition leader board, where results were measured by Root Mean Squared Error (RMSE).", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper, we present our submission for SemEval-2020 competition subtask 1 in Task 7 (Hossain et al., 2020a): Assessing Humor in Edited News Headlines. The task consists of estimating the hilariousness of news headlines that have been modified manually by humans using micro-edit changes to make them funny. Our approach is constructed to improve on a couple of aspects; preprocessing with an emphasis on humor sense detection, using embeddings from state-of-the-art language model (ELMo), and ensembling the results came up with using machine learning model Na\u00efve Bayes (NB) with a deep learning pretrained models. ELMo-NB participation has scored (0.5642) on the competition leader board, where results were measured by Root Mean Squared Error (RMSE).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Checking the degree of sentence sense of Humor through understanding and analyzing humans natural language and by connecting the text to an intelligent system is considered a critical task (Rastogi et al., 2020) . Hence Expressing the readers and writers opinions can increase several emotions, we still need to expand the positive texts and establish a way for analyzing it (Salminen et al., 2020) . Humor is considered a great way to the reader, it is resembling therapy (Ziabari and Treur, 2020) .", |
| "cite_spans": [ |
| { |
| "start": 189, |
| "end": 211, |
| "text": "(Rastogi et al., 2020)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 375, |
| "end": 398, |
| "text": "(Salminen et al., 2020)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 473, |
| "end": 498, |
| "text": "(Ziabari and Treur, 2020)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Producing machines that can determine whether the sentence contains some degree of sense of humor or not is gaining a great attention recently (Abdullah and Shaikh, 2018) . Since social media is taking over most of people's daily life routines , the culture and environment affect the content greatly (Downey et al., 2006) (Zhao et al., 2020) . Numerous factors have brought increasing attention to real-life tasks such as text classification (Howard and Ruder, 2018) (Conneau et al., 2016) (Al-Omari et al., 2020) and other text analysis like pun classification (Diao et al., 2020) . Moreover, (Miller et al., 2020) came up with an idea to detect the tweets humorousness using Gaussian Process.", |
| "cite_spans": [ |
| { |
| "start": 143, |
| "end": 170, |
| "text": "(Abdullah and Shaikh, 2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 301, |
| "end": 322, |
| "text": "(Downey et al., 2006)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 323, |
| "end": 342, |
| "text": "(Zhao et al., 2020)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 443, |
| "end": 467, |
| "text": "(Howard and Ruder, 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 468, |
| "end": 490, |
| "text": "(Conneau et al., 2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 491, |
| "end": 514, |
| "text": "(Al-Omari et al., 2020)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 563, |
| "end": 582, |
| "text": "(Diao et al., 2020)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 595, |
| "end": 616, |
| "text": "(Miller et al., 2020)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Several Natural Language Processing (NLP) applications and tools are proliferating recently (Kumar and Garg, 2020) , (Hirschberg and Manning, 2015) , especially with the rise of Deep Learning (DL) and Machine Learning (ML) enhancements (Duerr and Ramdeen, 2017) (Young et al., 2018) . One of NLP state of the art approaches is ELMo language preprocessing model as a pretrained model on general NLP tasks of language modeling (Reimers and Gurevych, 2019) . ELMo can be finetuned on specific tasks like next word prediction (Siddiqui and Hassan, 2019) , translation (Li and Chen, 2019) or question answering (McCann et al., 2018) and semantic text (Al-Asa'd et al., 2019) . Another NLP state of the art is BERT (Peters et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 114, |
| "text": "(Kumar and Garg, 2020)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 117, |
| "end": 147, |
| "text": "(Hirschberg and Manning, 2015)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 236, |
| "end": 261, |
| "text": "(Duerr and Ramdeen, 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 262, |
| "end": 282, |
| "text": "(Young et al., 2018)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 425, |
| "end": 453, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 522, |
| "end": 549, |
| "text": "(Siddiqui and Hassan, 2019)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 564, |
| "end": 583, |
| "text": "(Li and Chen, 2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 606, |
| "end": 627, |
| "text": "(McCann et al., 2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 646, |
| "end": 669, |
| "text": "(Al-Asa'd et al., 2019)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 704, |
| "end": 730, |
| "text": "BERT (Peters et al., 2018)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "SemEval-2020 competition in Task 7 1 has 313 as a total number of participants. The goal of this task is to assess humor in news headlines that have been modified using short edits to make them funny. There are two subtasks as follows: Sub-task 1 (Funniness Estimation): regression problem, the goal is to assign a funniness grade to an edited headline between [0-3], where the systems will be ranked by Root Mean Squared Error (RMSE). Sub-task 2 (Funnier of the Two): a classification problem, given two different edited versions of the same headline. The goal of this sub task is to predict which is the funnier of the two. Systems will be ranked by prediction accuracy. Participant \"oyx\" scored the first place in both subtasks, scoring RMSE (0.50157) for subtask 1, and accuracy (0.67237) for subtask 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we have experimented ELMo with NB regression to predict the mean funniness of the edited headline in subtask 1. The main target of this task is discovering the atomic change and the tipping between the original and the humorous sentence. As a result, we need to rank the sentences between [0-3]: scores (0) it is not funny, Slightly Funny score (1), moderately funny score (2), and very funny score (3). The micro-editing made to the sentence to make them funny is defined as replacing a noun by a different noun phrase, an entity with different nouns and a verb with a different verb as in (Kanakaraj and Guddeti, 2015) . ELMo with NB is showing an amazing performance in predicting the value of humor in the news headline.", |
| "cite_spans": [ |
| { |
| "start": 606, |
| "end": 635, |
| "text": "(Kanakaraj and Guddeti, 2015)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of this paper is presented as follows: Section 2 overviews related work. Section 3 describes the methodology proposed in this paper. Section 4 overviews results and discusses the most important findings of some experiments and models evaluation. Finally, Section 5 concludes this research and provides possibilities for future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Creating a model that is able to judge the sensitivity to be humorist or not is still a critical task. Several Machine Learning (ML) and Deep Learning (DL) approaches are recommended strongly for working on detecting humorist sentences. For example, using BERT DL pretrained model has a significant role in detecting sentiments and emotions in text (Al-Omari et al., 2019) . A team (Mao and Liu, 2019) participated in the HAHA 2019 task used BERT as a bi-directional representation and Fine-tuned pretrain dataset. They obtained the output layer after training the model with the Mean Squad Error (MSE). Other researchers (Potash et al., 2017) added a new task called shared task between the first and second approaches to explore humour. They focused on experimentally comparing hashtag wars from TV show @midnight. The neural network-based system recorded the higher rank.", |
| "cite_spans": [ |
| { |
| "start": 349, |
| "end": 372, |
| "text": "(Al-Omari et al., 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 382, |
| "end": 401, |
| "text": "(Mao and Liu, 2019)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 622, |
| "end": 643, |
| "text": "(Potash et al., 2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Literature Reviews", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Researchers (Joshi et al., 2016) proposed a new approach to detect sarcasm. In their experiment, they created a dataset based on quotes GoodReads website, which is one of the largest sites for reading book recommendations. They used word embedding with four types LSA, GloVe, Dependency-based, and Word2Vec. A similar task is what researchers (Hossain et al., 2020b ) did recently, where they created a competitive game called Funlines. The users can edit the news headlines. The new sentence has some degree for the sense of humour; they set a method to define the funlines and organizing the sentences to categorize (fun, interactive, collaborative, rewarding, and educational). The classification improvements used to check the performance with and without this dataset augmentation. They were showing that using BERT gave much better results than using LSTM with GloVe word vectors as a benchmark results. The application provides useful feedback to users, to improve their ability to learn and upgrade the level of humourist sentence. In this way the newly generated dataset is performing better.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 32, |
| "text": "(Joshi et al., 2016)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 343, |
| "end": 365, |
| "text": "(Hossain et al., 2020b", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Literature Reviews", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The dataset in this paper is obtained from subtask 1 in Task7-SemEval-2020 competition 2 (Hossain et al., 2019) . The researchers collected dataset from the Reddit website related to news headlines. The number of headlines on the train (9652), dev (2419), and test (3025). The teams are asked to predict the mean funniest of each edited headline. In our proposed model, we replace the target word instead of the word between the tags < / > in the original headline in both train and dev datasets in terms of predicting the mean funniest value. Then, we replace some of the abbreviations in the data, such as \"he's \" to \"he is\" applied in on all dataset as shown in Table 1 . To make this dataset more understandable, useful and ready fit in any models, we have applied a set of preprocessing techniques like converting the data to lower case, remove stemming, stop words, tokenization, punctuation marks, common & rare words, and lemmatization.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 111, |
| "text": "(Hossain et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 665, |
| "end": 672, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset Preparing and Cleansing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "New Headline Grade appar first iran israel engag militarili appar first iran israel slap militarili 0.4 told week ago flynn misl vice presid told week ago flynn misl school presid 0 franc hunt citizen join isi without trial iraq franc hunt citizen join twin without trial iraq 0.2 john kerri get presidenti fever might challeng 2020 john kerri get presidenti fever might snuggl 2020 2.6 Table 1 : Sample data from the training set", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 387, |
| "end": 394, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Original Headline", |
| "sec_num": null |
| }, |
| { |
| "text": "We have used different pre-trained word embeddings to convert each word in the input into a vector representation of 300-Dimensional word vectors. The most popular NLP pretrained models are ELMo and BERT systems. We have used ELMo as the main pretrained system for our submission.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embeddings", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "ELMo is a pre-trained model developed by Matthew Peters in 2017 and available on TensorFlow hub. This model is a contextualized deep model, which means it looks at the whole sentence before putting the embedding for the words. The ELMo is a novel technique that assigns each word vectors or embedding based on the context and used Bidirectional LSTM idea. In other words, it applies the forward and backward on each word and concatenates the two values at each layer as shown in the below figure. ELMo can deal with different NLP tasks like question answering, named entity extraction and sentiment analysis as shown BERT is a language model developed by Google in 2018 and trained on large datasets like Wikipedia. This model is performed on NLP tasks like sentiment and emotion analysis (Sun et al., 2019) , and question answering (Yang et al., 2019) . BERT converts the words into vectors or embeddings based on the context and uses the transformer method. It is a deeply bidirectional way, which means from right to left and left to right. The transformers contains encoder (read the dataset) and decoder (produce the prediction task). Through examples training, it uses two strategies which are Masked Language Model (MLM), and Next Sentence Prediction (NSP). The MLM, works by replacing 15% of words by masking each word, and try to predict these words based on the non-mask words. While the NSP, works by learning the relationship between the two sentences and produce a label in terms of the second sentence is the next sentence or not based on the meaning between them. As shown in Figure 2 , BERT applies some of the operations on the dataset before reading it: 1) add [CLS] at the beginning of the sentence and [Sep] at the end of each sentence, 2) Apply Token Embeddings, 3) Sentence Embeddings, 4) Transformer positional Embeddings.", |
| "cite_spans": [ |
| { |
| "start": 789, |
| "end": 807, |
| "text": "(Sun et al., 2019)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 833, |
| "end": 852, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1679, |
| "end": 1684, |
| "text": "[CLS]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1591, |
| "end": 1599, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word Embeddings", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We have used the Root Mean Square Error (RMSE) value to measure the performance. To calculate RMSE, we need first to calculate Mean Square Error (MSE). So, we take the difference for each Observed (O i ) and Predicted value (P i ) and take the difference squared. Then, we divide the sum of all the values Figure 2 : Architecture of the BERT model 4 by the number of observations to get the MSE value. Finally, we take the root of MSE to get RMSE value.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 306, |
| "end": 314, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "RM SE = n i=1 (P i \u2212 O i ) 2 (n)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Model Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "4 Experimentation and setup", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Traditional ML algorithms are being widely used to make predictions based on data. In this paper, ELMo with NB is implemented as the proposed model. In our experiments, we compared the performance of the proposed model versus ELMo with Bagging NB and BERT performances. We start with replacing one word for each original sentence by the requested new word. Next, we find the level of humor in the sentence between [0-3] scale. Different perspectives are experimented to modify and identify the humorist sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "According to the performance measures, the results showed that the proposed model outperforms both BNB and BERT in solving the problem of humor evaluation. The proposed model achieved an RMSE of 0.5642, BERT achieved an RMSE of 0.5747, while BNB achieved an RMSE of 0.5682 as you can see in In ML, the NB belongs to the \"Probabilistic Classifiers\" family based on the Bayes theorem. The main idea of NB is finding a relationship between features using Equation 2, which represents the relationship given class label (Y) and dependent feature vector (X)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(Y |X) = P (X|Y ) * P (Y ) P (X)", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Model Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "0.5747 0.5747 Figure. 4 illustrates the general framework of the ELMo-NB model with the dataset. The NB regression is unique of its kind, it is known as the best according to the running time, high accuracy and features handling since it deals with the features as an independent member, so the decision taken is not affected by an absence of some features. Although we have a large set of data and a large number of records, NB is still giving the best RMSE over all experimented regressions. Regarding Bagging Na\u00efve Bayes (BNB), the basic concept of bagging is to build new models using the same regression and dataset variance. The concept of bagging is based on taking the dataset to be chosen more than one time and for each time it is running the NB model. Therefore, it is allowed for records to appear in several runs. As we discussed earlier, the NB prediction gives great performances with unbalanced and independent feature variables. We tried to run this kind of NB using the bagging algorithm to measure the regression behavior and make it suitable for comparison.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 14, |
| "end": 21, |
| "text": "Figure.", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The third experiments is with DL, we have found that Recurrent Neural Network (RNN) is a well known architecture for NLP. It is proper to handle inputs of different lengths in order to its structure, so RNN serves us well in finding assessing humour in edited news headlines. In our experiment, we uses BERT as a standalone model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Through this challenge (SemEval-2020 competition subtask 1 in Task 7), we use Embeddings from Language Models (ELMo) with the Na\u00efve Bayes (NB) model as the primary baseline. The main focus is using the best text manipulation algorithm, where we recommend using ELMo for its usefulness and its ability to generate \"contextualized\" word embeddings. Our trial is to use the ELMo pretrained and then let the ML models make the prediction. The NB with ELMo recorded the lowest RMSE. For future work, we want to use different ML and DL models with the dataset. Also, we plan to use different-dimensional and pre-trained embedding. Using the XLNET with well-spotted parameters could be useful too.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://competitions.codalab.org/competitions/20970", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to extend our sincere thanks to Dr. Malak Abdullah for her efforts and support. In order to finish this work, we had a lot of straight directions and advice from her, during the fall semester, 2019.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgement", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Teamuncc at semeval-2018 task 1: Emotion detection in english and arabic tweets using deep learning", |
| "authors": [ |
| { |
| "first": "Malak", |
| "middle": [], |
| "last": "Abdullah", |
| "suffix": "" |
| }, |
| { |
| "first": "Samira", |
| "middle": [], |
| "last": "Shaikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 12th international workshop on semantic evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "350--357", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Malak Abdullah and Samira Shaikh. 2018. Teamuncc at semeval-2018 task 1: Emotion detection in english and arabic tweets using deep learning. In Proceedings of the 12th international workshop on semantic evaluation, pages 350-357.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Question to question similarity analysis using morphological, syntactic, semantic, and lexical features", |
| "authors": [ |
| { |
| "first": "Muntaha", |
| "middle": [], |
| "last": "Al-Asa'd", |
| "suffix": "" |
| }, |
| { |
| "first": "Nour", |
| "middle": [], |
| "last": "Al-Khdour", |
| "suffix": "" |
| }, |
| { |
| "first": "Mutaz", |
| "middle": [], |
| "last": "Bni Younes", |
| "suffix": "" |
| }, |
| { |
| "first": "Enas", |
| "middle": [], |
| "last": "Khwaileh", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "Hammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Al-Smadi", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2019 IEEE/ACS 16th International Conference on Computer Systems and Applications (AICCSA)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muntaha Al-Asa'd, Nour Al-Khdour, Mutaz Bni Younes, Enas Khwaileh, Mahmoud Hammad, and AL-Smadi Mohammad. 2019. Question to question similarity analysis using morphological, syntactic, semantic, and lexical features. In 2019 IEEE/ACS 16th International Conference on Computer Systems and Applications (AICCSA), pages 1-6. IEEE.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Emodet at semeval-2019 task 3: Emotion detection in text using deep learning", |
| "authors": [ |
| { |
| "first": "Hani", |
| "middle": [], |
| "last": "Al-Omari", |
| "suffix": "" |
| }, |
| { |
| "first": "Malak", |
| "middle": [], |
| "last": "Abdullah", |
| "suffix": "" |
| }, |
| { |
| "first": "Nabeel", |
| "middle": [], |
| "last": "Bassam", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "200--204", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hani Al-Omari, Malak Abdullah, and Nabeel Bassam. 2019. Emodet at semeval-2019 task 3: Emotion detection in text using deep learning. In Proceedings of the 13th International Workshop on Semantic Evaluation, pages 200-204.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Emodet2: Emotion detection in english textual dialogue using bert and bilstm models", |
| "authors": [ |
| { |
| "first": "Hani", |
| "middle": [], |
| "last": "Al-Omari", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Malak", |
| "suffix": "" |
| }, |
| { |
| "first": "Samira", |
| "middle": [], |
| "last": "Abdullah", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Shaikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 11th International Conference on Information and Communication Systems (ICICS)", |
| "volume": "", |
| "issue": "", |
| "pages": "226--232", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hani Al-Omari, Malak A Abdullah, and Samira Shaikh. 2020. Emodet2: Emotion detection in english textual dia- logue using bert and bilstm models. In 2020 11th International Conference on Information and Communication Systems (ICICS), pages 226-232. IEEE.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Spatial prediction of landslide susceptibility using data mining-based kernel logistic regression, naive bayes and rbfnetwork models for the long county area (china)", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xusheng", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhou", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Haoyuan", |
| "middle": [], |
| "last": "Hong", |
| "suffix": "" |
| }, |
| { |
| "first": "Tien", |
| "middle": [], |
| "last": "Dieu", |
| "suffix": "" |
| }, |
| { |
| "first": "Biswajeet", |
| "middle": [], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Bulletin of Engineering Geology and the Environment", |
| "volume": "78", |
| "issue": "1", |
| "pages": "247--266", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Chen, Xusheng Yan, Zhou Zhao, Haoyuan Hong, Dieu Tien Bui, and Biswajeet Pradhan. 2019. Spatial predic- tion of landslide susceptibility using data mining-based kernel logistic regression, naive bayes and rbfnetwork models for the long county area (china). Bulletin of Engineering Geology and the Environment, 78(1):247-266.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Very deep convolutional networks for text classification", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.01781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Holger Schwenk, Lo\u00efc Barrault, and Yann Lecun. 2016. Very deep convolutional networks for text classification. arXiv preprint arXiv:1606.01781.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Homographic pun location using multi-dimensional semantic relationships", |
| "authors": [ |
| { |
| "first": "Yufeng", |
| "middle": [], |
| "last": "Diao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongfei", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaochao", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Soft Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "1--11", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yufeng Diao, Hongfei Lin, Liang Yang, Xiaochao Fan, Di Wu, and Kan Xu. 2020. Homographic pun location using multi-dimensional semantic relationships. Soft Computing, pages 1-11.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Cultural practice and environmental impacts on the flavonoid composition of grapes and wine: a review of recent research", |
| "authors": [ |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Mark", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [ |
| "K" |
| ], |
| "last": "Downey", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [ |
| "P" |
| ], |
| "last": "Dokoozlian", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Krstic", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "American Journal of Enology and Viticulture", |
| "volume": "57", |
| "issue": "3", |
| "pages": "257--268", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark O Downey, Nick K Dokoozlian, and Mark P Krstic. 2006. Cultural practice and environmental impacts on the flavonoid composition of grapes and wine: a review of recent research. American Journal of Enology and Viticulture, 57(3):257-268.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Natural language processing (nlp), machine learning (ml), and semantics in polar science", |
| "authors": [ |
| { |
| "first": "Ruth", |
| "middle": [], |
| "last": "Duerr", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Ramdeen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "AGUFM", |
| "volume": "", |
| "issue": "", |
| "pages": "13--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruth Duerr and Sarah Ramdeen. 2017. Natural language processing (nlp), machine learning (ml), and semantics in polar science. AGUFM, 2017:IN13D-03.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Allennlp: A deep semantic natural language processing platform", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Grus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Oyvind", |
| "middle": [], |
| "last": "Tafjord", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nelson", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Schmitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.07640" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson Liu, Matthew Peters, Michael Schmitz, and Luke Zettlemoyer. 2018. Allennlp: A deep semantic natural language processing platform. arXiv preprint arXiv:1803.07640.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Advances in natural language processing", |
| "authors": [ |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hirschberg", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "349", |
| "issue": "", |
| "pages": "261--266", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julia Hirschberg and Christopher D Manning. 2015. Advances in natural language processing. Science, 349(6245):261-266.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "president vows to cut <taxes> hair", |
| "authors": [ |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Krumm", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Dataset and analysis of creative text editing for humorous headlines", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.00274" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabil Hossain, John Krumm, and Michael Gamon. 2019. \"president vows to cut <taxes> hair\": Dataset and analysis of creative text editing for humorous headlines. arXiv preprint arXiv:1906.00274.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Semeval-2020 Task 7: Assessing humor in edited news headlines", |
| "authors": [ |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Krumm", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| }, |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Kautz", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2020)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabil Hossain, John Krumm, Michael Gamon, and Henry Kautz. 2020a. Semeval-2020 Task 7: Assessing humor in edited news headlines. In Proceedings of International Workshop on Semantic Evaluation (SemEval-2020), Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Stimulating creativity with funlines: A case study of humor generation in headlines", |
| "authors": [ |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Krumm", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanvir", |
| "middle": [], |
| "last": "Sajed", |
| "suffix": "" |
| }, |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Kautz", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2002.02031" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabil Hossain, John Krumm, Tanvir Sajed, and Henry Kautz. 2020b. Stimulating creativity with funlines: A case study of humor generation in headlines. arXiv preprint arXiv:2002.02031.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Universal language model fine-tuning for text classification", |
| "authors": [ |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1801.06146" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. arXiv preprint arXiv:1801.06146.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Are word embedding-based features useful for sarcasm detection?", |
| "authors": [ |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Vaibhav", |
| "middle": [], |
| "last": "Tripathi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Patel", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpak", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Carman", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1610.00883" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aditya Joshi, Vaibhav Tripathi, Kevin Patel, Pushpak Bhattacharyya, and Mark Carman. 2016. Are word embedding-based features useful for sarcasm detection? arXiv preprint arXiv:1610.00883.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Performance analysis of ensemble methods on twitter sentiment analysis using nlp techniques", |
| "authors": [ |
| { |
| "first": "Monisha", |
| "middle": [], |
| "last": "Kanakaraj", |
| "suffix": "" |
| }, |
| { |
| "first": "Ram Mohana Reddy", |
| "middle": [], |
| "last": "Guddeti", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 IEEE 9th International Conference on Semantic Computing (IEEE ICSC 2015)", |
| "volume": "", |
| "issue": "", |
| "pages": "169--170", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Monisha Kanakaraj and Ram Mohana Reddy Guddeti. 2015. Performance analysis of ensemble methods on twitter sentiment analysis using nlp techniques. In Proceedings of the 2015 IEEE 9th International Conference on Semantic Computing (IEEE ICSC 2015), pages 169-170. IEEE.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The multifaceted concept of context in sentiment analysis", |
| "authors": [ |
| { |
| "first": "Akshi", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Geetanjali", |
| "middle": [], |
| "last": "Garg", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Cognitive Informatics and Soft Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "413--421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Akshi Kumar and Geetanjali Garg. 2020. The multifaceted concept of context in sentiment analysis. In Cognitive Informatics and Soft Computing, pages 413-421. Springer.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Human vs. ai: An assessment of the translation quality between translators and machine translation", |
| "authors": [ |
| { |
| "first": "Hanji", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Haiqing", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Journal of Translation, Interpretation, and Applied Linguistics (IJTIAL)", |
| "volume": "1", |
| "issue": "1", |
| "pages": "43--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hanji Li and Haiqing Chen. 2019. Human vs. ai: An assessment of the translation quality between translators and machine translation. International Journal of Translation, Interpretation, and Applied Linguistics (IJTIAL), 1(1):43-54.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A bert-based approach for automatic humor detection and scoring", |
| "authors": [ |
| { |
| "first": "Jihang", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanli", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Iberian Languages Evaluation Forum", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jihang Mao and Wanli Liu. 2019. A bert-based approach for automatic humor detection and scoring. In Proceed- ings of the Iberian Languages Evaluation Forum (IberLEF 2019). CEUR Workshop Proceedings, CEUR-WS, Bilbao, Spain (9 2019).", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The natural language decathlon: Multitask learning as question answering", |
| "authors": [ |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Mccann", |
| "suffix": "" |
| }, |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Shirish Keskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1806.08730" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bryan McCann, Nitish Shirish Keskar, Caiming Xiong, and Richard Socher. 2018. The natural language decathlon: Multitask learning as question answering. arXiv preprint arXiv:1806.08730.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Predicting the humorousness of tweets using gaussian process preference learning", |
| "authors": [ |
| { |
| "first": "Tristan", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik-L\u00e2n Do", |
| "middle": [], |
| "last": "Dinh", |
| "suffix": "" |
| }, |
| { |
| "first": "Edwin", |
| "middle": [], |
| "last": "Simpson", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Procesamiento del Lenguaje Natural", |
| "volume": "64", |
| "issue": "", |
| "pages": "37--44", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tristan Miller, Erik-L\u00e2n Do Dinh, Edwin Simpson, and Iryna Gurevych. 2020. Predicting the humorousness of tweets using gaussian process preference learning. Procesamiento del Lenguaje Natural, 64:37-44.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1802.05365" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettle- moyer. 2018. Deep contextualized word representations. arXiv preprint arXiv:1802.05365.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Semeval-2017 task 6:# hashtagwars: Learning a sense of humor", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Potash", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Romanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "49--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Potash, Alexey Romanov, and Anna Rumshisky. 2017. Semeval-2017 task 6:# hashtagwars: Learning a sense of humor. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 49-57.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Intelligent analysis for personality detection on various indicators by clinical reliable psychological tth and stress surveys", |
| "authors": [ |
| { |
| "first": "Rohit", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "K" |
| ], |
| "last": "Chaturvedi", |
| "suffix": "" |
| }, |
| { |
| "first": "Santosh", |
| "middle": [], |
| "last": "Satya", |
| "suffix": "" |
| }, |
| { |
| "first": "Navneet", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Trivedi", |
| "suffix": "" |
| }, |
| { |
| "first": "Akshay Kr", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Amit Kr", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Ambuj", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Computational Intelligence in Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "127--143", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rohit Rastogi, DK Chaturvedi, Santosh Satya, Navneet Arora, Piyush Trivedi, Akshay Kr Singh, Amit Kr Sharma, and Ambuj Singh. 2020. Intelligent analysis for personality detection on various indicators by clinical reliable psychological tth and stress surveys. In Computational Intelligence in Pattern Recognition, pages 127-143. Springer.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Alternative weighting schemes for elmo embeddings", |
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.02954" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Alternative weighting schemes for elmo embeddings. arXiv preprint arXiv:1904.02954.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Developing an online hate classifier for multiple social media platforms", |
| "authors": [ |
| { |
| "first": "Joni", |
| "middle": [], |
| "last": "Salminen", |
| "suffix": "" |
| }, |
| { |
| "first": "Maximilian", |
| "middle": [], |
| "last": "Hopf", |
| "suffix": "" |
| }, |
| { |
| "first": "Shammur", |
| "middle": [ |
| "A" |
| ], |
| "last": "Chowdhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Soon-Gyo", |
| "middle": [], |
| "last": "Jung", |
| "suffix": "" |
| }, |
| { |
| "first": "Hind", |
| "middle": [], |
| "last": "Almerekhi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernard J", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Human-centric Computing and Information Sciences", |
| "volume": "10", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joni Salminen, Maximilian Hopf, Shammur A Chowdhury, Soon-gyo Jung, Hind Almerekhi, and Bernard J Jansen. 2020. Developing an online hate classifier for multiple social media platforms. Human-centric Computing and Information Sciences, 10(1):1.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Effective word prediction in urdu language using stochastic model", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "Farhan" |
| ], |
| "last": "Siddiqui", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hassan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Sukkur IBA Journal of Computing and Mathematical Sciences", |
| "volume": "2", |
| "issue": "2", |
| "pages": "38--46", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M Farhan Siddiqui and M Hassan. 2019. Effective word prediction in urdu language using stochastic model. Sukkur IBA Journal of Computing and Mathematical Sciences, 2(2):38-46.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Utilizing bert for aspect-based sentiment analysis via constructing auxiliary sentence", |
| "authors": [ |
| { |
| "first": "Chi", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Luyao", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1903.09588" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi Sun, Luyao Huang, and Xipeng Qiu. 2019. Utilizing bert for aspect-based sentiment analysis via constructing auxiliary sentence. arXiv preprint arXiv:1903.09588.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "End-toend open-domain question answering with bertserini", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuqing", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Aileen", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xingyu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Luchen", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Kun", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1902.01718" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Yang, Yuqing Xie, Aileen Lin, Xingyu Li, Luchen Tan, Kun Xiong, Ming Li, and Jimmy Lin. 2019. End-to- end open-domain question answering with bertserini. arXiv preprint arXiv:1902.01718.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Recent trends in deep learning based natural language processing. ieee Computational intelligenCe magazine", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "Devamanyu", |
| "middle": [], |
| "last": "Hazarika", |
| "suffix": "" |
| }, |
| { |
| "first": "Soujanya", |
| "middle": [], |
| "last": "Poria", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Cambria", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "13", |
| "issue": "", |
| "pages": "55--75", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Young, Devamanyu Hazarika, Soujanya Poria, and Erik Cambria. 2018. Recent trends in deep learning based natural language processing. ieee Computational intelligenCe magazine, 13(3):55-75.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Integrating crowdsourcing and active learning for classification of work-life events from tweets", |
| "authors": [ |
| { |
| "first": "Yunpeng", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Mattia", |
| "middle": [], |
| "last": "Prosperi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianchen", |
| "middle": [], |
| "last": "Lyu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Bian", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2003.12139" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yunpeng Zhao, Mattia Prosperi, Tianchen Lyu, Yi Guo, and Jing Bian. 2020. Integrating crowdsourcing and active learning for classification of work-life events from tweets. arXiv preprint arXiv:2003.12139.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "An adaptive cognitive temporal-causal network model of a mindfulness therapy based on humor", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "Sahand" |
| ], |
| "last": "Mohammadi Ziabari", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Treur", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Information Systems and Neuroscience", |
| "volume": "", |
| "issue": "", |
| "pages": "189--201", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S Sahand Mohammadi Ziabari and Jan Treur. 2020. An adaptive cognitive temporal-causal network model of a mindfulness therapy based on humor. In Information Systems and Neuroscience, pages 189-201. Springer.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Figure 4." |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Forward and Backward Techniques 3" |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Figure 3" |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "The performance of ELMo-NB, ELMo-BNB, and BERT models" |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Work-Flow of ELMo-NB Model" |
| } |
| } |
| } |
| } |