| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:20:33.075359Z" |
| }, |
| "title": "LMML at SemEval-2020 Task 7: Siamese Transformers for Rating Humor in Edited News Headlines", |
| "authors": [ |
| { |
| "first": "Pramodith", |
| "middle": [], |
| "last": "Ballapuram", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "pramodith1@gmail.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper contains a description of my solution to the problem statement of SemEval 2020: Assessing the Funniness of Edited News Headlines. I propose a Siamese Transformer based approach, coupled with a Global Attention mechanism that makes use of contextual embeddings and focus words, to generate important features that are fed to a 2 layer perceptron to rate the funniness of the edited headline. I detail various experiments to show the performance of the system. The proposed approach outperforms a baseline Bi-LSTM architecture and finished 5th (out of 49 teams) in sub-task 1 and 4th (out of 32 teams) in sub-task 2 of the competition and was the best non-ensemble model in both tasks.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper contains a description of my solution to the problem statement of SemEval 2020: Assessing the Funniness of Edited News Headlines. I propose a Siamese Transformer based approach, coupled with a Global Attention mechanism that makes use of contextual embeddings and focus words, to generate important features that are fed to a 2 layer perceptron to rate the funniness of the edited headline. I detail various experiments to show the performance of the system. The proposed approach outperforms a baseline Bi-LSTM architecture and finished 5th (out of 49 teams) in sub-task 1 and 4th (out of 32 teams) in sub-task 2 of the competition and was the best non-ensemble model in both tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Machines that can recognize and understand humor can prove to be invaluable in applications like chat bots, personal digital assistants in order to make communication more fun and humane, story and script generation to provide comical relief or even in recommendation engines that can provide better recommendations to people on what Netflix stand-up show they can watch next. At the end of the day we all live to laugh don't we? Surprisingly enough, there hasn't been much work in the field of AI along these lines. The organizers of SemEval-2020 Task 7 (Hossain et al., 2020a ) released a new dataset in the English language and created a couple of sub tasks that can hopefully take us a step forward in creating machines that better understand humor.", |
| "cite_spans": [ |
| { |
| "start": 555, |
| "end": 577, |
| "text": "(Hossain et al., 2020a", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "HAHA -Humor Analysis based on Human Annotation (Castro et al., ) , (Chiruzzo et al., 2019 ) started in 2018 was a similar task, where the dataset comprised of Spanish Tweets and participants were asked to classify the tweets as either a joke or not and also rate the jokes on a scale of 0-5. In their overview paper, the authors describe that teams that used Transformer Based Models such as BERT (Devlin et al., 2019) and ULMFit (Howard and Ruder, 2018) , along with techniques like slanted learning rates, domain specific language modeling etc. proved valuable. My approach takes inspiration from this and focuses on Transformer Models. SemEval 2017 Task-6 (Potash et al., 2017) consisted of sub-tasks asking participants to rank the funniness of tweets that had a specific HashTag. They mention that some of the top teams used Siamese Networks (Bromley et al., 1994) , (Koch, 2015) based approaches. He et. al., (2019) introduce the concepts of local surprisal and global surprisal their work suggests that for a pun to be considered good, the pun-word must have high agreeableness in the local context of where it occurs in the sentence but a lower level of agreeableness in the global context of the entire sentence. I take inspiration from this idea as well. My hypothesis is that an edit in a sentence can be funny if the edited sentence continues to make sense and also have a bit of a twist. My model attempts to model the global agreeableness of the replaced and original word in the news headline. The remainder of the paper will be as follows. In section 2, I will briefly describe the task. Section 3 will detail the System design and architecture. Section 4 will state the implementation details. Section 5 will cover details of all experiments, results and interesting findings. It will be followed by a small section containing my final thoughts.", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 64, |
| "text": "(Castro et al., )", |
| "ref_id": null |
| }, |
| { |
| "start": 67, |
| "end": 89, |
| "text": "(Chiruzzo et al., 2019", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 397, |
| "end": 418, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 430, |
| "end": 454, |
| "text": "(Howard and Ruder, 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 659, |
| "end": 680, |
| "text": "(Potash et al., 2017)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 847, |
| "end": 869, |
| "text": "(Bromley et al., 1994)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 872, |
| "end": 884, |
| "text": "(Koch, 2015)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 903, |
| "end": 921, |
| "text": "He et. al., (2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Original News Headline Edit EU says summit with Turkey provides no answers to <concerns/ > stuffing The GOP just ca n't <escape/ > the 80s remember Table 1 : Sample Data from sub-task 1.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 148, |
| "end": 155, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 Task Review Table 1 gives an example of entries in the provided dataset (Hossain et al., 2019) . The words within the angular brackets are substituted with the corresponding word in the edit column. The edited headline would read The GOP just can't remember the 80s. The score of the edited headline is a value between 0-3. It's worthy to note that there is only one edit per sample in the dataset. Sub-task 1 dealt with predicting the funniness score given the original headline and the edit. In Sub-task 2 we were given the same original headline, but there would be 2 different edited headlines and the edit could be made anywhere in the original headline. Participants were asked to classify which of the two edits was funnier. In my approach I create one model that can predict the funniness score of a given sample and I use the same model to compute the results of Sub-Task 2, by finding the difference between the funniness scores of the two edited headlines. The organizers of the task were kind enough to provide additional training samples a few months into the competition (Hossain et al., 2020b) . I make use of both the initial dataset and the additional dataset provided, to train my models.", |
| "cite_spans": [ |
| { |
| "start": 74, |
| "end": 96, |
| "text": "(Hossain et al., 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1087, |
| "end": 1110, |
| "text": "(Hossain et al., 2020b)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 14, |
| "end": 21, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "3 System Design and Architecture", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "From hereon the original headline will be referred to as X org and the edited headline as X edit . I will refer to the word being replaced from the original headline and the edit word as focus words. My approach centers around the idea that a model should learn features that are conditioned on the focus words, or make use of the focus words either directly or indirectly. Wu and He (2019) show that in the task of relationship classification between two entities adding special tokens between the span of the entities leads to an improved performance. Similarly in this approach the token < is added before and after the word to be replaced in X org and\u02c6(symbol for exponent) is added before and after the edit word in X edit .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and Preprocessing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Siamese Networks (Bromley et al., 1994) , (Koch, 2015) are twin networks that share the same parameters but each of the twins receive distinct inputs. Given that, for this task, we have an original and edited headline, I hypothesized that extracting features from both the headlines would be beneficial since humans require context of what the original sentence is, to deem an altered sentence to be funny. Each of X org and X edit is passed to one of the twins in the Siamese Network. Both of them comprise the tokens ", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 39, |
| "text": "(Bromley et al., 1994)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 42, |
| "end": 54, |
| "text": "(Koch, 2015)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Siamese Networks", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "X org/edit = [x org/edit 0 , x org/edit 1 , ..., x org/edit i , ..., x org/edit n ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Siamese Networks", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "U = concat([C edit ,C org ,S org ,E edit ])", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Siamese Networks", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Over the last couple of years Transformer based Architectures (Vaswani et al., 2017 ) such as BERT (Devlin et al., 2019) , XLNet (Yang et al., 2019) , Roberta (Liu et al., 2019) etc. have become extremely popular. These models are pre-trained on language modeling tasks using large amounts of data and as a result are capable of providing contextual token embeddings that can be fine-tuned to achieve state of the art results in various downstream NLP tasks such as Question Answering, Sentiment Analysis, Natural Language Inference tasks etc. I experiment with different transformer models that act as the Siamese twins in order to obtain contextual token embeddings and choose the best one.", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 83, |
| "text": "(Vaswani et al., 2017", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 99, |
| "end": 120, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 129, |
| "end": 148, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 159, |
| "end": 177, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The token embeddings are used to create a set of useful features that are passed to a two layer perceptron to predict the funniness score of the edited news headline. The features extracted are described in the following subsection. Figure 1 depicts the model architecture.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 233, |
| "end": 241, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "It's been observed that different layers in a Neural Network capture different kinds of syntactic and semantic information (Yosinski et al., 2014) . Sun et al., (2019) observed an improved performance on classification tasks by concatenating the token embeddings of BERT with the embeddings from the penultimate layer i.e. layer 11. I experimented with concatenating the outputs of different layers and observed the same. I concatenate the final token embeddings with those from the 11 th layer of the Transformer. Please note that from hereon all features are concatenations of the Transformer's token embedding and its penultimate layer.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 146, |
| "text": "(Yosinski et al., 2014)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Most BERT (Devlin et al., 2019) based architectures that are fine tuned towards a downstream task tend to use the first token i.e the [CLS] token as a vector that summarizes the entire input sequence in essence. I make use of the first token from the Transformer twins for both X org and X edit , these two vectors will be referred to as S org and S edit . I also extract the vectors that correspond to the focus words. This is done easily thanks to the special tokens < and\u02c6that demarcate these words. In the event that the word spans more than one token the mean of all the tokens between the special tokens is computed to obtain a single vector. These two word vectors will be referred by E org and E edit from hereon.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 31, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "E org/edit = mean([E org/edit 1 ,E org/edit i ...,E org/editn ])", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "For each of X org and X edit I compute context vectors C org and C edit . C org/edit is computed as the result of a Global Attention Mechanism (Bahdanau et al., 2015) , (Luong et al., 2015) . The idea is that the C org/edit 1 would contain information about how well the replaced word and the edited word fit into the headline. Below \u2022 is the dot product operation.", |
| "cite_spans": [ |
| { |
| "start": 143, |
| "end": 166, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 169, |
| "end": 189, |
| "text": "(Luong et al., 2015)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In section 5, I describe experiments where different combinations of the features which will be referred to as U are passed as input to a two layer perceptron. PReLU (He et al., 2015) is used as the activation function after the first linear layer. ", |
| "cite_spans": [ |
| { |
| "start": 166, |
| "end": 183, |
| "text": "(He et al., 2015)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "attention scores org/edit = sof tmax(V \u2022 (W E org/edit ))", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "C org/edit = attention scores edit/org \u2022 V where V = (X org/edit i / \u2208 E org/edit i )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "I present two baseline architectures; In the first one I make use of a 2-layered Bidirectional LSTM (Hochreiter and Schmidhuber, 1997), initialized with Glove (Pennington et al., 2014) Embeddings of size 300 followed by a self-attention layer. The mean of the output from the self-attention layer is then passed as input to a 2 layer perceptron with tanh as the activation function. The input to this model is only the edited news headline i.e. X edit . The first linear layer projects a vector of size 600 to 128.", |
| "cite_spans": [ |
| { |
| "start": 159, |
| "end": 184, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Architecture", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "The second baseline is a Siamese 2-layered Bidirectional LSTM with a single head self-attention layer on top of it. I pass U to the 2 layer perceptron where U is defined as below. The output from the self-attention layer are treated as the token embeddings. Both of these models perform similarly with a RMSE of 0.581 on the validation set and 0.577 on the test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Architecture", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "U = concat([C edit ,C org ,S edit ,E edit ])", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Architecture", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "All experiments mentioned below were conducted making use of Pytorch 2 , the HuggingFace 3 library was used for transformer architectures. Spacy 4 for Glove (Pennington et al., 2014) . Batch size is fixed at 64, I use Adam (Kingma and Ba, 2014) with a learning rate of 2e-5 as my optimizer. For all the experiments the best validation score and the test score of the model corresponding to the best validation score are reported. I use a linear warm-up scheduler (Howard and Ruder, 2018) with the warm-up period equal to 10% of the total number of steps. The model is trained for 5 epochs and the model with the best validation score is used at test time. A dropout of 0.1 is applied to all the transformer architectures and 0.3 to both the linear layers. I clip the norm of the gradients to 1.0. Unless stated otherwise all of the transformer models were uncased apart from Roberta, for Roberta I use the roberta-base model. All the transformer models are pre-trained models and are not trained from scratch. For all experiments reported in this the random seed is fixed to be 12. The max sequence length is fixed to 50. I make my code publicly available 5 in the form of a jupyter notebook. I experiment with using DistilBERT , BERT, Roberta for producing the token embeddings along with the baseline Bi-LSTM model (non-siamese) mentioned above. The embeddings are used to create U =concat([C edit ,C org ,S edit ,E edit ]) which is passed to the 2 layer perceptron.", |
| "cite_spans": [ |
| { |
| "start": 157, |
| "end": 182, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 463, |
| "end": 487, |
| "text": "(Howard and Ruder, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The results are shown in the table 2. The LSTM based model has the poorest performance. Roberta shows the best performance with 0.516 on both the validation set and the test set. All experiments listed from hereon make use of the Roberta model to obtain the token embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The next set of experiments show the impact of using different combinations of the features explained in section 3.4 to obtain the best U vector which is passed to the 2 layer perceptron. The model with the best RMSE on the test set is with the features C edit ,C org ,E org ,E edit . Since the model with the features C edit ,C org ,S edit ,E edit gives the most consistent results on the test and validation set all experiments following this section use the concatenation of these features as U . From these results it's not too clear that one feature or one set of features is more important than the other.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments for finding the best set of features", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In order to verify that using a Siamese architecture is advantageous, I train a non-siamese 6 network in which X org and X edit are concatenated together. In order for Roberta to recognize X org and X edit as a text pair, they're concatenated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Importance of Siamese Architecture", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "X concat = < s > + X org + < /s > + < /s > + X edit + < /s > X", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Importance of Siamese Architecture", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "concat is passed to the network to obtain the features mentioned above in section 3.4. The best RMSE of this model on the validation set is 0.5194 and the corresponding test set RMSE is 0.5247. Despite the Siamese architecture doing moderately better than this model, it's not too convincing that the Siamese architecture is helping the model improve its performance. Here S edit is the first token from Roberta.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Importance of Siamese Architecture", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Sun et al., (2019) and Chiruzzo et al., (2019) mention that fine-tuning the language model of transformer based architectures against the task specific data improves performance of the models. I fine-tune the Roberta model for masked language modeling against the original news headlines. The language model is trained for 2 epochs with a batch size of 32. The model yields an RMSE of 0.5212 on the validation set and 0.5194 on the test set. It's observed that there is no notable improvement.", |
| "cite_spans": [ |
| { |
| "start": 23, |
| "end": 46, |
| "text": "Chiruzzo et al., (2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fine-Tuning the Language Model", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "My final submissions to the competition for Sub-task 1 and Sub-task 2 resulted in a 5th and 4th place finish in the competition and was the best non-ensemble model in both tasks amongst the final submissions. The final submissions actually corresponded to a Siamese BERT based architecture, where U = concat([C edit ,C org ,S edit ,E edit ] which I obtained by performing a search on random seeds. I didn't experiment with the Roberta model at that point of time after the release of the extra dataset. The submitted model achieved a validation score of 0.5186. and a test score of 0.5202. For Sub-task 2 I observed that the model with the best accuracy did not necessarily need to be the same as the one that had the lowest RMSE for Sub-task 1, which is quite surprising. In Sub-task 2 my official submission obtained an accuracy of 0.6465 on the validation set and 0.6468 on the test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In conclusion, this paper presents a Siamese Transformer based approach that makes use of features that center around the focus words and their impact against other tokens. From the experiments shown above it's tough to conclude if the Siamese architecture or if any of the features in particular are responsible for an improved performance of the model, it looks like just following the best practices of training Transformer networks can yield very good results. In the future I would like to probe the model to better understand why it deems one sentence to be funnier than another, it would also be interesting to study if a model that can generate jokes can also grade how funny a joke is and vice versa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In (1) W is a learned parameter", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://pytorch.org/ 3 https://huggingface.co/ 4 https://spacy.io/ 5 https://github.com/pramodith/Humor", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "this experiment was conducted on a different GPU from that of 5.2 to accommodate longer sequence lengths.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In Yoshua Bengio and Yann LeCun, editors, 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Signature verification using a\" siamese\" time delay neural network", |
| "authors": [ |
| { |
| "first": "Jane", |
| "middle": [], |
| "last": "Bromley", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabelle", |
| "middle": [], |
| "last": "Guyon", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "S\u00e4ckinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Roopak", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "737--744", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jane Bromley, Isabelle Guyon, Yann LeCun, Eduard S\u00e4ckinger, and Roopak Shah. 1994. Signature verification using a\" siamese\" time delay neural network. In Advances in neural information processing systems, pages 737-744.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Overview of the haha task: Humor analysis based on human annotation at", |
| "authors": [ |
| { |
| "first": "Santiago", |
| "middle": [], |
| "last": "Castro", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Chiruzzo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aiala", |
| "middle": [], |
| "last": "Ros\u00e1", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Santiago Castro, Luis Chiruzzo, and Aiala Ros\u00e1. Overview of the haha task: Humor analysis based on human annotation at ibereval 2018.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Overview of haha at iberlef 2019: Humor analysis based on human annotation", |
| "authors": [ |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Chiruzzo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathias", |
| "middle": [], |
| "last": "Castro", |
| "suffix": "" |
| }, |
| { |
| "first": "Diego", |
| "middle": [], |
| "last": "Etcheverry", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan Jos\u00e9", |
| "middle": [], |
| "last": "Garat", |
| "suffix": "" |
| }, |
| { |
| "first": "Aiala", |
| "middle": [], |
| "last": "Prada", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ros\u00e1", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Iberian Languages Evaluation Forum", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luis Chiruzzo, S Castro, Mathias Etcheverry, Diego Garat, Juan Jos\u00e9 Prada, and Aiala Ros\u00e1. 2019. Overview of haha at iberlef 2019: Humor analysis based on human annotation. In Proceedings of the Iberian Languages Evaluation Forum (IberLEF 2019). CEUR Workshop Proceedings, CEUR-WS, Bilbao, Spain (9 2019).", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirec- tional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Delving deep into rectifiers: Surpassing humanlevel performance on imagenet classification", |
| "authors": [ |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaoqing", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE international conference on computer vision", |
| "volume": "", |
| "issue": "", |
| "pages": "1026--1034", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2015. Delving deep into rectifiers: Surpassing human- level performance on imagenet classification. In Proceedings of the IEEE international conference on computer vision, pages 1026-1034.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "president vows to cut <taxes> hair\": Dataset and analysis of creative text editing for humorous headlines", |
| "authors": [ |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Krumm", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "133--142", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabil Hossain, John Krumm, and Michael Gamon. 2019. \"president vows to cut <taxes> hair\": Dataset and analysis of creative text editing for humorous headlines. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 133-142, Minneapolis, Minnesota, June. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Semeval-2020 Task 7: Assessing humor in edited news headlines", |
| "authors": [ |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Krumm", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| }, |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Kautz", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2020)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabil Hossain, John Krumm, Michael Gamon, and Henry Kautz. 2020a. Semeval-2020 Task 7: Assessing humor in edited news headlines. In Proceedings of International Workshop on Semantic Evaluation (SemEval-2020), Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Stimulating creativity with funlines: A case study of humor generation in headlines", |
| "authors": [ |
| { |
| "first": "Nabil", |
| "middle": [], |
| "last": "Hossain", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Krumm", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanvir", |
| "middle": [], |
| "last": "Sajed", |
| "suffix": "" |
| }, |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Kautz", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ACL 2020, System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabil Hossain, John Krumm, Tanvir Sajed, and Henry Kautz. 2020b. Stimulating creativity with funlines: A case study of humor generation in headlines. In Proceedings of ACL 2020, System Demonstrations, Seattle, Washington, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Universal language model fine-tuning for text classification", |
| "authors": [ |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In ACL. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "the 3rd International Conference for Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. cite arxiv:1412.6980Comment: Published as a conference paper at the 3rd International Conference for Learning Representations, San Diego, 2015.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Siamese neural networks for one-shot image recognition", |
| "authors": [ |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Koch", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gregory Koch. 2015. Siamese neural networks for one-shot image recognition.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Roberta: A robustly optimized BERT pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized BERT pretraining approach. CoRR, abs/1907.11692.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Effective approaches to attention-based neural machine translation", |
| "authors": [ |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1412--1421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thang Luong, Hieu Pham, and Christopher D. Manning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Pro- cessing, pages 1412-1421, Lisbon, Portugal, September. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representa- tion. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar, October. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "SemEval-2017 task 6: #HashtagWars: Learning a sense of humor", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Potash", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Romanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
| "volume": "", |
| "issue": "", |
| "pages": "49--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Potash, Alexey Romanov, and Anna Rumshisky. 2017. SemEval-2017 task 6: #HashtagWars: Learning a sense of humor. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 49-57, Vancouver, Canada, August. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Huggingface's transformers: State-of-theart natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "R'emi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Brew", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, R'emi Louf, Morgan Funtowicz, and Jamie Brew. 2019. Huggingface's transformers: State-of-the- art natural language processing. ArXiv, abs/1910.03771.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Russ", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5754--5764", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural information process- ing systems, pages 5754-5764.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Model architecture. In thefigure U", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Summary of symbols used.", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "num": null, |
| "text": "RMSE using different models to generate Token Embeddings.", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "content": "<table/>", |
| "num": null, |
| "text": "RMSE using different features.", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |