| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:42:53.307749Z" |
| }, |
| "title": "Transformer-based Context-aware Sarcasm Detection in Conversation Threads from Social Media", |
| "authors": [ |
| { |
| "first": "Xiangjue", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
"institution": "Emory University Atlanta",
| "location": { |
| "region": "GA", |
| "country": "USA" |
| } |
| }, |
| "email": "xiangjue.dong@emory.edu" |
| }, |
| { |
| "first": "Changmao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
"institution": "Emory University Atlanta",
| "location": { |
| "region": "GA", |
| "country": "USA" |
| } |
| }, |
| "email": "changmao.li@emory.edu" |
| }, |
| { |
| "first": "Jinho", |
| "middle": [ |
| "D" |
| ], |
| "last": "Choi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
"institution": "Emory University Atlanta",
| "location": { |
| "region": "GA", |
| "country": "USA" |
| } |
| }, |
| "email": "jinho.choi@emory.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present a transformer-based sarcasm detection model that accounts for the context from the entire conversation thread for more robust predictions. Our model uses deep transformer layers to perform multi-head attentions among the target utterance and the relevant context in the thread. The context-aware models are evaluated on two datasets from social media, Twitter and Reddit, and show 3.1% and 7.0% improvements over their baselines. Our best models give the F1-scores of 79.0% and 75.0% for the Twitter and Reddit datasets respectively, becoming one of the highest performing systems among 36 participants in this shared task.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present a transformer-based sarcasm detection model that accounts for the context from the entire conversation thread for more robust predictions. Our model uses deep transformer layers to perform multi-head attentions among the target utterance and the relevant context in the thread. The context-aware models are evaluated on two datasets from social media, Twitter and Reddit, and show 3.1% and 7.0% improvements over their baselines. Our best models give the F1-scores of 79.0% and 75.0% for the Twitter and Reddit datasets respectively, becoming one of the highest performing systems among 36 participants in this shared task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Sarcasm is a form of figurative language that implies a negative sentiment while displaying a positive sentiment on the surface (Joshi et al., 2017) . Because of its conflicting nature and subtlety in language, sarcasm detection has been considered one of the most challenging tasks in natural language processing. Furthermore, when sarcasm is used in social media platforms such as Twitter or Reddit to express users' nuanced intents, the language is often full of spelling errors, acronyms, slangs, emojis, and special characters, which adds another level of difficulty in this task.", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 148, |
| "text": "(Joshi et al., 2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Despite of its challenges, sarcasm detection has recently gained substantial attention because it can bring the last gist to deep contextual understanding for various applications such as author profiling, harassment detection, and irony detection (Van Hee et al., 2018) . Many computational approaches have been proposed to detect sarcasm in conversations (Ghosh et al., 2015; Joshi et al., 2015 Joshi et al., , 2016 . However, most of the previous studies use the utterances in isolation, which makes it hard even for human to detect sarcasm without the contexts. Thus, it's essential to interpret the target utterances along with contextual information comprising textual features from the conversation thread, metadata about the conversation from external sources, or visual context (Bamman and Smith, 2015; Ghosh and Veale, 2017; Ghosh et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 248, |
| "end": 270, |
| "text": "(Van Hee et al., 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 357, |
| "end": 377, |
| "text": "(Ghosh et al., 2015;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 378, |
| "end": 396, |
| "text": "Joshi et al., 2015", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 397, |
| "end": 417, |
| "text": "Joshi et al., , 2016", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 787, |
| "end": 811, |
| "text": "(Bamman and Smith, 2015;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 812, |
| "end": 834, |
| "text": "Ghosh and Veale, 2017;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 835, |
| "end": 854, |
| "text": "Ghosh et al., 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper presents a transformer-based sarcasm detection model that takes both the target utterance and its context and predicts if the target utterance involves sarcasm. Our model uses a transformer encoder to coherently generate the embedding representation for the target utterance and the context by performing multi-head attentions (Section 4). This approach is evaluated on two types of datasets collected from Twitter and Reddit (Section 3), and depicts significant improvement over the baseline using only the target utterance as input (Section 5). Our error analysis illustrates that the context-aware model can catch subtle nuance that cannot be captured by the target-oriented model (Section 6).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Just as most other types of figurative languages are, sarcasm is not necessarily complicated to express but requires comprehensive understanding in context as well as commonsense knowledge rather than its literal sense (Van Hee et al., 2018) . Various approaches have been presented for this task.", |
| "cite_spans": [ |
| { |
| "start": 219, |
| "end": 241, |
| "text": "(Van Hee et al., 2018)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Most earlier works had taken the target utterance without context as input. Both explicit and implicit incongruity features were explored in these works (Joshi et al., 2015) . To detect whether certain words in the target utterance involve sarcasm, several approaches based on distributional semantics were proposed (Ghosh et al., 2015) . Additionally, word embedding-based features like distance-weighted similarities were also adapted to capture the subtle forms of context incongruity (Joshi et al., 2016) . Nonetheless, it is difficult to detect sarcasm by considering only the target utterances in isolation.", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 173, |
| "text": "(Joshi et al., 2015)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 316, |
| "end": 336, |
| "text": "(Ghosh et al., 2015)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 488, |
| "end": 508, |
| "text": "(Joshi et al., 2016)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Non-textual features such as the properties of the author, audience and environment were also taken into account (Bamman and Smith, 2015) . Both the linguistic and context features were used to distinguish between information-seeking and rhetorical questions in forums and tweets (Oraby et al., 2017) . Traditional machine learning methods such as Support Vector Machines were used to model sarcasm detection as a sequential classification task over the target utterance and its surrounding utterances (Wang et al., 2015) . Recently, deep learning methods using LSTM were introduced, considering the prior turns as well as the succeeding turns (Ghosh et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 137, |
| "text": "(Bamman and Smith, 2015)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 280, |
| "end": 300, |
| "text": "(Oraby et al., 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 502, |
| "end": 521, |
| "text": "(Wang et al., 2015)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 644, |
| "end": 664, |
| "text": "(Ghosh et al., 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Given a conversation thread, either from Twitter or Reddit, a target utterance is the turn to be predicted, whether or not it involves sarcasm, and the context is an ordered list of other utterances in the thread. Table 1 shows the examples of conversation threads where the target utterances involve sarcasm. 1", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 214, |
| "end": 221, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Description", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This feels apt this morning but I don't feel fine ... <URL> The Twitter data is collected by using the hashtags #sarcasm and #sarcastic. The Reddit data is a subset of the Self-Annotated Reddit Corpus that consists of 1.3 million sarcastic and non-sarcastic posts (Khodak et al., 2017) . Every target utterance is annotated with one of the two labels, SARCASM and NOT_SARCASM. Table 2 shows the statistics of the two datasets provided by this shared task.", |
| "cite_spans": [ |
| { |
| "start": 264, |
| "end": 285, |
| "text": "(Khodak et al., 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 377, |
| "end": 384, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Utterance C 1", |
| "sec_num": null |
| }, |
| { |
| "text": "C 2 @USER", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Utterance C 1", |
| "sec_num": null |
| }, |
| { |
| "text": "Notice the huge variances in the utterance lengths for both the Twitter and the Reddit datasets. For the Reddit dataset, the average lengths of conversations as well as utterances are significantly larger in the test set than the training set that potentially makes the model development more challenging. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Utterance C 1", |
| "sec_num": null |
| }, |
| { |
| "text": "Two types of transformer-based sarcasm detection models are used for our experiments:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "a) The target-oriented model takes only the target utterance as input (Section 4.1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "b) The context-aware model takes both the target utterance and the context utterances as input (Section 4.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "These two models are coupled with the latest transformer encoders e.g., BERT (Devlin et al., 2019) , RoBERTa (Liu et al., 2020) , and ALBERT (Lan et al., 2019) , and compared to evaluate how much impact the context makes to predict whether or not the target utterance involves sarcasm. Figure 1a shows the overview of the target-oriented model. Let W = {w 1 , . . . , w n } be the input target utterance, where w i is the i'th token in W and n is the max-number of tokens in any target utterance. W is first prepended by the special token c representing the entire target utterance, which creates the input sequence I to = {c} \u2295 W . I to is then fed into the transformer encoder, which generates the sequence of embeddings {e c } \u2295 E w , where E w = {e w 1 , . . . , e w n } is the embedding list for W and (e c , e w i ) are the embeddings of (c, w i ) respectively. Finally, e c is fed into the linear decoder to generate the output vector o to that makes the binary decision of whether or not W involves sarcasm. Figure 1b shows the overview of the context-aware model. Let L i be the i'th utterance in the context.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 98, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 109, |
| "end": 127, |
| "text": "(Liu et al., 2020)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 141, |
| "end": 159, |
| "text": "(Lan et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 286, |
| "end": 295, |
| "text": "Figure 1a", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1016, |
| "end": 1025, |
| "text": "Figure 1b", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u22ef s \u22ef v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Target-oriented Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Then, V = L 1 \u2295 \u2022 \u2022 \u2022 \u2295 L k = {v 1 , . . . , v m }", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-aware Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "is the concatenated list of tokens in all context utterances, where k is the number of utterances in the context, v 1 is the first token in L 1 and v m is the last token in L k . The input sequence I to from Section 4.1 is appended by the special token s representing the separator between the target utterance and the context, and also V , which creates the input sequence I ca = I to \u2295 {s} \u2295 V . Then, I ca gets fed into the transformer encoder, which generates a sequence of embeddings", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-aware Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "{e c } \u2295 E w \u2295 {e s } \u2295 E v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-aware Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": ", where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-aware Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "E v = {e v 1 , . . . , e v m }", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-aware Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "is the embedding list for V , and (e s , e v i ) are the embeddings of (s, v i ) respectively. Finally, e c is fed into the linear decoder to generate the output vector o ca that makes the same binary decision to detect sarcasm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-aware Model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For all our experiments, a mixture of the Twitter and the Reddit datasets is used. The Twitter training set provided by the shared task consists of 5,000 tweets, where the labels are equally balanced between SARCASM and NOT_SARCASM (Table 2) . We find, however, 4.82% of them are duplicates, which are removed before data splitting. As a result, 4,759 tweets are used for our experiments. Labels in the Reddit training set are also equally balanced and no duplicate is found in this dataset. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 232, |
| "end": 241, |
| "text": "(Table 2)", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Split", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Three types of transformers are used for our experiments, that are BERT-Large (Devlin et al., 2019) , RoBERTa-Large (Liu et al., 2020) , and ALBERT-xxLarge (Lan et al., 2019) , to compare the performance among the current state-of-the-art encoders. Every model is run three times and their average scores as well as standard deviations are reported. All models are trained on the combined Twitter + Reddit training set and evaluated on the combined development set (Table 3) .", |
| "cite_spans": [ |
| { |
| "start": 78, |
| "end": 99, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 116, |
| "end": 134, |
| "text": "(Liu et al., 2020)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 156, |
| "end": 174, |
| "text": "(Lan et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 465, |
| "end": 474, |
| "text": "(Table 3)", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "After an extensive hyper-parameter search, we set the learning rate to 3e-5, the number of epochs to 30, and use different seed values, 21, 42, 63, for the three runs. Additionally, based on the statistics of each dataset, we set the maximum sequence length to 128 for the target-oriented models while it is set to 256 for the context-aware models by considering the different lengths of the input sequences required by those approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The baseline scores are provided by the organizers, that are 60.0% for Reddit and 67.0% for Twitter using the single layer LSTM attention model (Ghosh et al., 2018) . Table 4 shows the results achieved by our target-oriented (Section 4.1) and the contextaware (Section 4.2) models on the combined development set. The RoBERTa-Large model gives the highest F1-scores for both the target-oriented and context-aware models. The context-aware model using RoBERTa-Large show an improvement of 1.1% over its counterpart baseline so that this model is used for our final submission to the shared task. Note that it may be possible to achieve higher performance by fine-tuning hyperparameters for the Twitter and Reddit datasets separately, which we will explore in the future.", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 164, |
| "text": "(Ghosh et al., 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 167, |
| "end": 174, |
| "text": "Table 4", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "B-L 77.3 (\u00b10.6) 79.9 (\u00b10.8) 78.6 (\u00b10.1) R-L 73.4 (\u00b10.6) 88.5 (\u00b11.4) 80.2 (\u00b10.5) A-XXL 76.1 (\u00b11.4) 83.3 (\u00b12.3) 79.5 (\u00b10.2) (a) Results from the target-oriented models (Section 4.1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "P R F1", |
| "sec_num": null |
| }, |
| { |
| "text": "B .0% on the F1 scores for the Twitter and the Reddit datasets, respectively. The improvement on Reddit is particularly substantial due to the much greater lengths of the conversation threads and utterances in the test set compared to the ones in the training set (Table 2) . As the final results, we achieve 79.0% and 75.0% for the Twitter and Reddit datasets respectively that mark the 2nd places for both datasets at the time of the submission. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 264, |
| "end": 273, |
| "text": "(Table 2)", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "P R F1", |
| "sec_num": null |
| }, |
| { |
| "text": "For a better understanding in our final model, errors from the following three situations are analyzed (TO: target-oriented, CA: context-aware):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 TwCc: TO is wrong and CA is correct.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 TcCw: TO is correct and CA is wrong.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 TwCw: Both TO and CA are wrong. Table 6 shows examples for every error situation. For TwCc, TO predicts it to be NOT_SARCASM. In this example, it is difficult to tell if the target utterance involves sarcasm without having the context. For TcCw, CA predicts it to be NOT_SARCASM. It appears that the target utterance is long enough to provide enough features for TO to make the correct prediction, whereas considering the extra context may increase noise for CA to make the incorrect decision. For TwCw, both TO and CA predict it to be NOT_SARCASM. This example seems to require deeper reasoning to make the correct prediction.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 34, |
| "end": 41, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Utterance C 1 who has ever cared about y * utube r * wind .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "C 2 @USER Back when YouTube was beginning it was a cool giveback to the community to do a super polished high production value video with YT talent . Not the same now . The better move for them would be to do like 5-6 of them in several categories to give that shine .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "T @USER @USER I look forward to the eventual annual Tubies Awards livestream .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "(a) Example when TO is wrong and CA is correct.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Utterance C 1 I am asking the chairs of the House and Senate committees to investigate top secret intelligence shared with NBC prior to me seeing it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "C 2 @USER Good for you, sweetie! But using the legislative branch of the US Government to fix your media grudges seems a bit much.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "T @USER @USER @USER you look triggered after someone criticizes me, are conservatives skeptic of ppl in power?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "(b) Example when TO is correct and CA is wrong.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "If I could start my #Brand over, this is what I would emulate my #Site to look like .. And I might, once my anual contract with #WordPress is up . Even tho I don't think is very; I can't help but to find ... <URL> <URL> C 2 @USER There is no design on it except for links ? T @USER It's the of what #Works in this current #Mindset of #MassConsumption; wannabe fast due to caused by, and being just another and. is the light, bringing color back to this sad world of and.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C 1", |
| "sec_num": null |
| }, |
| { |
| "text": "(c) Example when both TO and CA are wrong. Table 6 : Examples of the three error situations. C i : i'th utterance in the context, T: the target utterance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 43, |
| "end": 50, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "C 1", |
| "sec_num": null |
| }, |
| { |
| "text": "This paper explores the benefit of considering relevant contexts for the task of sarcasm detection. Three types of state-of-the-art transformer encoders are adapted to establish the strong baseline for the target-oriented models, which are compared to the context-aware models that show significant improvements for both Twitter and Reddit datasets and become one of the highest performing models in this shared task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Note that the target utterance can appear at any position of the context although its exact position is not provided in this year's shared task data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://competitions.codalab.org/ competitions/22247", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We gratefully acknowledge the support of the AWS Machine Learning Research Awards (MLRA). Any contents in this material are those of the authors and do not necessarily reflect the views of AWS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "All our resources are publicly available at Emory NLP's open source repository", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "All our resources are publicly available at Emory NLP's open source repository: https://github. com/emorynlp/figlang-shared-task-2020", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Contextualized Sarcasm Detection on Twitter", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Bamman", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International AAAI Conference on Web and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "574--577", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Bamman and Noah Smith. 2015. Contextual- ized Sarcasm Detection on Twitter. In International AAAI Conference on Web and Social Media, pages 574-577.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Magnets for Sarcasm: Making Sarcasm Detection Timely, Contextual and Very Personal", |
| "authors": [ |
| { |
| "first": "Aniruddha", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Tony", |
| "middle": [], |
| "last": "Veale", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "482--491", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1050" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aniruddha Ghosh and Tony Veale. 2017. Magnets for Sarcasm: Making Sarcasm Detection Timely, Con- textual and Very Personal. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 482-491, Copenhagen, Denmark. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The Role of Conversation Context for Sarcasm Detection in Online Interactions", |
| "authors": [ |
| { |
| "first": "Debanjan", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "Richard" |
| ], |
| "last": "Fabbri", |
| "suffix": "" |
| }, |
| { |
| "first": "Smaranda", |
| "middle": [], |
| "last": "Muresan", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "186--196", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Debanjan Ghosh, Alexander Richard Fabbri, and Smaranda Muresan. 2017. The Role of Conversa- tion Context for Sarcasm Detection in Online Inter- actions. Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, pages 186- 196.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Sarcasm Analysis using Conversation Context", |
| "authors": [ |
| { |
| "first": "Debanjan", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "Richard" |
| ], |
| "last": "Fabbri", |
| "suffix": "" |
| }, |
| { |
| "first": "Smaranda", |
| "middle": [], |
| "last": "Muresan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Comput. Linguist", |
| "volume": "44", |
| "issue": "4", |
| "pages": "755--792", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/coli_a_00336" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Debanjan Ghosh, Alexander Richard Fabbri, and Smaranda Muresan. 2018. Sarcasm Analysis us- ing Conversation Context. Comput. Linguist., 44(4):755-792.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Sarcastic or Not: Word Embeddings to Predict the Literal or Sarcastic Meaning of Words", |
| "authors": [ |
| { |
| "first": "Debanjan", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Smaranda", |
| "middle": [], |
| "last": "Muresan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1003--1012", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D15-1116" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Debanjan Ghosh, Weiwei Guo, and Smaranda Mure- san. 2015. Sarcastic or Not: Word Embeddings to Predict the Literal or Sarcastic Meaning of Words. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 1003-1012, Lisbon, Portugal. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Automatic Sarcasm Detection: A Survey", |
| "authors": [ |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpak", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [ |
| "J" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ACM Computing Surveys", |
| "volume": "50", |
| "issue": "5", |
| "pages": "1--22", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3124420" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aditya Joshi, Pushpak Bhattacharyya, and Mark J. Car- man. 2017. Automatic Sarcasm Detection: A Sur- vey. ACM Computing Surveys, 50(5):1-22.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Harnessing Context Incongruity for Sarcasm Detection", |
| "authors": [ |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Vinita", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpak", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "757--762", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P15-2124" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aditya Joshi, Vinita Sharma, and Pushpak Bhat- tacharyya. 2015. Harnessing Context Incongruity for Sarcasm Detection. In Proceedings of the 53rd Annual Meeting of the Association for Computa- tional Linguistics and the 7th International Joint Conference on Natural Language Processing (Vol- ume 2: Short Papers), pages 757-762, Beijing, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Are Word Embedding-based Features Useful for Sarcasm Detection?", |
| "authors": [ |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Vaibhav", |
| "middle": [], |
| "last": "Tripathi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Patel", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpak", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Carman", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1006--1011", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1104" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aditya Joshi, Vaibhav Tripathi, Kevin Patel, Pushpak Bhattacharyya, and Mark Carman. 2016. Are Word Embedding-based Features Useful for Sarcasm De- tection? In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Process- ing, pages 1006-1011, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A Large Self-Annotated Corpus for Sarcasm", |
| "authors": [ |
| { |
| "first": "Mikhail", |
| "middle": [], |
| "last": "Khodak", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikunj", |
| "middle": [], |
| "last": "Saunshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kiran", |
| "middle": [], |
| "last": "Vodrahalli", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikhail Khodak, Nikunj Saunshi, and Kiran Vodrahalli. 2017. A Large Self-Annotated Corpus for Sarcasm. Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC 2018), abs/1704.05579.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", |
| "authors": [ |
| { |
| "first": "Zhenzhong", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 1909, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. arXiv, 11942(1909).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2020. RoBERTa: A Robustly Optimized BERT Pretrain- ing Approach. In Proceedings of the International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Are you serious?: Rhetorical Questions and Sarcasm in Social Media Dialog", |
| "authors": [ |
| { |
| "first": "Shereen", |
| "middle": [], |
| "last": "Oraby", |
| "suffix": "" |
| }, |
| { |
| "first": "Vrindavan", |
| "middle": [], |
| "last": "Harrison", |
| "suffix": "" |
| }, |
| { |
| "first": "Amita", |
| "middle": [], |
| "last": "Misra", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellen", |
| "middle": [], |
| "last": "Riloff", |
| "suffix": "" |
| }, |
| { |
| "first": "Marilyn", |
| "middle": [], |
| "last": "Walker", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "310--319", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-5537" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shereen Oraby, Vrindavan Harrison, Amita Misra, Ellen Riloff, and Marilyn Walker. 2017. Are you serious?: Rhetorical Questions and Sarcasm in So- cial Media Dialog. In Proceedings of the 18th An- nual SIGdial Meeting on Discourse and Dialogue, pages 310-319, Saarbr\u00fccken, Germany. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "SemEval-2018 Task 3: Irony Detection in English Tweets", |
| "authors": [ |
| { |
| "first": "Cynthia", |
| "middle": [], |
| "last": "Van Hee", |
| "suffix": "" |
| }, |
| { |
| "first": "Els", |
| "middle": [], |
| "last": "Lefever", |
| "suffix": "" |
| }, |
| { |
| "first": "V\u00e9ronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of The 12th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "39--50", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S18-1005" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cynthia Van Hee, Els Lefever, and V\u00e9ronique Hoste. 2018. SemEval-2018 Task 3: Irony Detection in En- glish Tweets. In Proceedings of The 12th Interna- tional Workshop on Semantic Evaluation, pages 39- 50, New Orleans, Louisiana. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Twitter Sarcasm Detection Exploiting a Context-Based Model", |
| "authors": [ |
| { |
| "first": "Zelin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhijian", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruimin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yafeng", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "WISE", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zelin Wang, Zhijian Wu, Ruimin Wang, and Yafeng Ren. 2015. Twitter Sarcasm Detection Exploiting a Context-Based Model. In WISE.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "text": "The overview of our transformer-based target-oriented and context-aware models.", |
| "uris": null |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "text": "Examples of the conversation threads where the target utterances involve sarcasm. C", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "text": "Statistics of the two datasets provided by the shared task. TRN: training set, TST: test set, NC: # of conversations, AU: Avg # of utterances per conversation (including the target utterances) and its stdev, AT: Avg # of tokens per utterance and its stdev.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF6": { |
| "content": "<table/>", |
| "text": "", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF8": { |
| "content": "<table><tr><td>: Results on the combined Twitter+Reddit devel-</td></tr><tr><td>opment set. B-L: BERT-Large, R-L: RoBERTa-Large,</td></tr><tr><td>A-XXL: ALBERT-xxLarge.</td></tr></table>", |
| "text": "", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF9": { |
| "content": "<table><tr><td>shows the results by the RoBERTa-Large</td></tr><tr><td>models on the test sets. The scores are retrieved by</td></tr><tr><td>submitting the system outputs to the shared task's</td></tr><tr><td>CodaLab page. 2 The context-aware models sig-</td></tr><tr><td>nificantly outperform the target-oriented models</td></tr><tr><td>on the test sets, showing improvements of 3.1%</td></tr><tr><td>and 7</td></tr></table>", |
| "text": "", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF11": { |
| "content": "<table/>", |
| "text": "Results on the test sets from CodaLab.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |