| { |
| "paper_id": "S19-1019", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:46:29.953276Z" |
| }, |
| "title": "Acquiring Structured Temporal Representation via Crowdsourcing: A Feasibility Study", |
| "authors": [ |
| { |
| "first": "Yuchen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Brandeis University", |
| "location": {} |
| }, |
| "email": "yuchenz@brandeis.edu" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Brandeis University", |
| "location": {} |
| }, |
| "email": "xuen@brandeis.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Temporal Dependency Trees are a structured temporal representation that represents temporal relations among time expressions and events in a text as a dependency tree structure. Compared to traditional pair-wise temporal relation representations, temporal dependency trees facilitate efficient annotations, higher inter-annotator agreement, and efficient computations. However, annotations on temporal dependency trees so far have only been done by expert annotators, which is costly and time-consuming. In this paper, we introduce a method to crowdsource temporal dependency tree annotations, and show that this representation is intuitive and can be collected with high accuracy and agreement through crowdsourcing. We produce a corpus of temporal dependency trees, and present a baseline temporal dependency parser, trained and evaluated on this new corpus.", |
| "pdf_parse": { |
| "paper_id": "S19-1019", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Temporal Dependency Trees are a structured temporal representation that represents temporal relations among time expressions and events in a text as a dependency tree structure. Compared to traditional pair-wise temporal relation representations, temporal dependency trees facilitate efficient annotations, higher inter-annotator agreement, and efficient computations. However, annotations on temporal dependency trees so far have only been done by expert annotators, which is costly and time-consuming. In this paper, we introduce a method to crowdsource temporal dependency tree annotations, and show that this representation is intuitive and can be collected with high accuracy and agreement through crowdsourcing. We produce a corpus of temporal dependency trees, and present a baseline temporal dependency parser, trained and evaluated on this new corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Temporal relation extraction is an important NLP task for a range of downstream applications, such as question answering, summarization, and storyline generation. This task has attracted a significant amount of research interest (Pustejovsky et al., 2003a; Verhagen et al., 2007 Verhagen et al., , 2010 Uz-Zaman et al., 2012; Bethard et al., 2016 Dligach et al., 2017; Leeuwenberg and Moens, 2017; Ning et al., 2017 Ning et al., , 2018a Zhang and Xue, 2018a,b) . One practical challenge in temporal relation extraction is to represent the temporal relations in a text in a way that is feasible for manual annotation and producing training data for machine learning models. Given a text of n events and time expressions, there are n 2 possible relations if the temporal relation between all pairs of events and time expressions is annotated. This quickly becomes infeasible even for a text of modest length. One way to address this problem is to represent the temporal relations in a text as a Temporal Dependency Tree (TDT) structure (Zhang and Xue, 2018b) . TDT models all time expressions and events in a text as \"nodes\" in a dependency tree, and temporal relations between each time/event and its parent time/event as \"edges\" in the tree. Figure 1 gives an example text and its TDT. Each (parent, child) pair in Figure 1 is annotated with a temporal relation. The number of temporal relations that need to be annotated in a text is therefore linear to the number of events and time expressions in a text, making the annotation task feasible. At the same time, additional temporal relations can be inferred as needed based on the TDT structure. For example, in Figure 1 since \"1918\" includes the \"born\" event and \"1929\" includes the \"won\" event, it can be inferred that the \"born\" event occurred before the \"won\" event.", |
| "cite_spans": [ |
| { |
| "start": 229, |
| "end": 256, |
| "text": "(Pustejovsky et al., 2003a;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 257, |
| "end": 278, |
| "text": "Verhagen et al., 2007", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 279, |
| "end": 302, |
| "text": "Verhagen et al., , 2010", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 303, |
| "end": 325, |
| "text": "Uz-Zaman et al., 2012;", |
| "ref_id": null |
| }, |
| { |
| "start": 326, |
| "end": 346, |
| "text": "Bethard et al., 2016", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 347, |
| "end": 368, |
| "text": "Dligach et al., 2017;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 369, |
| "end": 397, |
| "text": "Leeuwenberg and Moens, 2017;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 398, |
| "end": 415, |
| "text": "Ning et al., 2017", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 416, |
| "end": 436, |
| "text": "Ning et al., , 2018a", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 437, |
| "end": 460, |
| "text": "Zhang and Xue, 2018a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 1034, |
| "end": 1056, |
| "text": "(Zhang and Xue, 2018b)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1242, |
| "end": 1250, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1315, |
| "end": 1323, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1663, |
| "end": 1671, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "By providing annotators with detailed guidelines and training them in multiple iterations, Zhang and Xue (2018b) have shown that the TDT representation can be annotated with high interannotator agreement. Zhang and Xue (2018a) further show that a neural ranking model can be successfully trained on the corpus. However, this \"traditional\" approach to annotation is timeconsuming and expensive. The question we want to answer in this paper is whether TDT can be performed with crowdsourcing, a method that has gained popularity as a means to acquire linguistically annotated data quickly and cost-effectively for NLP research.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 112, |
| "text": "Zhang and Xue (2018b)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 205, |
| "end": 226, |
| "text": "Zhang and Xue (2018a)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Crowdsourcing has been used to annotate data for a wide range of NLP tasks that include question answering, word similarity, text entailment, word sense disambiguation, machine translation, information extraction, summarization, and semantic role labeling (Snow et al., 2008; Finin et al., 2010; Zaidan and Callison-Burch, 2011; Lloret et al., 2013; Rajpurkar et al., 2018) . The key to acquiring high quality data via crowdsourcing is to make sure that the tasks are intuitive or can be decomposed into intuitive subtasks. In this paper, we show that it is possible to acquire high quality temporal dependency structures through crowdsourcing, and that a temporal dependency parser can be successfully trained on crowdsourced TDTs.", |
| "cite_spans": [ |
| { |
| "start": 256, |
| "end": 275, |
| "text": "(Snow et al., 2008;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 276, |
| "end": 295, |
| "text": "Finin et al., 2010;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 296, |
| "end": 328, |
| "text": "Zaidan and Callison-Burch, 2011;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 329, |
| "end": 349, |
| "text": "Lloret et al., 2013;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 350, |
| "end": 373, |
| "text": "Rajpurkar et al., 2018)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows. We first explain in detail how we set up this dependency tree crowdsourcing annotation task ( \u00a72). In ( \u00a73) we present experimental results that show that if temporal dependency structures are broken into smaller subtasks, high inter-annotator agreement can be achieved. In ( \u00a74), we show that crowdsource data can be used to successfully train temporal dependency parsers, including an attentionbased neural model ( \u00a74). We discuss related work ( \u00a75) and conclude with future work ( \u00a76).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main contributions of this paper are: (1) we introduce an effective approach to crowdsource structured temporal annotations, a relatively complex annotation task; (2) we build an English temporal dependency tree corpus through crowdsourcing that we plan to make publicly available; and (3) we experiment with automatic temporal dependency parsers on this new corpus and report competitive results. Our TDT annotations are performed on top of the TimeBank corpus (Pustejovsky et al., 2003b) , with time expressions and events already extracted. Following (Zhang and Xue, 2018b) , we focus only on events that are matrix verbs (i.e. main verbs) in a sentence. In order to extract matrix verbs, we use the gold constituent trees for the part of TimeBank that overlaps with the Penn Treebank, and parse the rest of TimeBank with the Berkeley Neural Parser (Kitaev and Klein, 2018) . All time expressions in TimeBank are kept.", |
| "cite_spans": [ |
| { |
| "start": 466, |
| "end": 493, |
| "text": "(Pustejovsky et al., 2003b)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 558, |
| "end": 580, |
| "text": "(Zhang and Xue, 2018b)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 856, |
| "end": 880, |
| "text": "(Kitaev and Klein, 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To facilitate quality control in crowdsourcing and agreement evaluation, we distinguish two subsets of the TimeBank dataset: (1) TB-small is a small subset of 10 short Wall Street Journal news documents with 59 matrix verbs. (2) TBdense consists of the same 36 documents as in the TimeBank-Dense corpus (Cassidy et al., 2014) . It contains 654 matrix verbs. TB-small and TBdense are annotated by both crowd workers and experts.", |
| "cite_spans": [ |
| { |
| "start": 303, |
| "end": 325, |
| "text": "(Cassidy et al., 2014)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We set up two annotation tasks. The first is full temporal dependency tree annotation, where crowd workers need to annotate both the dependency tree structure and the temporal relations between each parent and child. The second is relation-only annotation, where crowd workers are given the gold temporal dependency trees and their job is just to label the temporal relation for each parent-child pair.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Tasks", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "For the full temporal dependency tree annotation, in order to simplify the questions/instructions to crowd workers, we split the task of annotating a full dependency tree into (1) finding the \"parent\" for each individual event, and then (2) deciding the temporal relation between the \"parent\" and the event. A crowd worker is given a text with a highlighted target event and a list of candidate parent time expressions and events. The job of the crowd worker is to select one parent from the given list of candidates, and label the temporal relation between the parent and the target event. For relationonly annotation, a crowd worker is presented a text with the target event and its parent highlighted. The job of the worker is to decide the temporal relation between the two. See Appendix A for example crowdsourcing instructions and questions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Crowdsourcing Design", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Following standard crowdsourcing quality control, we perform a qualifying test on both annotation tasks. Any crowd worker who wants to work on these tasks needs to complete annotations on TB-small and reach at least 70% accuracy against the expert gold annotation. We also perform a surviving test on the relation-only annotation task. Crowd workers have to maintain at least a cumulative accuracy of 70% for their annotation. Workers with a lower accuracy will get blocked from the task and all of their annotations will be discarded. Every annotation is completed by at least 3 annotators and the majority vote is the final annotation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Crowdsourcing Design", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Crowdsourcing annotations on the full TimeBank corpus was performed. We report Inter-Annotator Agreement (IAA) scores in Table 1 First, crowdsourced majority annotations on TB-dense are evaluated against expert annotations, representing the quality of the crowdsourced data. For this comparison, the standard dependency parsing evaluation metrics (K\u00fcbler et al., 2009) are used as our IAA scores: structureonly annotation subtask is evaluated with the Unlabeled Attachment Agreement (UAA) score, relation-only annotation subtask is evaluated with the Label Only Agreement (LOA) score, and full pipeline annotation is evaluated with the Labeled Attachment Agreement (LAA) score.", |
| "cite_spans": [ |
| { |
| "start": 347, |
| "end": 368, |
| "text": "(K\u00fcbler et al., 2009)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 121, |
| "end": 128, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Second, crowd worker annotations are compared against each other, indicating the difficulty, consistency, and confidence of the crowdsourced data. Since crowd workers annotate isolated events/times instead of full dependency structures, the standard dependency parsing metrics are not applicable for this comparison 1 . Therefore, we adopt the Worker Agreements With Aggregate (WAWA) metric (Ning et al., 2018a) as our IAA scores. WAWA indicates the average number of crowd worker responses agreed with the aggregate answer (i.e. majority aggregation for each annotation instance), representing the agreements among crowd workers and how consistent their annotations are with each other.", |
| "cite_spans": [ |
| { |
| "start": 391, |
| "end": 411, |
| "text": "(Ning et al., 2018a)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As shown in the table, high accuracies and agreements are achieved for both the subtasks of structure annotation and relation-only annotation (above 80%).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Statistics on our corpus and other similar TimeBank-based temporal relation corpora are presented in Table 2 . As the number of temporal relations is linear to the number of events and time expressions in a text, fewer temporal relations need to be annotated in our corpus. In comparison, the recently crowdsourced temporal structure corpus MATRES (Ning et al. (2018a) , see Section 5 for more details) only annotates verb events in a document while TB-dense annotates a larger number of time expressions and events in a much smaller number of documents. Our corpus retains the full set of TimeBank time expressions and covers comparable number of events as MATRES. We pay $0.01 for each individual annotation and the entire TimeBank TDT annotation cost about $300 in total. Rels TimeBank 183 1,414 7,935 6,418 TB-Dense 36 289 1,729 12,715 MATRES 275 -1,790 13,577 This work 183 1,414 2,691 4,105 Table 2 : Documents, timex, events, and temporal relation statistics in various temporal corpora.", |
| "cite_spans": [ |
| { |
| "start": 348, |
| "end": 368, |
| "text": "(Ning et al. (2018a)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 101, |
| "end": 108, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 775, |
| "end": 918, |
| "text": "Rels TimeBank 183 1,414 7,935 6,418 TB-Dense 36 289 1,729 12,715 MATRES 275 -1,790 13,577 This work 183 1,414 2,691 4,105 Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We experiment with a state-of-the-art attentionbased neural temporal dependency parser (Zhang and Xue, 2018a) 2 on our newly annotated data. Our training data consists of two parts. The first part is the crowdsourced temporal dependency annotations over the TimeBank documents (excluding documents that are in the dev and test sets in the TimeBank-Dense corpus 3 ). The second part is our expert-annotated TDTs on the TimeBank-Dense training set documents. The parser is tuned and evaluated on our expert TDT annotations on the TimeBank-Dense dev and test sets, respectively. This neural model represents words with bi-LSTM vectors and uses an attention-based mechanism to represent multi-word time expressions and events.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We also experiment with two baseline parsers from Zhang and Xue (2018a): (1) a simple baseline that takes an event's immediate previous time expression or event as its parent and assigns the majority \"overlap\" as the temporal relation between them; and (2) a logistic regression model that represents time expressions and events with their time/event type features, lexical features, and distance features. Table 3 shows the performance of these systems on our data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 407, |
| "end": 414, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "-only F Table 3 : Parsing results of the simple baseline, logistic regression baseline, and the neural temporal dependency model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 8, |
| "end": 15, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Structure", |
| "sec_num": null |
| }, |
| { |
| "text": "Improved performance over the simple baseline with both the LogReg system and the Neural system show that temporal dependency information can be learned from this crowdsourced corpus. Comparisons between the LogReg baseline and the Neural model show that the Neural model adapts better to new data sets than the LogReg model with manually-crafted languagespecific features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure", |
| "sec_num": null |
| }, |
| { |
"text": "Although crowdsourcing is widely used in other NLP tasks, there have been only a few temporal relation annotation tasks via crowdsourcing. The first attempt on crowdsourcing temporal relation annotations is described in Snow et al. (2008) . They selected a restricted subset of verb events from TimeBank and performed strict before/after temporal relation annotation through crowdsourcing. They reported high agreements showing that simple temporal relations are crowdsourceable. Ng and Kan (2012) adopts the TimeML representation from the TimeBank, and crowdsourced temporal annotations on news articles crawled from news websites. Their experiments show that the large crowdsourced data improved classifier performance significantly. However, both of these works focused on pair-wise temporal relations and didn't experiment with crowdsourcing more complex temporal structures. Vempala and Blanco (2018) uses a crowdsourcing approach to collect temporal and spatial knowledge. However, they first automatically generated such knowledge and then used crowdsourcing to either validate or discard these automatically generated information, and crowdsourcing was not utilized to do annotation from scratch. Ning et al. (2018a) proposed a \"multi-axis\" representation of temporal relations in a text, and published the MATRES corpus by annotating \"multiaxis\" temporal structures on top of the TempEval-3 data through crowdsourcing. In this representation, events are annotated on different \"axes\" according to their eventuality types, and for events on the same axis, pair-wise temporal relations are annotated. Their annotation task is broken down to two smaller subtasks too. In the first subtask, crowd workers annotate whether an event is on a given axis. In the second subtask, crowd workers annotate the temporal relations between pairs of events on the same axis. The main differences between their work and ours are as follows. First, they only model events, excluding time expressions which are important temporal components in text too. Second, our temporal dependency tree representation is very different from their multiaxis temporal representation, which requires different crowdsourcing task designs. In their first subtask, crowd workers need to distinguish different eventuality types, while our annotation experiments show that crowd workers can also consistently recognize \"parents\" as defined in Zhang and Xue (2018b) for given events.",
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 238, |
| "text": "Snow et al. (2008)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 480, |
| "end": 497, |
| "text": "Ng and Kan (2012)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 880, |
| "end": 905, |
| "text": "Vempala and Blanco (2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1205, |
| "end": 1224, |
| "text": "Ning et al. (2018a)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this paper, we introduce a crowdsourcing approach for acquiring annotations on a relatively complex NLP concept -temporal dependency structures. We build the first English temporal dependency tree corpus through high quality crowdsourcing. Our system experiments show that competitive temporal dependency parsers can be trained on our newly collected data. In future work, we plan to crowdsource more TDT data across different domains. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "And for the same reason, Cohen's kappa and Fleiss' kappa scores are not applicable here either.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/yuchenz/tdp_ ranking 3 Standard TimeBank-Dense train/dev/test split can be found inCassidy et al. (2014).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "A Appendix: Example Crowdsourcing Instructions and Questions ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Semeval-2016 task 12: Clinical tempeval", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "Guergana", |
| "middle": [], |
| "last": "Savova", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Te", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Verhagen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "1052--1062", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bethard, Guergana Savova, Wei-Te Chen, Leon Derczynski, James Pustejovsky, and Marc Verhagen. 2016. Semeval-2016 task 12: Clinical tempeval. In Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016), pages 1052- 1062.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Semeval-2017 task 12: Clinical tempeval", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "Guergana", |
| "middle": [], |
| "last": "Savova", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
| "volume": "", |
| "issue": "", |
| "pages": "565--572", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bethard, Guergana Savova, Martha Palmer, and James Pustejovsky. 2017. Semeval-2017 task 12: Clinical tempeval. In Proceedings of the 11th International Workshop on Semantic Evalua- tion (SemEval-2017), pages 565-572.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "An annotation framework for dense event ordering", |
| "authors": [ |
| { |
| "first": "Taylor", |
| "middle": [], |
| "last": "Cassidy", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Mcdowell", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathanael", |
| "middle": [], |
| "last": "Chambers", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "501--506", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taylor Cassidy, Bill McDowell, Nathanael Chambers, and Steven Bethard. 2014. An annotation frame- work for dense event ordering. In Proceedings of the 52nd Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), vol- ume 2, pages 501-506.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Neural temporal relation extraction", |
| "authors": [ |
| { |
| "first": "Dmitriy", |
| "middle": [], |
| "last": "Dligach", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "Guergana", |
| "middle": [], |
| "last": "Savova", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "746--751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dmitriy Dligach, Timothy Miller, Chen Lin, Steven Bethard, and Guergana Savova. 2017. Neural tem- poral relation extraction. In Proceedings of the 15th Conference of the European Chapter of the Associa- tion for Computational Linguistics: Volume 2, Short Papers, volume 2, pages 746-751.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Annotating named entities in twitter data with crowdsourcing", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Finin", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Murnane", |
| "suffix": "" |
| }, |
| { |
| "first": "Anand", |
| "middle": [], |
| "last": "Karandikar", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Keller", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Martineau", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk", |
| "volume": "", |
| "issue": "", |
| "pages": "80--88", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Finin, Will Murnane, Anand Karandikar, Nicholas Keller, Justin Martineau, and Mark Dredze. 2010. Annotating named entities in twitter data with crowdsourcing. In Proceedings of the NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk, pages 80-88. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Constituency parsing with a self-attentive encoder", |
| "authors": [ |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Kitaev", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikita Kitaev and Dan Klein. 2018. Constituency parsing with a self-attentive encoder. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), Melbourne, Australia. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Dependency parsing", |
| "authors": [ |
| { |
| "first": "Sandra", |
| "middle": [], |
| "last": "K\u00fcbler", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Synthesis Lectures on Human Language Technologies", |
| "volume": "1", |
| "issue": "1", |
| "pages": "1--127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sandra K\u00fcbler, Ryan McDonald, and Joakim Nivre. 2009. Dependency parsing. Synthesis Lectures on Human Language Technologies, 1(1):1-127.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Structured learning for temporal relation extraction from clinical records", |
| "authors": [ |
| { |
| "first": "Tuur", |
| "middle": [], |
| "last": "Leeuwenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1150--1158", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tuur Leeuwenberg and Marie-Francine Moens. 2017. Structured learning for temporal relation extraction from clinical records. In Proceedings of the 15th Conference of the European Chapter of the Asso- ciation for Computational Linguistics, pages 1150- 1158.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Analyzing the capabilities of crowdsourcing services for text summarization. Language resources and evaluation", |
| "authors": [ |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Lloret", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Plaza", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmet", |
| "middle": [], |
| "last": "Aker", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "47", |
| "issue": "", |
| "pages": "337--369", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elena Lloret, Laura Plaza, and Ahmet Aker. 2013. An- alyzing the capabilities of crowdsourcing services for text summarization. Language resources and evaluation, 47(2):337-369.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Improved temporal relation classification using dependency parses and selective crowdsourced annotations", |
| "authors": [ |
| { |
| "first": "Jun-Ping", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Min-Yen", |
| "middle": [], |
| "last": "Kan", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of COLING 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "2109--2124", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jun-Ping Ng and Min-Yen Kan. 2012. Improved tem- poral relation classification using dependency parses and selective crowdsourced annotations. Proceed- ings of COLING 2012, pages 2109-2124.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A structured learning approach to temporal relation extraction", |
| "authors": [ |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Ning", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhili", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1027--1037", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qiang Ning, Zhili Feng, and Dan Roth. 2017. A struc- tured learning approach to temporal relation extrac- tion. In Proceedings of the 2017 Conference on Em- pirical Methods in Natural Language Processing, pages 1027-1037.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A multiaxis annotation scheme for event temporal relations", |
| "authors": [ |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Ning", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.07828" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qiang Ning, Hao Wu, and Dan Roth. 2018a. A multi- axis annotation scheme for event temporal relations. arXiv preprint arXiv:1804.07828.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Exploiting partially annotated data in temporal relation extraction", |
| "authors": [ |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Ning", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongzhi", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuchu", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
| "volume": "", |
| "issue": "", |
| "pages": "148--153", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qiang Ning, Zhongzhi Yu, Chuchu Fan, and Dan Roth. 2018b. Exploiting partially annotated data in tempo- ral relation extraction. In Proceedings of the Seventh Joint Conference on Lexical and Computational Se- mantics, pages 148-153.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Timeml: Robust specification of event and temporal expressions in text", |
"authors": [
{
"first": "James",
"middle": [],
"last": "Pustejovsky",
"suffix": ""
},
{
"first": "Jos\u00e9",
"middle": [
"M"
],
"last": "Castano",
"suffix": ""
},
{
"first": "Robert",
"middle": [],
"last": "Ingria",
"suffix": ""
},
{
"first": "Roser",
"middle": [],
"last": "Sauri",
"suffix": ""
},
{
"first": "Robert",
"middle": [
"J"
],
"last": "Gaizauskas",
"suffix": ""
},
{
"first": "Andrea",
"middle": [],
"last": "Setzer",
"suffix": ""
},
{
"first": "Graham",
"middle": [],
"last": "Katz",
"suffix": ""
},
{
"first": "Dragomir",
"middle": [
"R"
],
"last": "Radev",
"suffix": ""
}
],
| "year": 2003, |
| "venue": "New directions in question answering", |
| "volume": "3", |
| "issue": "", |
| "pages": "28--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Pustejovsky, Jos\u00e9 M Castano, Robert Ingria, Roser Sauri, Robert J Gaizauskas, Andrea Set- zer, Graham Katz, and Dragomir R Radev. 2003a. Timeml: Robust specification of event and tempo- ral expressions in text. New directions in question answering, 3:28-34.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "The timebank corpus", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Hanks", |
| "suffix": "" |
| }, |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Sauri", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Setzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| }, |
| { |
| "first": "Beth", |
| "middle": [], |
| "last": "Sundheim", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Day", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Ferro", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Corpus linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Pustejovsky, Patrick Hanks, Roser Sauri, An- drew See, Robert Gaizauskas, Andrea Setzer, Dragomir Radev, Beth Sundheim, David Day, Lisa Ferro, et al. 2003b. The timebank corpus. In Corpus linguistics, volume 2003, page 40. Lancaster, UK.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Know what you don't know: Unanswerable questions for squad", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1806.03822" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable ques- tions for squad. arXiv preprint arXiv:1806.03822.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Cheap and fast-but is it good?: evaluating non-expert annotations for natural language tasks", |
"authors": [
{
"first": "Rion",
"middle": [],
"last": "Snow",
"suffix": ""
},
{
"first": "Brendan",
"middle": [],
"last": "O'Connor",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Jurafsky",
"suffix": ""
},
{
"first": "Andrew",
"middle": [
"Y"
],
"last": "Ng",
"suffix": ""
}
],
| "year": 2008, |
| "venue": "Proceedings of the conference on empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "254--263", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rion Snow, Brendan O'Connor, Daniel Jurafsky, and Andrew Y Ng. 2008. Cheap and fast-but is it good?: evaluating non-expert annotations for natu- ral language tasks. In Proceedings of the conference on empirical methods in natural language process- ing, pages 254-263. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Tempeval-3: Evaluating events, time expressions, and temporal relations", |
| "authors": [ |
| { |
| "first": "Naushad", |
| "middle": [], |
"last": "UzZaman",
| "suffix": "" |
| }, |
| { |
| "first": "Hector", |
| "middle": [], |
| "last": "Llorens", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Allen", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Verhagen", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1206.5333" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naushad UzZaman, Hector Llorens, James Allen, Leon Derczynski, Marc Verhagen, and James Pustejovsky. 2012. Tempeval-3: Evaluating events, time ex- pressions, and temporal relations. arXiv preprint arXiv:1206.5333.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Annotating temporally-anchored spatial knowledge by leveraging syntactic dependencies", |
| "authors": [ |
| { |
| "first": "Alakananda", |
| "middle": [], |
| "last": "Vempala", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduardo", |
| "middle": [], |
| "last": "Blanco", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alakananda Vempala and Eduardo Blanco. 2018. An- notating temporally-anchored spatial knowledge by leveraging syntactic dependencies. In Proceedings of the Eleventh International Conference on Lan- guage Resources and Evaluation (LREC-2018).", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Semeval-2007 task 15: Tempeval temporal relation identification", |
| "authors": [ |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Verhagen", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Schilder", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Hepple", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Katz", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 4th International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "75--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc Verhagen, Robert Gaizauskas, Frank Schilder, Mark Hepple, Graham Katz, and James Pustejovsky. 2007. Semeval-2007 task 15: Tempeval temporal relation identification. In Proceedings of the 4th International Workshop on Semantic Evaluations, pages 75-80. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Semeval-2010 task 13: Tempeval-2", |
| "authors": [ |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Verhagen", |
| "suffix": "" |
| }, |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Sauri", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 5th international workshop on semantic evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "57--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc Verhagen, Roser Sauri, Tommaso Caselli, and James Pustejovsky. 2010. Semeval-2010 task 13: Tempeval-2. In Proceedings of the 5th international workshop on semantic evaluation, pages 57-62. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Crowdsourcing translation: Professional quality from non-professionals", |
"authors": [
{
"first": "Omar",
"middle": [
"F"
],
"last": "Zaidan",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
}
],
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1220--1229", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omar F Zaidan and Chris Callison-Burch. 2011. Crowdsourcing translation: Professional quality from non-professionals. In Proceedings of the 49th Annual Meeting of the Association for Computa- tional Linguistics: Human Language Technologies- Volume 1, pages 1220-1229. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Neural ranking models for temporal dependency structure parsing", |
| "authors": [ |
| { |
| "first": "Yuchen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3339--3349", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuchen Zhang and Nianwen Xue. 2018a. Neural rank- ing models for temporal dependency structure pars- ing. In Proceedings of the 2018 Conference on Em- pirical Methods in Natural Language Processing, pages 3339-3349.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Structured interpretation of temporal relations", |
| "authors": [ |
| { |
| "first": "Yuchen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 11th edition of the Language Resources and Evaluation Conference (LREC-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuchen Zhang and Nianwen Xue. 2018b. Structured interpretation of temporal relations. In Proceedings of 11th edition of the Language Resources and Eval- uation Conference (LREC-2018), Miyazaki, Japan.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "He was born e1 in 1918 t1 . It was e2 a tough time for his family. Later, he started e3 school at the Central Elementary. He won e4 a school prize in 1929 t2 ." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Example text and its temporal dependency tree. The nodes in blue are meta nodes (e.g., document creation time \"DCT\", present reference time \"Present Ref\", etc.), the nodes in orange are time expressions, and the nodes in green are events." |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Example crowdsourcing instructions and questions for relation only annotation." |
| } |
| } |
| } |
| } |