| { |
| "paper_id": "P17-1039", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:16:02.591001Z" |
| }, |
| "title": "Time Expression Analysis and Recognition Using Syntactic Token Types and General Heuristic Rules", |
| "authors": [ |
| { |
| "first": "Xiaoshi", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Nanyang Technological University", |
| "location": { |
| "country": "Singapore" |
| } |
| }, |
| "email": "xszhong@ntu.edu.sg" |
| }, |
| { |
| "first": "Aixin", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Nanyang Technological University", |
| "location": { |
| "country": "Singapore" |
| } |
| }, |
| "email": "axsun@ntu.edu.sg" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Cambria", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Nanyang Technological University", |
| "location": { |
| "country": "Singapore" |
| } |
| }, |
| "email": "cambria@ntu.edu.sg" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Extracting time expressions from free text is a fundamental task for many applications. We analyze time expressions from four different datasets and find that only a small group of words are used to express time information and that the words in time expressions demonstrate similar syntactic behaviour. Based on the findings, we propose a type-based approach named SynTime 1 for time expression recognition. Specifically, we define three main syntactic token types, namely time token, modifier, and numeral, to group time-related token regular expressions. On the types we design general heuristic rules to recognize time expressions. In recognition, SynTime first identifies time tokens from raw text, then searches their surroundings for modifiers and numerals to form time segments, and finally merges the time segments to time expressions. As a lightweight rule-based tagger, SynTime runs in real time, and can be easily expanded by simply adding keywords for the text from different domains and different text types. Experiments on benchmark datasets and tweets data show that SynTime outperforms state-of-the-art methods.", |
| "pdf_parse": { |
| "paper_id": "P17-1039", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Extracting time expressions from free text is a fundamental task for many applications. We analyze time expressions from four different datasets and find that only a small group of words are used to express time information and that the words in time expressions demonstrate similar syntactic behaviour. Based on the findings, we propose a type-based approach named SynTime 1 for time expression recognition. Specifically, we define three main syntactic token types, namely time token, modifier, and numeral, to group time-related token regular expressions. On the types we design general heuristic rules to recognize time expressions. In recognition, SynTime first identifies time tokens from raw text, then searches their surroundings for modifiers and numerals to form time segments, and finally merges the time segments to time expressions. As a lightweight rule-based tagger, SynTime runs in real time, and can be easily expanded by simply adding keywords for the text from different domains and different text types. Experiments on benchmark datasets and tweets data show that SynTime outperforms state-of-the-art methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Time expression plays an important role in information retrieval and many applications in natural language processing (Alonso et al., 2011; Campos et al., 2014) . Recognizing time expressions from free text has attracted considerable attention since last decade (Verhagen et al., 2007 (Verhagen et al., , 2010 UzZaman et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 139, |
| "text": "(Alonso et al., 2011;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 140, |
| "end": 160, |
| "text": "Campos et al., 2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 262, |
| "end": 284, |
| "text": "(Verhagen et al., 2007", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 285, |
| "end": 309, |
| "text": "(Verhagen et al., , 2010", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 310, |
| "end": 331, |
| "text": "UzZaman et al., 2013)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We analyze time expressions in four datasets: TimeBank (Pustejovsky et al., 2003b) , Gigaword (Parker et al., 2011) , WikiWars (Mazur and Dale, 2010) , and Tweets. From the analysis we make four findings about time expressions. First, most time expressions are very short, with 80% of time expressions containing no more than three tokens. Second, at least 91.8% of time expressions contain at least one time token. Third, the vocabulary used to express time information is very small, with a small group of keywords. Finally, words in time expressions demonstrate similar syntactic behaviour. All the findings relate to the principle of least effort (Zipf, 1949) . That is, people tend to act under the least effort in order to minimize the cost of energy at both individual level and collective level to language usage (Zipf, 1949) . Time expression is part of language and acts as an interface of communication. Short expressions, occurrence, small vocabulary, and similar syntactic behaviour all reduce the cost of energy required to communicate.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 82, |
| "text": "(Pustejovsky et al., 2003b)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 94, |
| "end": 115, |
| "text": "(Parker et al., 2011)", |
| "ref_id": null |
| }, |
| { |
| "start": 127, |
| "end": 149, |
| "text": "(Mazur and Dale, 2010)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 651, |
| "end": 663, |
| "text": "(Zipf, 1949)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 821, |
| "end": 833, |
| "text": "(Zipf, 1949)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "According to the findings we propose a type-based approach named SynTime ('Syn' stands for syntactic) to recognize time expressions. Specifically, we define three main token types, namely time token, modifier, and numeral, to group time-related token regular expressions. Time tokens are the words that explicitly express time information, such as time units (e.g., 'year'). Modifiers modify time tokens; they appear before or after time tokens, e.g., 'several' and 'ago' in 'several years ago.' Numerals are ordinals and numbers. From free text SynTime first identifies time tokens, then recognizes modifiers and numerals.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Naturally, SynTime is a rule-based tagger. The key difference between SynTime and other rulebased taggers lies in the way of defining token types and the way of designing rules. The definition of token type in SynTime is inspired by part-of-speech in which \"linguists group some words of language into classes (sets) which show similar syntactic behaviour.\" (Manning and Schutze, 1999) SynTime defines token types for tokens according to their syntactic behaviour. Other rulebased taggers define types for tokens based on their semantic meaning. For example, SUTime defines 5 semantic modifier types, such as frequency modifiers; 2 while SynTime defines 5 syntactic modifier types, such as modifiers that appear before time tokens. (See Section 4.1 for details.) Accordingly, other rule-based taggers design deterministic rules based on their meanings of tokens themselves. SynTime instead designs general rules on the token types rather than on the tokens themselves. For example, our general rules do not work on tokens 'February' nor '1989' but on their token types 'MONTH' and 'YEAR.' That is why we call SynTime a type-based approach. More importantly, other rule-based taggers design rules in a fixed method, including fixed length and fixed position. In contrast, SynTime designs general rules in a heuristic way, based on the idea of boundary expansion. The general heuristic rules are quite light-weight that it makes SynTime much more flexible and expansible, and leads SynTime to run in real time.", |
| "cite_spans": [ |
| { |
| "start": 358, |
| "end": 385, |
| "text": "(Manning and Schutze, 1999)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The heuristic rules are designed on token types and are independent of specific tokens, SynTime therefore is independent of specific domains, specific text types, and even specific languages that consist of specific tokens. In this paper, we test SynTime on specific domains and specific text types in English. (The test for other languages needs only to construct a collection of token regular expressions in the target language under our defined token types.) Specifically, we evaluate SynTime against three state-of-the-art methods (i.e., HeidelTime, SUTime, and UWTime) on three datasets: TimeBank, WikiWars, and Tweets. 3 TimeBank and Tweets are comprehensive datasets while WikiWars is a specific domain dataset about war; TimeBank and WikiWars are the datasets in formal text while Tweets dataset is in informal text. Experiments show that SynTime achieves comparable results on WikiWars dataset, and significantly outperforms the three state-of-the-art baselines on TimeBank and Tweets 2 https://github.com/stanfordnlp/CoreNLP/tree/ master/src/edu/stanford/nlp/time/rules 3 Gigaword dataset is not used in our experiments because the labels in the dataset are not the ground truth labels but instead are automatically generated by other taggers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "datasets. More importantly, SynTime achieves the best recalls on all three datasets and exceptionally good results on Tweets dataset. To sum up, we make the following contributions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We analyze time expressions from four datasets and make four findings. The findings provide evidence in terms of time expression for the principle of least effort (Zipf, 1949) . \u2022 We propose a time tagger named SynTime to recognize time expressions using syntactic token types and general heuristic rules. SynTime is independent of specific tokens, and therefore independent of specific domains, specific text types, and specific languages. \u2022 We conduct experiments on three datasets, and the results demonstrate the effectiveness of SynTime against state-of-the-art baselines.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 177, |
| "text": "(Zipf, 1949)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many research works on time expression identification are reported in TempEval exercises (Verhagen et al., 2007 (Verhagen et al., , 2010 UzZaman et al., 2013) . The task is divided into two subtasks: recognition and normalization.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 111, |
| "text": "(Verhagen et al., 2007", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 112, |
| "end": 136, |
| "text": "(Verhagen et al., , 2010", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 137, |
| "end": 158, |
| "text": "UzZaman et al., 2013)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Rule-based time taggers like GUTime, Heidel-Time, and SUTime, predefine time-related words and rules (Verhagen et al., 2005; Str\u00f6tgen and Gertz, 2010; Chang and Manning, 2012) . Heidel-Time (Str\u00f6tgen and Gertz, 2010) hand-crafts rules with time resources like weekdays and months, and leverages language clues like part-of-speech to identify time expression. SUTime (Chang and Manning, 2012) designs deterministic rules using a cascade finite automata (Hobbs et al., 1997) on regular expressions over tokens (Chang and Manning, 2014) . It first identifies individual words, then expands them to chunks, and finally to time expressions. Rule-based taggers achieve very good results in TempEval exercises. SynTime is also a rule-based tagger while its key difference from other rule-based taggers is that between the rules and the tokens it introduces a layer of token type; its rules work on token types and are independent of specific tokens. Moreover, SynTime designs rules in a heuristic way.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 124, |
| "text": "(Verhagen et al., 2005;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 125, |
| "end": 150, |
| "text": "Str\u00f6tgen and Gertz, 2010;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 151, |
| "end": 175, |
| "text": "Chang and Manning, 2012)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 190, |
| "end": 216, |
| "text": "(Str\u00f6tgen and Gertz, 2010)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 452, |
| "end": 472, |
| "text": "(Hobbs et al., 1997)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 508, |
| "end": 533, |
| "text": "(Chang and Manning, 2014)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-based Time Expression Recognition.", |
| "sec_num": null |
| }, |
| { |
| "text": "Machine Learning based Method. Machine learning based methods extract features from the text and apply statistical models on the features for recognizing time expressions. Example features include character features, word features, syntactic features, semantic features, and gazetteer features (Llorens et al., 2010; Filannino et al., 2013; Bethard, 2013) . The statistical models include Markov logic network, logistic regression, support vector machines, maximum entropy, and conditional random fields (Llorens et al., 2010; Uz-Zaman and Allen, 2010; Filannino et al., 2013; Bethard, 2013) . Some models obtain good performance, and even achieve the highest F 1 of 82.71% on strict match in TempEval-3 (Bethard, 2013) .", |
| "cite_spans": [ |
| { |
| "start": 294, |
| "end": 316, |
| "text": "(Llorens et al., 2010;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 317, |
| "end": 340, |
| "text": "Filannino et al., 2013;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 341, |
| "end": 355, |
| "text": "Bethard, 2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 504, |
| "end": 526, |
| "text": "(Llorens et al., 2010;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 527, |
| "end": 552, |
| "text": "Uz-Zaman and Allen, 2010;", |
| "ref_id": null |
| }, |
| { |
| "start": 553, |
| "end": 576, |
| "text": "Filannino et al., 2013;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 577, |
| "end": 591, |
| "text": "Bethard, 2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 704, |
| "end": 719, |
| "text": "(Bethard, 2013)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-based Time Expression Recognition.", |
| "sec_num": null |
| }, |
| { |
| "text": "Outside TempEval exercises, Angeli et al. leverage compositional grammar and employ a EMstyle approach to learn a latent parser for time expression recognition (Angeli et al., 2012) . In the method named UWTime, Lee et al. handcraft a combinatory categorial grammar (CCG) (Steedman, 1996) to define a set of lexicon with rules and use L1-regularization to learn linguistic context (Lee et al., 2014) . The two methods explicitly use linguistic information. In (Lee et al., 2014) , especially, CCG could capture rich structure information of language, similar to the rule-based methods. Tabassum et al. focus on resolving the dates in tweets, and use distant supervision to recognize time expressions (Tabassum et al., 2016) . They use five time types and assign one of them to each word, which is similar to SynTime in the way of defining types over tokens. However, they focus only on the type of date, while SynTime recognizes all the time expressions and does not involve learning and runs in real time.", |
| "cite_spans": [ |
| { |
| "start": 160, |
| "end": 181, |
| "text": "(Angeli et al., 2012)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 272, |
| "end": 288, |
| "text": "(Steedman, 1996)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 381, |
| "end": 399, |
| "text": "(Lee et al., 2014)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 460, |
| "end": 478, |
| "text": "(Lee et al., 2014)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 700, |
| "end": 723, |
| "text": "(Tabassum et al., 2016)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-based Time Expression Recognition.", |
| "sec_num": null |
| }, |
| { |
| "text": "Time Expression Normalization. Methods in TempEval exercises design rules for time expression normalization (Verhagen et al., 2005; Str\u00f6tgen and Gertz, 2010; Llorens et al., 2010; Uz-Zaman and Allen, 2010; Filannino et al., 2013; Bethard, 2013) . Because the rule systems have high similarity, Llorens et al. suggest to construct a large knowledge base as a public resource for the task (Llorens et al., 2012) . Some researchers treat the normalization process as a learning task and use machine learning methods (Lee et al., 2014; Tabassum et al., 2016) . Lee et al. (Lee et al., 2014) use AdaGrad algorithm (Duchi et al., 2011) and Tabassum et al. (Tabassum et al., 2016 ) use a loglinear algorithm to normalize time expressions.", |
| "cite_spans": [ |
| { |
| "start": 108, |
| "end": 131, |
| "text": "(Verhagen et al., 2005;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 132, |
| "end": 157, |
| "text": "Str\u00f6tgen and Gertz, 2010;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 158, |
| "end": 179, |
| "text": "Llorens et al., 2010;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 180, |
| "end": 205, |
| "text": "Uz-Zaman and Allen, 2010;", |
| "ref_id": null |
| }, |
| { |
| "start": 206, |
| "end": 229, |
| "text": "Filannino et al., 2013;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 230, |
| "end": 244, |
| "text": "Bethard, 2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 387, |
| "end": 409, |
| "text": "(Llorens et al., 2012)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 513, |
| "end": 531, |
| "text": "(Lee et al., 2014;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 532, |
| "end": 554, |
| "text": "Tabassum et al., 2016)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 557, |
| "end": 586, |
| "text": "Lee et al. (Lee et al., 2014)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 609, |
| "end": 629, |
| "text": "(Duchi et al., 2011)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 634, |
| "end": 672, |
| "text": "Tabassum et al. (Tabassum et al., 2016", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-based Time Expression Recognition.", |
| "sec_num": null |
| }, |
| { |
| "text": "SynTime focuses only on the recognition task. The normalization could be achieved by using methods similar to the existing rule systems, because they are highly similar (Llorens et al., 2012) . We conduct an analysis on four datasets: Time-Bank, Gigaword, WikiWars, and Tweets. Time-Bank (Pustejovsky et al., 2003b ) is a benchmark dataset in TempEval series (Verhagen et al., 2007 (Verhagen et al., , 2010 UzZaman et al., 2013) , consisting of 183 news articles. Gigaword (Parker et al., 2011 ) is a large automatically labelled dataset with 2,452 news articles and used in TempEval-3. WikiWars dataset is derived from Wikipedia articles about wars (Mazur and Dale, 2010) . Tweets is our manually annotated dataset with 942 tweets of which each contains at least one time expression. Table 1 summarizes the datasets.", |
| "cite_spans": [ |
| { |
| "start": 169, |
| "end": 191, |
| "text": "(Llorens et al., 2012)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 288, |
| "end": 314, |
| "text": "(Pustejovsky et al., 2003b", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 359, |
| "end": 381, |
| "text": "(Verhagen et al., 2007", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 382, |
| "end": 406, |
| "text": "(Verhagen et al., , 2010", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 407, |
| "end": 428, |
| "text": "UzZaman et al., 2013)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 473, |
| "end": 493, |
| "text": "(Parker et al., 2011", |
| "ref_id": null |
| }, |
| { |
| "start": 650, |
| "end": 672, |
| "text": "(Mazur and Dale, 2010)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 785, |
| "end": 792, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Rule-based Time Expression Recognition.", |
| "sec_num": null |
| }, |
| { |
| "text": "From the four datasets, we analyze their time expressions and make four findings. We will see that despite the four datasets vary in corpus sizes, in text types, and in domains, their time expressions demonstrate similar characteristics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Finding 1 Time expressions are very short. More than 80% of time expressions contain no more than three words and more than 90% contain no more than four words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Figure 1 plots the length distribution of time expressions. Although the texts are collected from different sources (i.e., news articles, Wikipedia articles, and tweets) and vary in sizes, the length of time expressions follow a similar distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In particular, the one-word time expressions range from 36.23% in WikiWars to 62.91% in Tweets. In informal communication people tend to use words in minimum length to express time information. The third column in Table 2 reports the average length of time expressions. On average, time expressions contain about two words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 214, |
| "end": 221, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Finding 2 More than 91% of time expressions contain at least one time token.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The second column in Table 2 reports the percentage of time expressions that contain at least one time token. We find that at least 91.81% of time expressions contain time token(s). (Some time expressions have no time token but depend on other time expressions; in '2 to 8 days,' for example, '2' depends on '8 days.') This suggests that time tokens account for time expressions. Therefore, to recognize time expressions, it is essential to recognize their time tokens.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 21, |
| "end": 28, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Finding 3 Only a small group of time-related keywords are used to express time information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "From the time expressions in all four datasets, we find that the group of keywords used to express time information is small. Table 3 reports the number of distinct words and of distinct time tokens. The words/tokens are manually normalized before counting and their variants are ignored. For example, 'year' and '5yrs' are counted as one token 'year.' Numerals in the counting are ignored. Despite the four datasets vary in sizes, domains, and text types, the numbers of their distinct time tokens are comparable.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 133, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Across the four datasets, the number of distinct words is 350, about half of the simply summing of 675; the number of distinct time tokens is 123, less than half of the simply summing 282. Among the 123 distinct time tokens, 45 appear in all the four datasets, and 101 appear in at least two datasets. This indicates that time tokens, which account for time expressions, are highly overlapped across the four datasets. In other words, time expressions highly overlap at their time tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Finding 4 POS information could not distinguish time expressions from common words, but within time expressions, POS tags can help distinguish their constituents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For each dataset we list the top 10 POS tags that appear in time expressions, and their percentages over the whole text. Among the 40 tags (10 \u00d7 4 datasets), 37 have percentage lower than 20%; other 3 are CD. This indicates that POS could not provide enough information to distinguish time expressions from common words. However, the most common POS tags in time expressions are NN*, JJ, RB, CD, and DT. Within time expressions, the time tokens usually have NN* and RB, the modifiers have JJ and RB, and the numerals have CD. This finding indicates that for the time expressions, their similar constituents behave in similar syntactic way. When seeing this, we realize that this is exactly how linguists define part-of-speech for language. 4 The definition of POS for language inspires us to define a syntactic type system for the time expression, part of language.", |
| "cite_spans": [ |
| { |
| "start": 740, |
| "end": 741, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The four findings all relate to the principle of least effort (Zipf, 1949) . That is, people tend to act with least effort so as to minimize the cost of energy at both individual and collective levels to the language usage (Zipf, 1949) . Time expression is part of language and acts as an interface of communication. Short expressions, occurrence, small vocabulary, and similar syntactic behaviour all reduce the cost of energy required to communicate.", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 74, |
| "text": "(Zipf, 1949)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 223, |
| "end": 235, |
| "text": "(Zipf, 1949)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To summarize: on average, a time expression contains two tokens of which one is time token and the other is modifier/numeral, and the size of time tokens is small. To recognize a time expression, therefore, we first recognize the time token, then recognize the modifier/numeral. Figure 2 : Layout of SynTime. The layout consists of three levels: token level, type level, and rule level. Token types group the constituent tokens of time expressions. Heuristic rules work on token types, and are independent of specific tokens.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 279, |
| "end": 287, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "SynTime defines a syntactic type system for the tokens of time expressions, and designs heuristic rules working on the token types. Figure 2 shows the layout of SynTime, consisting of three levels: Token level, type level, and rule level. Token types at the type level group the tokens of time expressions. Heuristic rules lie at the rule level, working on token types rather than on tokens themselves. That is why the heuristic rules are general. For example, the heuristic rules do not work on tokens '1989' nor 'February,' but on their token types 'YEAR' and 'MONTH.' The heuristic rules are only relevant to token types, and are independent of specific tokens. For this reason, our token types and heuristic rules are independent of specific domains, specific text types, and even specific languages that consist of specific tokens. In this paper, we test SynTime on specific domain (i.e., war domain) and specific text types (i.e., formal text and informal text) in English. The test for other languages simply needs to construct a set of token regular expressions in the target language under our defined token types. Figure 3 shows the overview of SynTime in practice. Shown on the left-hand side, SynTime is initialized with regular expressions over tokens. After initialization, SynTime can be directly applied on text. On the other hand, SynTime can be easily expanded by simply adding the time-related token regular expressions from training text under each defined token type. The expansion enables SynTime to recognize time expressions in text from different domains and different text types.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 132, |
| "end": 140, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1124, |
| "end": 1132, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SynTime: Syntactic Token Types and General Heuristic Rules", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Shown on the right-hand side of Figure 3 , Syn-Time recognizes time expression through three main steps. In the first step, SynTime identifies time tokens from the POS-tagged raw text. Then around the time tokens SynTime searches for modifiers and numerals to form time segments. In the last step, SynTime transforms the time segments to time expressions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 32, |
| "end": 40, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SynTime: Syntactic Token Types and General Heuristic Rules", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We define a syntactic type system for time expression, specifically, 15 token types for time tokens, 5 token types for modifiers, and 1 token type for numeral. Token types to tokens is like POS tags to words; for example, 'February' has a POS tag of NNP and a token type of MONTH.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SynTime Construction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Time Token. We define 15 token types for the time tokens and use their names similar to Joda-Time classes: 5 DECADE (-), YEAR (-), SEASON (5), MONTH (12), WEEK (7), DATE (-), TIME (-), DAY TIME (27), TIMELINE (12), HOLIDAY (20), PERIOD (9), DURATION (-), TIME UNIT (15), TIME ZONE (6), and ERA (2). Number in '()' indicates the number of distinct tokens in this token type. '-' indicates that this token type involves changing digits and cannot be counted.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SynTime Construction", |
| "sec_num": "4.1" |
| }, |
| { |
"text": "Modifier. We define 3 token types for the modifiers according to their possible positions relative to time tokens. Modifiers that appear before time tokens are PREFIX (48); modifiers after time tokens are SUFFIX (2); LINKAGE (4) links two time tokens. Besides, we define 2 special modifier types, COMMA (1) for comma ',' and IN ARTICLE (2) for the indefinite articles 'a' and 'an.'",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SynTime Construction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "TimeML (Pustejovsky et al., 2003a) and Time-Bank (Pustejovsky et al., 2003b) do not treat most prepositions like 'on' as a part of time expressions. Thus SynTime does not collect those prepositions.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 34, |
| "text": "(Pustejovsky et al., 2003a)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 49, |
| "end": 76, |
| "text": "(Pustejovsky et al., 2003b)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SynTime Construction", |
| "sec_num": "4.1" |
| }, |
| { |
"text": "Numeral. A number in time expressions can be a time token, e.g., '10' in 'October 10, 2016,' or a modifier, e.g., '10' in '10 days.' We define NUMERAL (-) for the ordinals and numbers.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SynTime Construction", |
| "sec_num": "4.1" |
| }, |
| { |
"text": "SynTime Initialization. The token regular expressions for initializing SynTime are collected from SUTime, a state-of-the-art rule-based tagger that achieved the highest recall in TempEval-3 (Chang and Manning, 2013). Specifically, we collect from SUTime only the tokens and the regular expressions over tokens, and discard its other rules of recognizing full time expressions.",
"cite_spans": [
{
"start": 190,
"end": 215,
"text": "(Chang and Manning, 2013)",
"ref_id": "BIBREF5"
}
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SynTime Construction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "On the token types, SynTime designs a small set of heuristic rules to recognize time expressions. The recognition process includes three main steps: (1) time token identification, (2) time segment identification, and (3) time expression extraction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Time Expression Recognition", |
| "sec_num": "4.2" |
| }, |
| { |
"text": "Identifying time tokens is simple, through matching of string and regular expressions. Some words might cause ambiguity. For example, 'May' could be a modal verb, or the fifth month of the year. To filter out the ambiguous words, we use POS information. In implementation, we use Stanford POS Tagger, and the POS tags for matching the instances of token types in SynTime are based on our Finding 4 in Section 3.2.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Time Token Identification", |
| "sec_num": "4.2.1" |
| }, |
| { |
"text": "Besides the time tokens identified in this step, each individual token is assigned one token type of either modifier or numeral if it matches the token regular expressions. In the next two steps, SynTime works on those token types.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Time Token Identification", |
| "sec_num": "4.2.1" |
| }, |
| { |
"text": "The task of time segment identification is to search the surrounding of each time token identified in the previous step for modifiers and numerals, then gather the time token with its modifiers and numerals to form a time segment. The searching is under simple heuristic rules in which the key idea is to expand the time token's boundaries. At first, each time token is a time segment. If it is either a PERIOD or DURATION, then no need to further search. Otherwise, search its left and its right for modifiers and numerals. For the left searching, if encounter a PREFIX or NUMERAL or IN ARTICLE, then continue searching. For the right searching, if encounter a SUFFIX or NUMERAL, then continue searching. Both the left and the right searching stop when reaching a COMMA or LINKAGE or a non-modifier/numeral word. The left searching does not exceed the previous time token; the right searching does not exceed the next time token. A time segment consists of exactly one time token, and zero or some modifiers/numerals.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Time Segment Identification", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "A special kind of time segments do not contain any time token; they depend on other time segments next to them. For example, in '8 to 20 days,' 'to 20 days' is a time segment, and '8 to' forms a dependent time segment. (See Figure 4 (e).)", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 224, |
| "end": 232, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Time Segment Identification", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "The task of time expression extraction is to extract time expressions from the identified time segments in which the core step is to determine whether to merge two adjacent or overlapping time segments into a new time segment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Time Expression Extraction", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "We scan the time segments in a sentence from beginning to the end. A stand-alone time segment is a time expression. (See Figure 4(a) .) The focus is to deal with two or more time segments that are adjacent or overlapping. If two time segments s 1 and s 2 are adjacent, merge them to form a new time segment s 1 . (See Figure 4(b) .) Consider that s 1 and s 2 overlap at a shared boundary. According to our time segment identification, the shared boundary could be a modifier or a numeral. If the word at the shared boundary is neither a COMMA nor a LINKAGE, then merge s 1 and s 2 . (See Figure 4(c) .) If the word is a LINKAGE, then extract s 1 as a time expression and continue scanning. When the shared boundary is a COMMA, merge s 1 and s 2 only if the COMMA's previous token and its next token satisfy the three conditions: (1) the previous token is a time token or a NUMERAL; (2) the next token is a time token; and (3) the token types of the previous token and of the next token are not the same. (See Figure 4(d) .)", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 121, |
| "end": 132, |
| "text": "Figure 4(a)", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 318, |
| "end": 329, |
| "text": "Figure 4(b)", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 588, |
| "end": 599, |
| "text": "Figure 4(c)", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 1009, |
| "end": 1020, |
| "text": "Figure 4(d)", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Time Expression Extraction", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "Although Figure 4 shows the examples as token types together with the tokens, we should note that the heuristic rules only work on the token types. After the extraction step, time expressions are exported as a sequence of tokens from the sequence of token types.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 17, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Time Expression Extraction", |
| "sec_num": "4.2.3" |
| }, |
| { |
"text": "SynTime could be expanded by simply adding new words under each defined token type without changing any rule. The expansion requires the words to be added to be annotated manually. We apply the initial SynTime on the time expressions from training text and list the words that are not covered. Whether the uncovered words are added to SynTime is manually determined. The rule for determination is that the added words cannot cause ambiguity and should be generic. WikiWars dataset contains a few examples like this: 'The time Arnold reached Quebec City.' Words in this example are extremely descriptive, and we do not collect them. In tweets, on the other hand, people may use abbreviations and informal variants; for example, '2day' and 'tday' are popular spellings of 'today.' Such abbreviations and informal variants will be collected.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SynTime Expansion", |
| "sec_num": "4.3" |
| }, |
| { |
"text": "According to our findings, not many words are used to express time information; the manual addition of keywords thus will not cost much. In addition, we find that even in tweets people tend to use formal words. In the Twitter word clusters trained from 56 million English tweets, the most often used words are the formal words, and their frequencies are much greater than those of the informal words. The cluster of 'today,' for example, is most often used in the formal spelling 'today,' which appears 1,220,829 times, while its second most frequent spelling '2day' appears only 34,827 times. The low rate of informal words (e.g., about 3% in the 'today' cluster) suggests that even in an informal environment the manual keyword addition costs little.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SynTime Expansion", |
| "sec_num": "4.3" |
| }, |
| { |
"text": "We evaluate SynTime against three state-of-the-art baselines (i.e., HeidelTime, SUTime, and UWTime) on three datasets (i.e., TimeBank, WikiWars, and Tweets). WikiWars is a specific domain dataset about war; TimeBank and WikiWars are the datasets in formal text while Tweets dataset is in informal text. For SynTime we report the results of its two versions: SynTime-I and SynTime-E. SynTime-I is the initial version, and SynTime-E is the expanded version of SynTime-I.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
"text": "We use three datasets, of which TimeBank and WikiWars are benchmark datasets whose details are shown in Section 3.1; Tweets is our manually labeled dataset that is collected from Twitter. For the Tweets dataset, we randomly sample 4,000 tweets and use SUTime to tag them; 942 of these tweets each contain at least one time expression. From the remaining 3,058 tweets, we randomly sample 500 and manually annotate them, and find that only 15 tweets contain time expressions. We therefore roughly consider that SUTime misses about 3% of time expressions in tweets. Two annotators then manually annotate the 942 tweets with discussion to final agreement according to the standards of TimeML and TimeBank. We finally get 1,127 manually labeled time expressions. For the 942 tweets, we randomly sample 200 tweets as the test set, and the rest 742 as the training set, because a baseline UWTime requires training.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets.", |
| "sec_num": null |
| }, |
| { |
"text": "Baseline Methods. We compare SynTime with methods: HeidelTime (Str\u00f6tgen and Gertz, 2010), SUTime (Chang and Manning, 2012), and UWTime (Lee et al., 2014). HeidelTime and SUTime both are rule-based methods, and UWTime is a learning method. When training UWTime on Tweets, we try two settings: (1) train with only Tweets training set;",
"cite_spans": [
{
"start": 62,
"end": 88,
"text": "(Str\u00f6tgen and Gertz, 2010)",
"ref_id": "BIBREF21"
},
{
"start": 97,
"end": 122,
"text": "(Chang and Manning, 2012)",
"ref_id": "BIBREF4"
},
{
"start": 135,
"end": 153,
"text": "(Lee et al., 2014)",
"ref_id": "BIBREF11"
}
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets.", |
| "sec_num": null |
| }, |
| { |
| "text": "(2) train with TimeBank and Tweets training set. The second setting achieves slightly better result and we report that result.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets.", |
| "sec_num": null |
| }, |
| { |
"text": "Evaluation Metrics. We follow TempEval-3 and use their evaluation toolkit to report Precision, Recall, and F1 in terms of strict match and relaxed match (UzZaman et al., 2013). Table 4 reports the overall performance. Among the 18 measures, SynTime-I and SynTime-E achieve 12 best results and 13 second best results. Except the strict match on WikiWars dataset, both SynTime-I and SynTime-E achieve F1 above 91%. For the relaxed match on all three datasets, SynTime-I and SynTime-E achieve recalls above 92%. The high recalls are consistent with our finding that at least 91.81% of time expressions contain time token(s). (See Table 2.) This indicates that SynTime covers most of time tokens. On Tweets dataset, SynTime-I and SynTime-E achieve exceptionally good performance. Their F1 reaches 91.74% with 11.37% improvement in strict match and 95.87% with 6.33% improvement in relaxed match. The reasons are that in an informal environment people tend to use time expressions in minimum length (62.91% of one-word time expressions in Tweets; see Figure 1), the size of time keywords is small (only 60 distinct time tokens; see Table 3), and even in tweets people tend to use formal words. (See Section 4.3 for our finding from Twitter word clusters.) For precision, SynTime achieves comparable results in strict match and performs slightly poorer in relaxed match.",
"cite_spans": [],
"ref_spans": [
{
"start": 177,
"end": 184,
"text": "Table 4",
"ref_id": "TABREF4"
},
{
"start": 627,
"end": 634,
"text": "Table 2",
"ref_id": "TABREF1"
},
{
"start": 1046,
"end": 1054,
"text": "Figure 1",
"ref_id": null
},
{
"start": 1127,
"end": 1134,
"text": "Table 3",
"ref_id": "TABREF2"
}
| ], |
| "eq_spans": [], |
| "section": "Datasets.", |
| "sec_num": null |
| }, |
| { |
"text": "On TimeBank dataset, SynTime-I achieves F 1 of 92.09% in strict match and of 94.96% in relaxed match. On Tweets, SynTime-I achieves 91.74% and 95.87%, respectively. It outperforms all the baseline methods. The reason is that for the rulebased time taggers, their rules are designed in a fixed way, lacking flexibility. For example, SU-Time could recognize '1 year' but not 'year 1.' For the machine learning based methods, some of the features they used actually hurt the modelling. Time expressions involve quite many changing numbers which in themselves affect the pattern recognition. For example, it is difficult to build connection between 'May 22, 1986' and 'February 01, 1989 ' at the level of word or of character. One suggestion is to consider a type-based learning method that could use type information. For example, the above two time expressions refer to the same pattern of 'MONTH NUMERAL COMMA YEAR.' Table 5 lists the number of time tokens and modifiers added to SynTime-I to get SynTime-E. On TimeBank and Tweets datasets, only a few tokens are added, the corresponding results are affected slightly. This confirms that the size of time words is small, and that SynTime-I covers most of time words. On WikiWars dataset, relatively more tokens are added, SynTime-E performs much better than SynTime-I, especially in recall. It improves the recall by 3.25% in strict match and by 2.98% in relaxed match. This indicates that with more words added from specific domains (e.g., WikiWars dataset about war), SynTime can significantly improve the performance.",
"cite_spans": [
{
"start": 645,
"end": 663,
"text": "'May 22, 1986' and",
"ref_id": null
},
{
"start": 664,
"end": 682,
"text": "'February 01, 1989",
"ref_id": null
}
],
"ref_spans": [
{
"start": 916,
"end": 923,
"text": "Table 5",
"ref_id": "TABREF5"
}
| ], |
| "eq_spans": [], |
| "section": "SynTime-I vs. Baseline Methods", |
| "sec_num": "5.2.1" |
| }, |
| { |
"text": "SynTime assumes that words are tokenized and POS tagged correctly. In reality, however, the tokenized and tagged words are not that perfect, due to the limitation of the tools used. For example, Stanford POS Tagger assigns VBD to the word 'sat' in 'friday or sat' whereas its tag should be NNP. The incorrect tokens and POS tags affect the result.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations", |
| "sec_num": "5.3" |
| }, |
| { |
"text": "We conduct an analysis on time expressions from four datasets, and find that time expressions in general are very short and expressed by a small vocabulary, and words in time expressions demonstrate similar syntactic behavior. Our findings provide evidence in terms of time expression for the principle of least effort (Zipf, 1949). Inspired by part-of-speech, based on the findings, we define a syntactic type system for the time expression, and propose a type-based time expression tagger, named SynTime. SynTime defines syntactic token types for tokens and on the token types it designs general heuristic rules based on the idea of boundary expansion. Experiments on three datasets show that SynTime outperforms the state-of-the-art baselines, including rule-based time taggers and a machine learning based time tagger. Because our heuristic rules are quite simple, SynTime is lightweight and runs in real time.",
| "cite_spans": [ |
| { |
| "start": 319, |
| "end": 331, |
| "text": "(Zipf, 1949)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our token types and heuristic rules are independent of specific tokens, SynTime therefore is independent of specific domains, specific text types, and even specific languages that consist of specific tokens. In this paper, we test SynTime on specific domains and specific text types in English. The test for other languages needs only to construct a collection of token regular expressions in the target language under our defined token types.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Time expression is part of language and follows the principle of least effort. Since language usage relates to human habits (Zipf, 1949; Chomsky, 1986; Pinker, 1995) , we might expect that humans would share some common habits, and therefore expect that other parts of language would more or less follow the same principle. In the future we will try our analytical method on other parts of language.", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 136, |
| "text": "(Zipf, 1949;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 137, |
| "end": 151, |
| "text": "Chomsky, 1986;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 152, |
| "end": 165, |
| "text": "Pinker, 1995)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Source: https://github.com/zhongxiaoshi/syntime", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "\"linguists group some words of language into classes (sets) which show similar syntactic behaviour.\"(Manning and Schutze, 1999)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.joda.org/joda-time/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "https://github.com/stanfordnlp/CoreNLP/tree/master/src/edu/stanford/nlp/time/rules",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "http://www.cs.cmu.edu/~ark/TweetNLP/cluster_viewer.html 9 http://www.cs.cmu.edu/~ark/TweetNLP/paths/01111110010.html",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors would like to thank the three anonymous reviewers for their insightful comments and constructive suggestions. This research is mainly supported by the Singapore Ministry of Education Research Fund MOE2014-T2-2-066.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Temporal information retrieval: Challenges and opportunities", |
| "authors": [ |
| { |
| "first": "Omar", |
| "middle": [], |
| "last": "Alonso", |
| "suffix": "" |
| }, |
| { |
| "first": "Jannik", |
| "middle": [], |
| "last": "Strotgen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Baeza-Yates", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gertz", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of 1st International Temporal Web Analytics Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omar Alonso, Jannik Strotgen, Ricardo Baeza-Yates, and Michael Gertz. 2011. Temporal information retrieval: Challenges and opportunities. In Proceedings of 1st Inter- national Temporal Web Analytics Workshop. pages 1-8.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Parsing time: Learning to interpret time expressions", |
| "authors": [ |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "446--455", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gabor Angeli, Christopher D. Manning, and Daniel Jurafsky. 2012. Parsing time: Learning to interpret time expres- sions. In Proceedings of 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. pages 446- 455.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Cleartk-timeml: A minimalist approach to tempeval 2013", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 7th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "10--14", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bethard. 2013. Cleartk-timeml: A minimalist ap- proach to tempeval 2013. In Proceedings of the 7th Inter- national Workshop on Semantic Evaluation. pages 10-14.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Survey of temporal information retrieval and related applications", |
| "authors": [ |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Campos", |
| "suffix": "" |
| }, |
| { |
| "first": "Gael", |
| "middle": [], |
| "last": "Dias", |
| "suffix": "" |
| }, |
| { |
| "first": "Alipio", |
| "middle": [ |
| "M" |
| ], |
| "last": "Jorge", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Jatowt", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "ACM Computing Surveys", |
| "volume": "47", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ricardo Campos, Gael Dias, Alipio M. Jorge, and Adam Ja- towt. 2014. Survey of temporal information retrieval and related applications. ACM Computing Surveys 47(2):15.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Sutime: A library for recognizing and normalizing time expressions", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Angel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of 8th International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "3735--3740", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angel X. Chang and Christopher D. Manning. 2012. Sutime: A library for recognizing and normalizing time expres- sions. In Proceedings of 8th International Conference on Language Resources and Evaluation. pages 3735-3740.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Sutime: Evaluation in tempeval-3", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Angel", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of second Joint Conference on Lexical and Computational Semantics (SEM)", |
| "volume": "", |
| "issue": "", |
| "pages": "78--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angel X. Chang and Christopher D. Manning. 2013. Su- time: Evaluation in tempeval-3. In Proceedings of second Joint Conference on Lexical and Computational Seman- tics (SEM). pages 78-82.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Tokensregex: Defining cascaded regular expressions over tokens", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Angel", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angel X. Chang and Christopher D. Manning. 2014. To- kensregex: Defining cascaded regular expressions over to- kens. Technical report, Department of Computer Science, Stanford University.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Knowledge of Language: Its Nature, Origin, and Use", |
| "authors": [ |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Chomsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noam Chomsky. 1986. Knowledge of Language: Its Nature, Origin, and Use. New York: Prager.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Adaptive subgradient methods for online learning and stochastic optimization", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Duchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Elad", |
| "middle": [], |
| "last": "Hazan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoram", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2121--2159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive subgradient methods for online learning and stochastic op- timization. The Journal of Machine Learning Research 12:2121-2159.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Mantime: Temporal expression identification and normalization in the tempeval-3 challenge", |
| "authors": [ |
| { |
| "first": "Michele", |
| "middle": [], |
| "last": "Filannino", |
| "suffix": "" |
| }, |
| { |
| "first": "Gavin", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Nenadic", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 7th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michele Filannino, Gavin Brown, and Goran Nenadic. 2013. Mantime: Temporal expression identification and normal- ization in the tempeval-3 challenge. In Proceedings of the 7th International Workshop on Semantic Evaluation.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
"title": "Fastus: A cascaded finite-state transducer for extracting information from natural-language text",
| "authors": [ |
| { |
| "first": "Jerry", |
| "middle": [ |
| "R" |
| ], |
| "last": "Hobbs", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [ |
| "E" |
| ], |
| "last": "Appelt", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bear", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Israel", |
| "suffix": "" |
| }, |
| { |
| "first": "Megumi", |
| "middle": [], |
| "last": "Kameyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Stickel", |
| "suffix": "" |
| }, |
| { |
| "first": "Mabry", |
| "middle": [], |
| "last": "Tyson", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Finite State Devices for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "383--406", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jerry R. Hobbs, Douglas E. Appelt, John Bear, David Israel, Megumi Kameyama, Mark Stickel, and Mabry Tyson. 1997. Fastus: A cascaded finite-state transducer for ex- tracting information from natrual-language text. In Finite State Devices for Natural Language Processing. pages 383-406.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Context-dependent semantic parsing for time expressions", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1437--1447", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Yoav Artzi, Jesse Dodge, and Luke Zettlemoyer. 2014. Context-dependent semantic parsing for time ex- pressions. In Proceedings of the 52th Annual Meeting of the Association for Computational Linguistics. pages 1437-1447.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Timen: An open temporal expression normalisation resource", |
| "authors": [ |
| { |
| "first": "Hector", |
| "middle": [], |
| "last": "Llorens", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "Estela", |
| "middle": [], |
| "last": "Saquete", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of 8th International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "3044--3051", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hector Llorens, Leon Derczynski, Robert Gaizauskas, and Estela Saquete. 2012. Timen: An open temporal expres- sion normalisation resource. In Proceedings of 8th Inter- national Conference on Language Resources and Evalua- tion. pages 3044-3051.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Tipsem (english and spanish): Evaluating crfs and semantic roles in tempeval-2", |
| "authors": [ |
| { |
| "first": "Hector", |
| "middle": [], |
| "last": "Llorens", |
| "suffix": "" |
| }, |
| { |
| "first": "Estela", |
| "middle": [], |
| "last": "Saquete", |
| "suffix": "" |
| }, |
| { |
| "first": "Borja", |
| "middle": [], |
| "last": "Navarro", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 5th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "284--291", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hector Llorens, Estela Saquete, and Borja Navarro. 2010. Tipsem (english and spanish): Evaluating crfs and seman- tic roles in tempeval-2. In Proceedings of the 5th Interna- tional Workshop on Semantic Evaluation. pages 284-291.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Foundations of Statistical Natural Language Processing", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Schutze", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Manning and Hinrich Schutze. 1999. Founda- tions of Statistical Natural Language Processing. Cam- bridge: MIT Press.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Wikiwars: A new corpus for research on temporal expressions", |
| "authors": [ |
| { |
| "first": "Pawel", |
| "middle": [], |
| "last": "Mazur", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Dale", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "913--922", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pawel Mazur and Robert Dale. 2010. Wikiwars: A new cor- pus for research on temporal expressions. In Proceedings of the 2010 Conference on Empirical Methods in Natu- ral Language Processing. Association for Computational Linguistics, pages 913-922.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The language instinct: The new science of language and mind", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Pinker", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "7529", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Pinker. 1995. The language instinct: The new science of language and mind, volume 7529. Penguin.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Timeml: Robust specification of event and temporal expressions in text", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Castano", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Ingria", |
| "suffix": "" |
| }, |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Sauri", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Setzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Katz", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "New Directions in Question Answering", |
| "volume": "3", |
| "issue": "", |
| "pages": "28--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Pustejovsky, Jose Castano, Robert Ingria, Roser Sauri, Robert Gaizauskas, Andrea Setzer, Graham Katz, and Dragomir Radev. 2003a. Timeml: Robust specification of event and temporal expressions in text. New Directions in Question Answering 3:28-34.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The timebank corpus. Corpus Linguistics", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Hanks", |
| "suffix": "" |
| }, |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Sauri", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Setzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Beth", |
| "middle": [], |
| "last": "Sundheim", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Day", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Ferro", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcia", |
| "middle": [], |
| "last": "Lazo", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "647--656", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Pustejovsky, Patrick Hanks, Roser Sauri, Andrew See, Robert Gaizauskas, Andrea Setzer, Beth Sundheim, Dragomir Radev, David Day, Lisa Ferro, and Marcia Lazo. 2003b. The timebank corpus. Corpus Linguistics 2003:647-656.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Surface Structure and Interpretation", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Steedman. 1996. Surface Structure and Interpretation. The MIT Press.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Heideltime: High quality rule-based extraction and normalization of temporal expressions", |
| "authors": [ |
| { |
| "first": "Jannik", |
| "middle": [], |
| "last": "Str\u00f6tgen", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gertz", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 5th International Workshop on Semantic Evaluation (SemEval'10). Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "321--324", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jannik Str\u00f6tgen and Michael Gertz. 2010. Heideltime: High quality rule-based extraction and normalization of tempo- ral expressions. In Proceedings of the 5th International Workshop on Semantic Evaluation (SemEval'10). Asso- ciation for Computational Linguistics, Stroudsburg, PA, USA, pages 321-324.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Heideltime: Tuning english and developing spanish resources", |
| "authors": [ |
| { |
| "first": "Jannik", |
| "middle": [], |
| "last": "Strotgen", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Zell", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gertz", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of second Joint Conference on Lexical and Computational Semantics (SEM)", |
| "volume": "", |
| "issue": "", |
| "pages": "15--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jannik Strotgen, Julian Zell, and Michael Gertz. 2013. Hei- deltime: Tuning english and developing spanish resources. In Proceedings of second Joint Conference on Lexical and Computational Semantics (SEM). pages 15-19.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Tweetime: A minimally supervised method for recognizing and normalizing time expressions in twitter", |
| "authors": [ |
| { |
| "first": "Jeniya", |
| "middle": [], |
| "last": "Tabassum", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "307--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeniya Tabassum, Alan Ritter, and Wei Xu. 2016. Tweet- ime: A minimally supervised method for recognizing and normalizing time expressions in twitter. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing. pages 307-318.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Trips and trios system for tempeval-2: Extracting temporal information from text", |
| "authors": [ |
| { |
| "first": "Naushad", |
| "middle": [], |
| "last": "Uzzaman", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "James", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Allen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 5th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "276--283", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naushad UzZaman and James F. Allen. 2010. Trips and trios system for tempeval-2: Extracting temporal information from text. In Proceedings of the 5th International Work- shop on Semantic Evaluation. pages 276-283.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Semeval-2013 task 1: Tempeval-3: Evaluating time expressions, events, and temporal relations", |
| "authors": [ |
| { |
| "first": "Naushad", |
| "middle": [], |
| "last": "Uzzaman", |
| "suffix": "" |
| }, |
| { |
| "first": "Hector", |
| "middle": [], |
| "last": "Llorens", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Verhagen", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Allen", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 7th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naushad UzZaman, Hector Llorens, Leon Derczynski, Marc Verhagen, James Allen, and James Pustejovsky. 2013. Semeval-2013 task 1: Tempeval-3: Evaluating time ex- pressions, events, and temporal relations. In Proceedings of the 7th International Workshop on Semantic Evalua- tion. pages 1-9.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Semeval-2007 task 15: Tempeval temporal relation identification", |
| "authors": [ |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Verhagen", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Schilder", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Hepple", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Katz", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 4th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "75--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc Verhagen, Robert Gaizauskas, Frank Schilder, Mark Hepple, Graham Katz, and James Pustejovsky. 2007. Semeval-2007 task 15: Tempeval temporal relation identi- fication. In Proceedings of the 4th International Workshop on Semantic Evaluation. pages 75-80.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Automating temporal annotation with tarsqi", |
| "authors": [ |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Verhagen", |
| "suffix": "" |
| }, |
| { |
| "first": "Inderjeet", |
| "middle": [], |
| "last": "Mani", |
| "suffix": "" |
| }, |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Sauri", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Knippen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jessica", |
| "middle": [], |
| "last": "Seok Bae Jang", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Littman", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| }, |
| { |
| "first": "Inderjeet", |
| "middle": [], |
| "last": "Phillips", |
| "suffix": "" |
| }, |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Mani", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Sauri", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Knippen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jessica", |
| "middle": [], |
| "last": "Seok Bae Jang", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Littman", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Phillips", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the ACL Interactive Poster and Demonstration Sessions", |
| "volume": "", |
| "issue": "", |
| "pages": "81--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc Verhagen, Inderjeet Mani, Roser Sauri, Robert Knip- pen, Seok Bae Jang, Jessica Littman, Anna Rumshisky, John Phillips, Inderjeet Mani, Roser Sauri, Robert Knip- pen, Seok Bae Jang, Jessica Littman, Anna Rumshisky, John Phillips, and James Pustejovsky. 2005. Automat- ing temporal annotation with tarsqi. In Proceedings of the ACL Interactive Poster and Demonstration Sessions. pages 81-84.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Semeval-2010 task 13: Tempeval-2", |
| "authors": [ |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Verhagen", |
| "suffix": "" |
| }, |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Sauri", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommaso", |
| "middle": [], |
| "last": "Caselli", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 5th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "57--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc Verhagen, Roser Sauri, Tommaso Caselli, and James Pustejovsky. 2010. Semeval-2010 task 13: Tempeval-2. In Proceedings of the 5th International Workshop on Se- mantic Evaluation. pages 57-62.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Human Behavior and the Principle of Least Effort: An Introduction to Human Ecology", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Zipf", |
| "suffix": "" |
| } |
| ], |
| "year": 1949, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Zipf. 1949. Human Behavior and the Principle of Least Effort: An Introduction to Human Ecology. Addison-Wesley Press, Inc.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Figure 1: Length distribution of time expressions 3 Time Expression Analysis 3.1 Dataset", |
| "num": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Overview of SynTime. Left-hand side shows the construction of SynTime, with initialization using token regular expressions, and optional expansion using training text. Right-hand side shows the main steps of SynTime recognizing time expressions.", |
| "num": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "7 http://nlp.stanford.edu/software/tagger.shtml PREFIX/the PREFIX/last TIME_UNIT/week \u2026 said WEEK/Friday /to NUMERAL/20 TIME _UNIT/days (e) Dependent time segment and time segment", |
| "num": null |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Example time segments and time expressions. The above labels are from time segment identification; the below labels are for time expression extraction.", |
| "num": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "content": "<table><tr><td>Dataset</td><td colspan=\"3\">#Docs #Words #TIMEX</td></tr><tr><td>TimeBank</td><td>183</td><td>61,418</td><td>1,243</td></tr><tr><td>Gigaword</td><td colspan=\"2\">2,452 666,309</td><td>12,739</td></tr><tr><td>WikiWars</td><td colspan=\"2\">22 119,468</td><td>2,671</td></tr><tr><td>Tweets</td><td>942</td><td>18,199</td><td>1,127</td></tr></table>", |
| "text": "Statistics of the datasets (A tweet here is a document.)", |
| "html": null, |
| "num": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "content": "<table><tr><td>Dataset</td><td colspan=\"2\">Percent Average Length</td></tr><tr><td>TimeBank</td><td>94.61</td><td>2.00</td></tr><tr><td>Gigaword</td><td>96.44</td><td>1.70</td></tr><tr><td>WikiWars</td><td>91.81</td><td>2.38</td></tr><tr><td>Tweets</td><td>96.01</td><td>1.51</td></tr></table>", |
| "text": "The percentage of time expressions that contain at least one time token, and the average length of time expressions", |
| "html": null, |
| "num": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"3\">: Number of distinct words and number of</td></tr><tr><td colspan=\"3\">distinct time tokens in time expressions Dataset #Words #Time Tokens</td></tr><tr><td>TimeBank</td><td>130</td><td>64</td></tr><tr><td>Gigaword</td><td>214</td><td>80</td></tr><tr><td>WikiWars</td><td>224</td><td>74</td></tr><tr><td>Tweets</td><td>107</td><td>64</td></tr></table>", |
| "text": "", |
| "html": null, |
| "num": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "content": "<table><tr><td>Rule level</td><td>General Heuristic Rules</td></tr><tr><td>Type level Token level</td><td>1989, Time Token, Modifier, Numeral</td></tr></table>", |
| "text": "February, 12:55, this year, 3 months ago, ...", |
| "html": null, |
| "num": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "content": "<table><tr><td>Dataset</td><td>Method</td><td>Strict Match P r. Re.</td><td>F 1</td><td>Relaxed Match P r. Re. F 1</td></tr><tr><td/><td>UWTime(Lee et al., 2014) SynTime-I SynTime-E</td><td colspan=\"3\">86.10 80.40 83.10 94.60 88.40 91.40 91.43 92.75 92.09 94.29 95.65 94.96 91.49 93.48 92.47 93.62 95.65 94.62</td></tr><tr><td/><td>HeidelTime(Lee et al., 2014)</td><td colspan=\"3\">85.20 79.30 82.10 92.60 86.20 89.30</td></tr><tr><td/><td>SUTime</td><td colspan=\"3\">78.61 76.69 76.64 95.74 89.57 92.55</td></tr><tr><td>WikiWars</td><td>UWTime(Lee et al., 2014) SynTime-I</td><td colspan=\"3\">87.70 78.80 83.00 97.60 87.60 92.30 80.00 80.22 80.11 92.16 92.41 92.29</td></tr><tr><td/><td>SynTime-E HeidelTime SUTime</td><td colspan=\"3\">79.18 83.47 81.27 90.49 95.39 92.88 89.58 72.88 80.37 95.83 77.97 85.98 76.03 77.97 76.99 88.43 90.68 89.54</td></tr><tr><td>Tweets</td><td>UWTime SynTime-I</td><td colspan=\"3\">88.54 72.03 79.44 96.88 78.81 86.92 89.52 94.07 91.74 93.55 98.31 95.87</td></tr><tr><td/><td>SynTime-E</td><td colspan=\"3\">89.20 94.49 91.77 93.20 98.78 95.88</td></tr></table>", |
| "text": "Overall performance. The best results are in bold face and the second best are underlined. Some results are borrowed from their original papers and the papers are indicated by the references.TimeBankHeidelTime(Strotgen et al., 2013) 83.85 78.99 81.34 93.08 87.68 90.30 SUTime(Chang andManning, 2013) 78.72 80.43 79.57 89.36 91.30 90.32", |
| "html": null, |
| "num": null |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"3\">: Number of time tokens and modifiers for</td></tr><tr><td>expansion Dataset</td><td colspan=\"2\">#Time Tokens #Modifiers</td></tr><tr><td>TimeBank</td><td>3</td><td>5</td></tr><tr><td>WikiWars</td><td>16</td><td>21</td></tr><tr><td>Tweets</td><td>3</td><td>2</td></tr><tr><td colspan=\"3\">YEAR' at the level of token type. POS is a kind of</td></tr><tr><td colspan=\"3\">type information. But according to our analysis,</td></tr><tr><td colspan=\"3\">POS could not distinguish time expressions from</td></tr><tr><td colspan=\"3\">common words. Features need carefully design-</td></tr><tr><td colspan=\"3\">ing. On WikiWars, SynTime-I achieves competi-</td></tr><tr><td colspan=\"3\">tive results in both matches. Time expressions in</td></tr><tr><td colspan=\"3\">WikiWars include lots of prepositions and quite a</td></tr><tr><td colspan=\"3\">few descriptive time expressions. SynTime could</td></tr><tr><td colspan=\"3\">not fully recognize such kinds of time expressions</td></tr><tr><td colspan=\"3\">because it follows TimeML and TimeBank.</td></tr><tr><td colspan=\"2\">5.2.2 SynTime-E vs. SynTime-I</td><td/></tr></table>", |
| "text": "", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |