| { |
| "paper_id": "Y08-1033", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:38:10.623209Z" |
| }, |
| "title": "Trend-based Document Clustering for Sensitive and Stable Topic Detection *", |
| "authors": [ |
| { |
| "first": "Yoshihide", |
| "middle": [], |
| "last": "Sato", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NTT West Corporation", |
| "location": {} |
| }, |
| "email": "y.sato@west.east.ntt.co.jp" |
| }, |
| { |
| "first": "Harumi", |
| "middle": [], |
| "last": "Kawashima", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "NTT Cyber Solutions Laboratories", |
| "institution": "NTT Corporation", |
| "location": { |
| "addrLine": "1-1, Hikarino-oka", |
| "postCode": "239-0847", |
| "settlement": "Yokosuka", |
| "region": "Kanagawa", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hidenori", |
| "middle": [], |
| "last": "Okuda", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "NTT Cyber Solutions Laboratories", |
| "institution": "NTT Corporation", |
| "location": { |
| "addrLine": "1-1, Hikarino-oka", |
| "postCode": "239-0847", |
| "settlement": "Yokosuka", |
| "region": "Kanagawa", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Masahiro", |
| "middle": [], |
| "last": "Oku", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "oku.masahiro@labs.ntt.co.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The ability to detect new topics and track them is important given the huge amounts of documents. This paper introduces a trend-based document clustering algorithm for analyzing them. Its key characteristic is that it gives scores to words on the basis of the fluctuation in word frequency. The algorithm generates clusters in a practical time, with O(n) processing cost due to preliminary calculation of document distances. The attribute allows the user to settle on the best level of granularity for identifying topics. Experiments prove that our algorithm can gather relevant documents with F measure of 63.0% on average from the beginning to the end of topic lifetime and it largely surpasses other algorithms.", |
| "pdf_parse": { |
| "paper_id": "Y08-1033", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The ability to detect new topics and track them is important given the huge amounts of documents. This paper introduces a trend-based document clustering algorithm for analyzing them. Its key characteristic is that it gives scores to words on the basis of the fluctuation in word frequency. The algorithm generates clusters in a practical time, with O(n) processing cost due to preliminary calculation of document distances. The attribute allows the user to settle on the best level of granularity for identifying topics. Experiments prove that our algorithm can gather relevant documents with F measure of 63.0% on average from the beginning to the end of topic lifetime and it largely surpasses other algorithms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Due to the information explosion on the WWW, the cost of catching up with the latest trends has risen. Consumer Generated Media (CGM), such as weblogs and social networking service (SNS), are only accelerating this explosion. The best approach to recognizing trends from among the huge number of documents being created is to analyze the topics in them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "The goal of Topic Detection and Tracking (TDT) is to find state-of-the-art events in a stream of broadcast news stories (Allan et al., 1998) . The study defines segmentation, new event detection, and event tracking as the major tasks. Segmentation proceeds by automatically dividing a text stream into topically homogeneous blocks. New event detection identifies stories in several continuous news streams that pertain to new or previously unidentified events. Event tracking identifies any and all subsequent stories describing the same event as sample instances of stories describing the event. Document clustering is an efficient approach to find topics in many documents.", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 140, |
| "text": "(Allan et al., 1998)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "In the tasks, new event detection is intimately related to clustering, and involves the functions of retrospective detection and on-line detection. The input to retrospective detection task is the entire corpus, and it is desired to divide them into event-specific groups. The input to on-line detection is a chronologically ordered document stream, and the change point of topics should be found.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "On the WWW, where documents are numerous and increasing hourly, our goal is to provide an environment that supports users in finding and tracking the topics. In particular, sensitive detection of new topics is needed there. Then, our research is categorized as both on-line event detection and event tracking in TDT. As a matter of fact, both aspects are essential for adequately grasping the topics. This paper introduces a trend-based document clustering algorithm that enables the detection of topic occurrence at the earliest possible stage and the observation of topic transition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "The remainder of this paper is organized as follows: Section 2 describes related work; Section 3 describes our clustering algorithm; Section 4 describes our experiments and their results; and we conclude in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "New event detection is the target of incremental clustering algorithms for on-line documents. In new event detection, conventionally, the similarity between new document and existing clusters are calculated, and it is judged that which cluster is appropriate to include the document or any one is inappropriate. Developments of similarity measure (Dharanipragada et al., 1999) and term weighting (Brants et al., 2003) have been proposed for better detection performance.", |
| "cite_spans": [ |
| { |
| "start": 347, |
| "end": 376, |
| "text": "(Dharanipragada et al., 1999)", |
| "ref_id": null |
| }, |
| { |
| "start": 396, |
| "end": 417, |
| "text": "(Brants et al., 2003)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Many clustering algorithms have been applied for the task, such as single-pass based algorithm (Papka and Allan, 1998) and incremental k-means algorithm (Walls et al., 1999) . They do not consider trends in on-line documents. However, it is required to focus attention on \"time\" for the sensitive topic detection.", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 173, |
| "text": "(Walls et al., 1999)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "The time-focused approach attempts to enhance detection performance by attenuating document similarities on the basis of time interval between documents (Yang et al., 1998) . The strategy yielded measurable improvements in their on-line detection experiments. Word distribution in a corpus is used to choose core lexicon in the corpus (Zhang et al., 2004) . The algorithm is applied to choose topical words in document if the documents are divided into some parts by their timestamps, though temporal continuity is not considered.", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 172, |
| "text": "(Yang et al., 1998)", |
| "ref_id": null |
| }, |
| { |
| "start": 335, |
| "end": 355, |
| "text": "(Zhang et al., 2004)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Another incremental clustering algorithm F 2 ICM (Ishikawa et al., 2001; Khy et al., 2006) is characterized by its ease in updating the statistics value used for calculating document similarities when new documents arrive. It defines the forgetting model as being exponential. It attenuates worth of documents exponentially as time passes, as if they are forgotten. In their model, recent documents are likely to be situated closer to each other, and older ones are likely to be more widely separated. The algorithm tends to generate clusters containing especially newer documents. On the other hand, persistent clusters are seldom generated. Thus, the algorithm is not the best way to observe topics continuously in terms of event tracking.", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 72, |
| "text": "(Ishikawa et al., 2001;", |
| "ref_id": null |
| }, |
| { |
| "start": 73, |
| "end": 90, |
| "text": "Khy et al., 2006)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "What the prior studies lack is the responsiveness to the current trends. Our approach to accomplish the goal is based on the trends in documents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trend-based Clustering Algorithm", |
| "sec_num": "3." |
| }, |
| { |
| "text": "More and more documents describing the same event are created when people's interest in the event arises. In on-line documents, a rapid increase in the frequency of a word indicates a trend toward one or more topics relevant to the word. Taking such trends into account, when clustering documents, yields the sensitive detection of new topics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trend-based Clustering Algorithm", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The most remarkable feature in our algorithm is that it senses current trends by word frequency fluctuation and gathers relevant documents based on the latest trends. Since its clustering process finishes in a short time after the classification granularity is indicated, it helps users to find adequate clustering results interactively that meet their intentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trend-based Clustering Algorithm", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Word weights in our algorithm involve word appearance growth and its accumulative appearance. We declare the gradient model in the following part before describing word weights. The concept of gradient, essential idea in our algorithm, represents the growth of the two aspects. Word weighting algorithm is described in the second subsection, and the clustering algorithm is detailed in the third subsection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trend-based Clustering Algorithm", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The impression of word appearance in a document declines over time. Suppose that the initial intensity of the impression is one, the intensity after time t \u0394 can be defined as following the forgetting model in F ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "t e \u2212(t n \u2212t)/T l t 1 t 2 t n t n\u22121 t 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Figure 1: Impression Intensity. Figure 1 shows the residual impression of each appearance of word w. Here, is the time of the nth appearance. Given that the current time is , the current appearance is assigned the maximum n t n t impression (the intensity is one), and the impression of w appearance at t remains . Then the total impressions score up to now, we define this as memory M, is described as follows.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 32, |
| "end": 40, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "( ) l n T t t e / \u2212 \u2212 ( ) ( ) (1) . , 1 / \u2211 = \u2212 \u2212 = n t T t t n l n e t w M", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "In consecutive appearance of words, the memory M is efficiently updated if the previous result is stored. Updating follows Equation 2. If the word appears at", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "1 + n t time , t \u0394 after n t , ( ) 1 , + n t w M", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "is represented as the sum of the previous result multiplied by the attenuation coefficient for the elapsed time t \u0394 and the latest inte On the other hand, the memory can be also regarded as the amount of word w appearances with time attenuation. With strong attenuation parameter T s (<T l ), the memory is approximate recent frequency of the word; the recent frequency F is represented in Equation 3, as well as M.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "the new nsity. ( ) ( ) ( ) ( ) ( ) (2) . 1 , 1 1 , / 1 / / 1 / 1 1 / 1 1 + \u22c5 = + \u22c5 = + = = \u0394 \u2212 = \u2212 \u2212 \u0394 \u2212 = \u2212 \u0394 + \u2212 + = \u2212 \u2212 + \u2211 \u2211 \u2211 + n T t n i T t t T t n i T t t t n i T t t n t w M", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "( ) ( ) (3) . , 1 / \u2211 = \u2212 \u2212 = n i T t t n s i n e t w F", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Suppose that the recent frequency of a word is higher compared to the amount of permanent memory which the word has given up to now. Then the word is in the growth phase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Here, we declare the concept of gradient as the difference of the memory from the recent frequency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "( ) ( ) ( ) ( ) (4) . , , , , n n n n t w M t w M t w F t w G \u03b2 \u03b1 \u2212 =", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "The denominator is a normalizing element for eliminating the effects of general words with high frequency. \u03b1 and \u03b2 are coefficients. In the definition of gradient, the numerator,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "t \u03b1e \u2212(tn\u2212t)/Ts \u03b2e \u2212(tn\u2212t)/T l \u03b1 \u03b2 t n t \u03b1 \u2212 \u03b2 t n t cross \u03b1e \u2212(tn\u2212t)/Ts \u2212\u03b2e \u2212(tn\u2212t)/T l", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "( ) ( ) n n t w M t w F , , \u03b1 \u03b2 \u2212", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": ", is also explained by Figure 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 23, |
| "end": 31, |
| "text": "Figure 2", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Two curves are drawn in the upper part. The solid line curve corresponds to the memory of w, with attenuation parameter T l . The other dashed line corresponds to the recent frequency of the word, with strong attenuation parameter T s . In fact, the curves do not directly plot memory or recent frequency rather the intensity attenuation used in calculating them. \u03b1 and \u03b2 in the definition of ( ) n t w G , correspond to intercepts respectively in the upper part of Figure 2 . The bold line curve in the bottom part, we define this as differential curve, corresponds to the difference between the dashed and solid lines. The score of a word with invariable frequency should be zero.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 466, |
| "end": 474, |
| "text": "Figure 2", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Then the following equality is true.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "\u2212 \u2212 dt e dt e n l n n s n t T t t t T t t \u03b2 \u03b1 is deformed as follows. ) 0 / s t T t t d T e dt e n s n \u03c4 \u03b1 \u03b1 \u03c4 = \u222b \u222b \u221e \u2212 \u221e \u2212 \u2212 \u2212", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Deforming the second term in Equation 5 in the same way, the ratio of to t diff dashed and solid lines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "By the way, M and F, represented in summations, can be represented in integrations if a word appears continuously. Moreover, ( )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "G n t w ( ) ( ) (5) . 0 / / = \u2212 \u222b \u222b \u221e \u2212 \u2212 \u2212 \u221e \u2212", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "The first term", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "( ) (6 . s T \u03b1 = \u03b1 to \u03b2 is derived. (7) . s T \u03b2 l T = \u03b1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "For simplification, we regard the current (t=t n ) difference between two curves in Figure 2 as one.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 84, |
| "end": 92, |
| "text": "Figure 2", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": ". 1 = \u2212 \u03b2 \u03b1", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Incidentally, \u03b1 and \u03b2 are derived as follows, respectively, from Equation 7 and 8.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "s l T T \u2212 l T = \u03b1 , (9) . s l T T \u2212 ( ) n t w G ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "is interpreted as differential operator for frequency transition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "s T = \u03b2 ( ) n t w G ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "is zero if w appears with invariable frequency. The value is above zero if the frequency is increasing as time passes, below zero if decreasing. and The time t cross when differential curve crosses horizontal axis is also derived as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "( )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "(10) . ln ln cross l s s l T T \u2212", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "The current growth rate is evaluated as an accumulation of past appearances by the differential curve. The", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "s l T T T T t \u2212 =", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "appearance until t cross affects negatively, and the appearance after t cross is enhanced positively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gradient Model", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "scores for trend-based clustering should reflect both growth and accumulative p nd Scores for Words and Document Expression ( ) n t w G , quantifies the degree of growth. That is, the value does not accurately reflect the current trend. Word appearance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "Therefore, we define the trend score of word w, ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "(11) . , , / 1 i i n = s i n T t t n e t w G t w TREND \u2212 \u2212 \u22c5 = \u2211", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "Trend score can be easily updated as well as memory or recent frequency by multiplying attenuation coefficient for elapsed time to the previous result, and then adding the latest gradient.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "( ) ( ) ( ) (12) . , , , 1 / 1 + \u0394 \u2212 + + \u22c5 = n n T t n t w G t w TREND e t w TREND s", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "All the system has to do is to preserve the latest M, F, and TREND for each word, and the timestamp when they were last updated. When a new document arrives, M and F for the words in the document are updated as shown in Equation 2, and then the latest TREND is calculated as shown in Equation 12.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "For trend-based document clustering, documents are expressed as vectors based on TREND. The vector of document d n (arrived at t n ) is defined in the following equation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "( ) ( ) ( ) ( ) ( ) (13) . , , , , , , , , 3 2 1 n m n n n n t w W t w W t w W t w W v L =", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "Here, m denotes the number of unique words in d n , and weight ( )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "n x t w W", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": ", is defined as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "( ) ( ) ( ) (14) . else 0 , if 0 , , > \u23a9 \u23a8 \u23a7 = n x n x n x t w TREND t w TREND t w W", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "Equation 14 means that words with negative trend scores are eliminated from the document. This is because words with negative score can be considered to be completely irrelevant to current trends. However, the same word in another document is used for vector element if its trend score rises above zero due to the arrival of the second document.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tre", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "Many clustering algorithms have been invented, which are roughly classified into hierarchical or partitioning-optimization. The former generate a document tree called a dendrogram, whereas the latter give flat document clusters without any hierarchy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering based on Trend Scores", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "For observing topic transition, the clustering algorithm should offer cluster reproducibility. There are two perspectives to reproducibility. First, the documents, already classified into clusters, should not be moved to another cluster when a new document arrives. If documents in a cluster move continuously, we cannot observe topic expansion and decline. Next, the documents should also remain stable when classification granularity is changed. If cluster coherency is preserved during the merging and partitioning processes, we can find the most effective level of granularity easily.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering based on Trend Scores", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "For the twin goals of reproducibilities, our approach is based on the single linkage clustering algorithm as a hierarchical scheme. Most hierarchical algorithms have processing cost of O(n 2 ) where n is the number of documents. However, the single linkage algorithm has processing cost of O(n) after a threshold is given and the nearest document for each document has been already identified.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering based on Trend Scores", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "Our clustering algorithm is composed of two steps. In the first step, distances between a new document and prior ones are calculated upon its arrival, and the nearest one is recorded in a nearestneighbor table. In the second step, document clusters are generated based on the threshold given by the user. This admits of interactive analysis through the flexible threshold changes in a practical time by using the nearest-neighbor table as a cache.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering based on Trend Scores", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "The distance between two clusters, in the single linkage algorithm, is defined as the distance between the closest documents in the clusters. One of the problems in this algorithm is the chaining phenomenon, which is due to the definition of distances between clusters. Even if each two documents in a cluster are substantially relevant, the farthest documents in the cluster may cover decidedly irrelevant topics. The focus of the single linkage algorithm is the result of giving preference to both high speed clustering performance and reproducibility rather than trying to suppress the influence of chaining.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering based on Trend Scores", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "The two key steps in our clustering algorithm are detailed below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering based on Trend Scores", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "The left of Figure 3 visually shows the structure of a nearest-neighbor table. The nodes denote documents, and d n inside the circles denote document identifiers. The larger n is, the later the document arrived. Newer documents are placed right of others in the figure. The arrows denote the nearest links from newer document to older one, and the values beside the arrows denote the distance between two documents. When the latest document d 6 arrives, the nearest-neighbor table is updated as follows. Table and Clusters First, for the words in d 6 , M, F, and TREND are updated based on their previous results and the elapsed time since they were last updated. In the definition of trend scores in Equation 11, the score for a word with the first appearance is one. The timestamp of d 6 is stored for the next update. Then, the current trend scores are assigned to d 6 ; the weighted words form the vector of the document. Though trend scores of words are updated whenever new documents arrive, 6 v (the vector of d 6 ) is never updated even if the new documents contain same words in d 6 . In other words, weighted words in a document reflect the trends at the time of its arrival. Therefore, scores in different documents may be different if the documents arrive at different times.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 3", |
| "ref_id": "FIGREF6" |
| }, |
| { |
| "start": 504, |
| "end": 522, |
| "text": "Table and Clusters", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Step1: Updating Nearest-Neighbor Table", |
| "sec_num": null |
| }, |
| { |
| "text": "Second, 6 v is compared to the vectors of older documents respectively, and the distances are calculated. The distance between two documents is defined by subtracting cosine similarity of respective vectors from one. The cosine similarity is normalized between zero and one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step1: Updating Nearest-Neighbor Table", |
| "sec_num": null |
| }, |
| { |
| "text": "( ) ( ) (15) . , sim 1 , dis n m n m v v d d \u2212 =", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step1: Updating Nearest-Neighbor Table", |
| "sec_num": null |
| }, |
| { |
| "text": "When the nearest document as d 6 is identified from among the older documents, the identifier of the nearest document and the corresponding distance are added to the nearest-neighbor table. In the Figure 3 , the nearest neighbor as d 6 is d 3 , and the distance is 0.5.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 197, |
| "end": 205, |
| "text": "Figure 3", |
| "ref_id": "FIGREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Step1: Updating Nearest-Neighbor Table", |
| "sec_num": null |
| }, |
| { |
| "text": "The clustering process begins after the threshold is given. Since the nearest-neighbor table specifies the most similar older documents, document clusters can be generated in a short time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step2: Clustering based on Nearest-Neighbor Table", |
| "sec_num": null |
| }, |
| { |
| "text": "The right of Figure 3 shows the composition of clusters after threshold 0.6 is given. Links shorter than 0.6 are valid, and only the link from d 3 to d 1 is regarded as invalid. As a result, two clusters, \"d 1 , d 2 , d 4 \" and \"d 3 , d 5 , d 6 \", are generated. Since it is not necessary to update distances between clusters during clustering processes, the algorithm can generate clusters with O(n) processing cost. Even if the threshold is changed, the algorithm can process again with the same cost.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 13, |
| "end": 21, |
| "text": "Figure 3", |
| "ref_id": "FIGREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Step2: Clustering based on Nearest-Neighbor Table", |
| "sec_num": null |
| }, |
| { |
| "text": "For the evaluation, we reviewed the clusters generated by our algorithm, and examined the sensitivity performance as regards new topics. We used the Mainichi newspaper tagged corpus (in Japanese) in our experiments. All articles are tagged with issued date and category of the page on which they were placed, such as world topics, politics, economics, and sports. Several keywords are attached to each article. We extracted 1,037 articles which are classified as world topics in January and February in 1994. The number of keywords in articles varies from 8 to 243, and the average article includes 54.2 keywords. We used the keywords as the elements of the document vectors. Though we stated that trend scores are updated each time a new document arrives, the scores are updated once a day in this experiment because the documents in the corpus had only day-based timestamps.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4." |
| }, |
| { |
| "text": "To comprehend the characteristics of our clustering algorithm, we reviewed the clusters formed with different thresholds.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reviewing Clusters", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "In this experiment, we set T l as 10(days) and T s as 5(days) so as to set t cross to 7(days) approximately. The appearance in the last seven days is thus emphasized in calculating trend scores. Our word weighting algorithm performs reasonably well only when the positive and negative regions of the differential curve are populated by existing documents; word scores calculated in the earlier period are not proper. Therefore, the articles prior to the last thirty days (about half of whole period) were not used for constructing the nearest-neighbor table, though all articles were processed to obtain trend scores during the last thirty days. Consequently, 567 articles (from the total of 1,037) were included in the nearest-neighbor table, and used for clustering. Table 1 and Table 2 summarize the largest top 10 clusters yielded by our algorithm. They are the results at the end of the two months. In the results in Table 1 , instead of giving distance threshold directly, we indicate the threshold that made the number of clusters half the number of articles in the nearest-neighbor table. In the results in Table 2 , we set the threshold to make the number of clusters one quarter the number of articles. Clusters are sorted by size, the number of articles in it, in descending order. Span is the days from the timestamp of the latest article to that of the earliest one in each cluster. Clusters are manually summarized at the rightmost column. As for the clusters in Table 1 , a review finds that No.1-3, 1-5, 1-6, 1-8, and 1-9 cover single topics; the other clusters are composed of several topics. The results are relative to cluster span. Clusters with short span gather relative articles quite precisely. Longer cluster spans indicate more topics.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 769, |
| "end": 788, |
| "text": "Table 1 and Table 2", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 922, |
| "end": 929, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 1115, |
| "end": 1122, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1477, |
| "end": 1484, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Reviewing Clusters", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "Larger clusters, such as No.1-1 and 1-2, cover especially wide various topics due to the chaining effect. The clusters in Table 2 are larger. Though some larger clusters are created by general words such as country name, and other larger ones are affected by chaining, two of top ten clusters, No. 2-5 and 2-10, completely correspond to No.1-3 and 1-8 in Table 1 . Both events described in these two clusters occurred at the end of February, the last one week in our dataset. Our algorithm sensitively gathered the events at the earliest possible stage just after their occurrences and separated them from the other events definitely.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 129, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 355, |
| "end": 362, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Reviewing Clusters", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "The purpose of the next experiment was to evaluate the algorithm's sensitivity to new topics. Sensitivity is achieved when the word scores reflect current trends. We started by comparing the relationship between daily document frequency of a word and its scores. Next, we examined the performance of gathering documents related to new topics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sensitivity to New Topics", |
| "sec_num": "4.2." |
| }, |
| { |
| "text": "In the experiment, as benchmark word weighting algorithms, we prepared simple \"IDF\", \"weighted-DF(W-DF)\" using the summation of document frequency with time attenuation, and \"Gradient\" as the growth of word appearance, in addition to our word weighting algorithm \"Trend\". W-DF corresponds to , and Gradient corresponds to", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sensitivity to New Topics", |
| "sec_num": "4.2." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "( n t w F , )", |
| "eq_num": "( ) n" |
| } |
| ], |
| "section": "Sensitivity to New Topics", |
| "sec_num": "4.2." |
| }, |
| { |
| "text": "t w G , ; they are obtained in the process of trend score calculation. In scoring words by IDF, the total number of documents and document frequency of words were updated each day by using articles up to the day because it is not feasible to obtain future given our assumption. Therefore, scores by IDF changed daily. We also regarded minus scores as zero in Gradient, as well as Trend.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sensitivity to New Topics", |
| "sec_num": "4.2." |
| }, |
| { |
| "text": "Word scores yielded by the four algorithms are drawn in Figure 4 . They are the results for the word \"Sarajevo\", which was frequently used in news articles in the period. The bars denote daily document frequency where the word appears. The lines denote the scores output by each algorithm. Trend(a) surges at the first peak in document frequency at 37th day, and hovers at a relatively high level on following days. IDF(b) changes fluidly while it declines as document frequency increases. The specific difference between Trend and W-DF(c) is the sensitivity to dramatic increase of document frequency. W-DF rises gradually as time passes after the first high peak on the 37th day because it reflects the accumulation of frequency. Gradient(d) reflects the peak on the 37th day as well as Trend. However, it also overreacts to smaller peaks in the earlier period. It reflects the behavior seen when the scores are obtained from just the rate of frequency change. Gradient scores rise strongly at small peaks only if it was missing articles on prior days. On the other hand, due to the negative coefficient in the differential curve, it seldom reaches high scores after big peaks pass.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 56, |
| "end": 64, |
| "text": "Figure 4", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word Scores", |
| "sec_num": null |
| }, |
| { |
| "text": "Eventually, Trend scores faithfully follow document frequency change, and they reflect the beginning and the end of trend lifetime. Moreover, they are unaffected by smaller peaks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Scores", |
| "sec_num": null |
| }, |
| { |
| "text": "We evaluated clustering performance precisely.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering Performance", |
| "sec_num": null |
| }, |
| { |
| "text": "For this experiment, we prepared a target article group with fifteen articles among the corpus. They are manually gathered so as to cover a single topic. For the four algorithms, nearest-neighbor tables were constructed for the last thirty days, as in the previous experiment. After constructing them, we generated clusters by adding articles day by day. The first results contain the articles in the first day of the thirty day period. The second ones contain the articles the first two days.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering Performance", |
| "sec_num": null |
| }, |
| { |
| "text": "The previous experiments proved that using the large threshold, which yielded the cluster number of 25% of all documents, could gather articles related to new topics. Since reducing cluster number makes it easier to comprehend, we also used the large threshold in this experiment. Figure 5 plots F measure day by day. The horizontal axis plots the days since the first article in the target group appeared. The bars are the number of articles in the target group. Data details are shown in Table 3 ; several dates are missing because the performance was estimated for the dates when the articles in the target group were issued. The bold values indicate the best performance achieved by each algorithm.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 281, |
| "end": 289, |
| "text": "Figure 5", |
| "ref_id": "FIGREF8" |
| }, |
| { |
| "start": 490, |
| "end": 497, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Clustering Performance", |
| "sec_num": null |
| }, |
| { |
| "text": "Trend performed well, especially for a few days after the first target article was issued, and recorded the highest performance throughout almost the entire period. The averaged performance was 63%. Though IDF demonstrated excellent performance after several articles were issued, it did not work well initially. The performance of W-DF was significantly below that of Trend while there was not so much of a difference between their word scores in Figure 4(a) and Figure 4(c) . This is because the scores of general words tend to be overweighted by IDF. Though Gradient proved high", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 448, |
| "end": 459, |
| "text": "Figure 4(a)", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 464, |
| "end": 475, |
| "text": "Figure 4(c)", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Clustering Performance", |
| "sec_num": null |
| }, |
| { |
| "text": "22nd Pacific Asia Conference on Language, Information and Computation, pages 331-340", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "performance compared to IDF or W-DF initially, it does not last so long, as also seen in word scores in Figure 4(d) .The experiment proved the performance of our algorithm to detect topics sensitively and follow them over time.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 115, |
| "text": "Figure 4(d)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| }, |
| { |
| "text": "This paper introduced a trend-based clustering algorithm for detecting and tracking new topics in online documents. Our algorithm is marked by its word weighting algorithm based on the gradient model, that represents word appearance growth. The clustering process adopts the single linkage algorithm, that offers high speed clustering in a practical time.The experiments proved that word weights in our algorithm reflect their frequency transitions and that the clustering algorithm can gather related news articles persistently as well as sensitively identify new topics. The performance meets the purpose of detecting new topics effectively and tracking them sustainably for documents increasing hourly.Our next goal is to optimize the adequate parameters related to attenuation power for different types of documents. 98-21. Walls, Frederick., Hubert Jin, Sreenivasa Sista, and Richard Schwartz. 1999. Topic Detection in Broadcast News, Proceedings of the DARPA Broadcast News Workshop, 193-198. Yang, Yiming., Tom Pierce, and Jaime Carbonell. 1998 ", |
| "cite_spans": [ |
| { |
| "start": 821, |
| "end": 880, |
| "text": "98-21. Walls, Frederick., Hubert Jin, Sreenivasa Sista, and", |
| "ref_id": null |
| }, |
| { |
| "start": 881, |
| "end": 1029, |
| "text": "Richard Schwartz. 1999. Topic Detection in Broadcast News, Proceedings of the DARPA Broadcast News Workshop, 193-198. Yang, Yiming., Tom Pierce, and", |
| "ref_id": null |
| }, |
| { |
| "start": 1030, |
| "end": 1051, |
| "text": "Jaime Carbonell. 1998", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5." |
| } |
| ], |
| "bib_entries": {}, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Ishikawa et al., 2001). denotes the parameter deciding the rate of intensity attenuation.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "1", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "num": null, |
| "text": "Differential Curve.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF5": { |
| "num": null, |
| "text": "Equation 11, as the accumulation of its gradient scores with time attenuation. as well as recent frequency F.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF6": { |
| "num": null, |
| "text": "Nearest-Neighbor", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF7": { |
| "num": null, |
| "text": "Document Frequency and Word Scores: \"Sarajevo\" (a)Trend (b)IDF (c)W-DF (d)Gradient.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF8": { |
| "num": null, |
| "text": "Target Group Detection Performance.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF0": { |
| "text": "The Summary of Top 10 Clusters (clusters: 1/2).", |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>No.</td><td>articles</td><td>span(days)</td><td>summary</td></tr><tr><td>1-1</td><td>84</td><td>22</td><td>Sarajevo, Bosnia-Herzegovina, Russia, etc</td></tr><tr><td>1-2</td><td>57</td><td>21</td><td>Bosnia, China, Vietnam, etc</td></tr><tr><td>1-3</td><td>13</td><td>3</td><td>Hebron random shooting</td></tr><tr><td>1-4</td><td>10</td><td>28</td><td>USA(North Korea, China), UN</td></tr><tr><td>1-5</td><td>6</td><td>5</td><td>Russian election</td></tr><tr><td>1-6</td><td>6</td><td>9</td><td>relationship between China and Taiwan</td></tr><tr><td>1-7</td><td>6</td><td>21</td><td>Austria, Ukraine, Russia</td></tr><tr><td>1-8</td><td>6</td><td>3</td><td>Myanmar(Aung San Suu Kyi)</td></tr><tr><td>1-9</td><td>6</td><td>5</td><td>USA(lifting of the economic sanctions for Vietnam)</td></tr><tr><td>1-10</td><td>6</td><td>23</td><td>North Korea(IAEA), China, Iran & Iraq</td></tr></table>", |
| "html": null |
| }, |
| "TABREF1": { |
| "text": "The Summary of Top 10 Clusters (clusters: 1/4).", |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>No.</td><td>articles</td><td>span(days)</td><td>summary</td></tr><tr><td>2-1</td><td>160</td><td>30</td><td>North Korea(IAEA), China, South Korea</td></tr><tr><td>2-2</td><td>87</td><td>22</td><td>Sarajevo, Bosnia-Herzegovina, PKO, etc</td></tr><tr><td>2-3</td><td>21</td><td>24</td><td>relationship between China and Taiwan, North Korea</td></tr><tr><td>2-4</td><td>14</td><td>8</td><td>Russian politics</td></tr><tr><td>2-5</td><td>13</td><td>3</td><td>Hebron random shooting</td></tr><tr><td>2-6</td><td>10</td><td>9</td><td>Italy, China, NATO</td></tr><tr><td>2-7</td><td>9</td><td>19</td><td>North Korea, Russia</td></tr><tr><td>2-8</td><td>9</td><td>14</td><td>Taiwan, Israel</td></tr><tr><td>2-9</td><td>7</td><td>21</td><td>NATO, Russia</td></tr><tr><td>2-10</td><td>6</td><td>3</td><td>Myanmar(Aung San Suu Kyi)</td></tr></table>", |
| "html": null |
| }, |
| "TABREF2": { |
| "text": "Target Group Detection Performance.", |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>date</td><td>Trend</td><td>IDF</td><td>W-DF</td><td>Gradient</td></tr><tr><td>1</td><td>0.059</td><td>0.083</td><td>0.20</td><td>0.18</td></tr><tr><td>2</td><td>1.0</td><td>0.11</td><td>0.15</td><td>0.67</td></tr><tr><td>3</td><td>0.67</td><td>0.22</td><td>0.062</td><td>0.40</td></tr><tr><td>4</td><td>0.67</td><td>0.24</td><td>0.36</td><td>0.40</td></tr><tr><td>6</td><td>0.73</td><td>0.22</td><td>0.33</td><td>0.33</td></tr><tr><td>8</td><td>0.63</td><td>0.21</td><td>0.073</td><td>0.67</td></tr><tr><td>9</td><td>0.61</td><td>0.28</td><td>0.065</td><td>0.55</td></tr><tr><td>10</td><td>0.58</td><td>0.57</td><td>0.091</td><td>0.50</td></tr><tr><td>13</td><td>0.64</td><td>0.57</td><td>0.086</td><td>0.53</td></tr><tr><td>15</td><td>0.67</td><td>0.53</td><td>0.097</td><td>0.42</td></tr><tr><td>17</td><td>0.67</td><td>0.55</td><td>0.12</td><td>0.33</td></tr><tr><td>18</td><td>0.65</td><td>0.51</td><td>0.13</td><td>0.32</td></tr><tr><td>Ave.</td><td>0.63</td><td>0.44</td><td>0.34</td><td>0.18</td></tr></table>", |
| "html": null |
| } |
| } |
| } |
| } |