| { |
| "paper_id": "Y15-1005", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:42:18.829281Z" |
| }, |
| "title": "Surrounding Word Sense Model for Japanese All-words Word Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "Kanako", |
| "middle": [], |
| "last": "Komiya", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ibaraki University", |
| "location": {} |
| }, |
| "email": "kanako.komiya.nlp@vc.ibaraki.ac.jp" |
| }, |
| { |
| "first": "Yuto", |
| "middle": [], |
| "last": "Sasaki", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Kyoto University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hajime", |
| "middle": [], |
| "last": "Morita", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tokyo University of Agriculture and Technology", |
| "location": {} |
| }, |
| "email": "morita@nlp.ist.i.kyoto-u.ac.jp" |
| }, |
| { |
| "first": "Minoru", |
| "middle": [], |
| "last": "Sasaki", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ibaraki University", |
| "location": {} |
| }, |
| "email": "minoru.sasaki.01@vc.ibaraki.ac.jphiroyuki.shinnou.0828@vc.ibaraki.ac.jp" |
| }, |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Shinnou", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ibaraki University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yoshiyuki", |
| "middle": [], |
| "last": "Kotani", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Kyoto University", |
| "location": {} |
| }, |
| "email": "kotani@cc.tuat.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper proposes a surrounding word sense model (SWSM) that uses the distribution of word senses that appear near ambiguous words for unsupervised all-words word sense disambiguation in Japanese. Although it was inspired by the topic model, ambiguous Japanese words tend to have similar topics since coarse semantic polysemy is less likely to occur than that in Western languages as Japanese uses Chinese characters, which are ideograms. We thus propose a model that uses the distribution of word senses that appear near ambiguous words: SWSM. We embedded the concept dictionary of an Electronic Dictionary Research (EDR) electronic dictionary in the system and used the Japanese Corpus of EDR for the experiments, which demonstrated that SWSM outperformed a system with a random baseline and a system that used a topic model called Dirichlet Allocation with WORDNET (LDAWN), especially when there were high levels of entropy for the word sense distribution of ambiguous words.", |
| "pdf_parse": { |
| "paper_id": "Y15-1005", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper proposes a surrounding word sense model (SWSM) that uses the distribution of word senses that appear near ambiguous words for unsupervised all-words word sense disambiguation in Japanese. Although it was inspired by the topic model, ambiguous Japanese words tend to have similar topics since coarse semantic polysemy is less likely to occur than that in Western languages as Japanese uses Chinese characters, which are ideograms. We thus propose a model that uses the distribution of word senses that appear near ambiguous words: SWSM. We embedded the concept dictionary of an Electronic Dictionary Research (EDR) electronic dictionary in the system and used the Japanese Corpus of EDR for the experiments, which demonstrated that SWSM outperformed a system with a random baseline and a system that used a topic model called Dirichlet Allocation with WORDNET (LDAWN), especially when there were high levels of entropy for the word sense distribution of ambiguous words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "This paper proposes a surrounding word sense model (SWSM) for unsupervised Japanese allwords Word Sense Disambiguation (WSD). SWSM assumes that the sense distribution of surrounding words varies according to the sense of a polysemous word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For instance, a word \" \" (possibility) has three senses according to the Electronic Dictionary Research (EDR) electronic dictionary (Miyoshi et al., 1996) :", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 154, |
| "text": "(Miyoshi et al., 1996)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(1) The ability to do something well", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(2) Its feasibility", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(3) The certainty of something happenings Although sense (3) is the most frequent in the prior distributions, sense (1) will be more likely when the local context includes some concepts like \" \" (man) or \" \" (someone's). It is challenging in practice to accurately learn the difference in the senses of surrounding words in an unsupervised manner, but we developed an approximate model that took conditions into consideration.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "SWSM is a method for all-words WSD inspired by the topic model (Section 2). It treats the similarities of word senses using WORDNET-WALK and it generates word senses of ambiguous words and their surrounding words (Section 3). First, SWSM abstracted the concepts of the concept dictionary (Section 4) and calculated the transition probabilities for priors (Section 5). Then it estimated the word senses using Gibbs Sampling (Section 6) . Our experiments with an EDR Japanese corpus and a Concept Dictionary (Section 7) indicated that SWSM was effective for Japanese all-words WSD (Section 8) . We discuss the results (Section 9) and concludes this paper (Section 10) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There are many methods of all-words WSD. Pedersen et al. (2005) proposed calculation of the semantic relatedness of the word senses of ambiguous words and their surrounding words. Some papers have reported that methods using topic models (Blei et al., 2003) are most effective. Boyd-Graber et al. (2007) proposed a model, called Latent Dirichlet Allocation with WORD-NET (LDAWN) , which was a model where the probability distributions of words that the topics had were replaced with a word generation process on WordNet: WORDNET-WALK. They ap-plied the topic model to unsupervised English allwords WSD. Although Guo and Diab (2011) also used the topic model and WordNet, they also used WordNet as a lexical resource for sense definitions and they did not use its conceptual structure. They reported that the performance of their system was comparable with that reported by Boyd-Graber et al.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 63, |
| "text": "Pedersen et al. (2005)", |
| "ref_id": null |
| }, |
| { |
| "start": 238, |
| "end": 257, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 278, |
| "end": 303, |
| "text": "Boyd-Graber et al. (2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 371, |
| "end": 378, |
| "text": "(LDAWN)", |
| "ref_id": null |
| }, |
| { |
| "start": 612, |
| "end": 631, |
| "text": "Guo and Diab (2011)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There has been little work, on the other hand, on unsupervised Japanese all-words WSD. As far as we know, there has only been one paper (Baldwin et al., 2008) and there have been no reported methods that have used the topic model. We think this is because ambiguous words in Japanese tend to have similar topics since coarse semantic polysemy is less likely to occur compared to that with Western languages as Japanese uses Chinese characters, which are ideograms. In addition, Guo and Diab (2011) reported that in word sense disambiguation (WSD), an even narrower context was taken into consideration, as Mihalcea (2005) had reported. Therefore, we assumed that the word senses of the local context are differentiated depending on the word sense of the target word like that in supervised WSD. SWSM was inspired by LDAWN, it thus uses WORDNET-WALK and Gibbs sampling but it does not use the topics but the word senses of the surrounding words. We propose SWSM as an approach to unsupervised WSD and carried out Japanese all-words WSD.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 158, |
| "text": "(Baldwin et al., 2008)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "SWSM uses the distribution of word senses that appear near the target word in WSD to estimate the word senses assuming that the word senses of the local context are differentiated depending on the word sense of the target word. In other words, SWSM estimates the word sense according to p(s|w), which is a conditional probability of a string of senses, s, given a string of words w.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "SWSM involves three assumptions. First, each word sense has a probability distribution of the senses of the surrounding words. Second, when c i denotes the sense string of the surrounding words of the target word w i , the conditional probability of c i given w i is the product of the those of the senses in c i given w i . For example, when w i is \" \" (possibility) and its surrounding words are \" \" (both sides) and \" \" (human), c i = (s both , s human ) and P (c i |s possibility ) = P (s both |s possibility )P (s human |s possibility ) are de-fined where s possibility , s both , and s human denote word senses of \" \" (possibility), \" \" (both sides), and \" \" (human). Finally, each polyseme has a prior distribution of the senses. Given these assumptions, SWSM calculates the conditional probability of s that corresponds to w, under the condition where w is observed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (s, c|w) = N \u220f i=1 P (s i |w i )P (c i |s i , w),", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where c denotes the string of c i and N denotes the number of all the words in the text. The initial part on the right is the probability distribution of the word sense of each word and the last part is that of the senses of the surrounding words for each word sense. We set the Dirichlet distribution as their prior.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The final equation considering prior is described using the following parameters:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "P (s, c, \u03b8, \u03c6|w, \u03b3 k , \u03c4 j ) = W \u220f k=1 P (\u03b8 k |\u03b3 k ) S \u220f j=1 P (\u03c6 j |\u03c4 j ) N \u220f i=1 P (s i |\u03b8 w i )P (c i |\u03c6 s j ,w),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(2) where W denotes the number of words, S denotes the number of senses, \u03b8 k denotes the probability distribution of the senses of word k, and \u03c6 j denotes the probability distribution of the word senses surrounding word sense j. \u03b8 k and \u03c6 j are the parameters of the multinomial distribution. \u03b3 and \u03c4 are the parameters of the Dirichlet distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Eq.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(2) is the basic form. We replace \u03c6, the probability distribution of each sense, with the generation process by using the WORDNET-WALK of the concept dictionary. The WORDNET-WALK in this work does not generate words but word senses using a hypertransition probability parameter, S\u03b1. We set \u03b1 according to the senses to differentiate the sense distribution of the surrounding words before training. By doing this, we can determine which sense in the model corresponds to the senses in the dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "SWSM estimates the word senses using Gibbs sampling as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(1) Pre-processing 1 Abstract the concepts in the concept dictionary (2) Training: Gibbs sampling to estimate the word senses", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surrounding Word Sense Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "SWSM obtains the sense probability of the surrounding words using WORDNET-WALK. WORDNET-WALK involves the generation process, which represents the probabilistic walks over the hierarchy of conceptual structures like Word-Net. Figure 1 shows the easy example of the generation probabilities of words by WORDNET-WALK. When circle nodes represent concepts and triangle nodes represent words of leaf concepts ,i.e., X and Y, and numbers represent the transition probabilities, the generation probabilities of words A, B, C, and D are 0.03 0.27 0.28 and 0.42. LDAWN calculated the probabilities of word senses using the transition probability from the root node in a concept dictionary. WORDNET-WALK generated words in (Boyd-Graber et al., 2007) but our WORDNET-WALK generates word senses. However, the word senses sometimes do not correspond to leaf nodes but to internal nodes in our model and that causes a problem: the sum of the probabilities is not one. Thus, we added leaf nodes of the word senses directly below the internal nodes of the concept dictionary (c.f. Figure 2 ). Concept abstraction involves the process by which hyponym concepts map onto hypernym concepts. Most concepts in a very deep hierarchy are fine grained like the \"Tokyo University of Agriculture and Technology\" and \"Ibaraki University\" and they should be combined together like \"university\" to avoid the zero frequency problem. Hirakawa and Kimura 2003reported that they compared three methods for concept abstraction, i.e, flat depth, flat size, and flat probability methods, by using the EDR concept dictionary, and the flat probability method was the best. Therefore, we used the flat probability method for concept abstraction.", |
| "cite_spans": [ |
| { |
| "start": 714, |
| "end": 740, |
| "text": "(Boyd-Graber et al., 2007)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 226, |
| "end": 234, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1066, |
| "end": 1074, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Concept Abstraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The flat probability method consists of two steps. First, there is a search for nodes from the root node in depth first order. Second, if the concept probability calculated based on the corpus is less than a threshold value, the concept and its hyponym concepts are mapped onto its hypernym concept.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Abstraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We employed the methods of (Ribas, 1995) and (McCarthy, 1997) to calculate the concept probability. Ribas (1995) calculated the frequency of sense s as:", |
| "cite_spans": [ |
| { |
| "start": 45, |
| "end": 61, |
| "text": "(McCarthy, 1997)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Abstraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f req(s) = \u2211 w |senses(w) \u2208 U (s)| |senses(w)| count(w),", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Concept Abstraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where senses(w) denotes the possible senses of a word w, U (s) denotes concept s and its hyponym concepts, and count(w) denotes the frequency of word w. This equation weights count(w) by the ratio of concept s and its hyponym concepts in all the word senses of w. probability P (s i ) was calculated as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Abstraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (s i ) = f req(s i ) N ,", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Concept Abstraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where N denotes the number of word tokens. Figure 3 demonstrates the example of the conceptual structure 1 . The nodes A\u223cF represent the A Turing estimator (Gale and Sampson, 1995) was used for smoothing with rounding of the weighted frequencies.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 43, |
| "end": 51, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Concept Abstraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Concept abstraction sometimes causes a problem where some word senses of a polyseme are mapped onto the same concept. The most frequent sense in the corpus has been chosen for the answer in these cases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Abstraction", |
| "sec_num": "4" |
| }, |
| { |
| "text": "SWSM differentiates the sense distribution of the surrounding words of each target word before training using \u03b1 : the transition probability parameter. As our method is an unsupervised approach, we cannot know the word senses in the corpus. Therefore, SWSM counts the frequencies of all the possible word senses of the surrounding words in the corpus. That is, if there are polysemes A and B in the corpus and B is a surrounding word of A, SWSM counts the frequencies of the senses by considering that all the senses of B appeared near all the senses of A. That makes no difference in the sense distributions of A; however, if there is another polyseme or a monosemic word, C, and a sense of C is identical with a sense of A, the sense distributions of A will be differentiated by counting the frequencies of the senses of C. As this example indicates, SWSM expects that words that have an identical sense, like A and C, have similar local contexts. SWSM uses these counted frequencies to calculate the transition parameter \u03b1 so that the transition probabilities to each concept are proportional to the word sense frequencies of the surrounding words. We calculate \u03b1 s i ,s j , i.e., the transition probability from hypernym s i to hyponym s j , like that in (Jiang and Conrath, 1997) as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b1 s i ,s j = P (s j |s i ) = P (s i , s j ) P (s i ) = P (s j ) P (s i ) .", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In addition, probability P (s i ) is calculated as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (s i ) = f req(s i ) N ,", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where f req(s i ) denotes the frequency of sense", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "s i . Moreover, f req(s i ) is calculated like that in (Resnik, 1995): f req(s i ) = \u2211 w\u2208words(s i ) count(w).", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Here, words(s i ) denotes a concept set that includes s i and its hyponyms, and N denotes the number of the word tokens in the corpus. However, the probability that Eq. 7will have a problem, i.e., the sum of the transition probabilities from a concept to its hyponyms is not one. Thus, we calculate the probability by considering that the same concept that follow a different path is different:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f req(s i )= \u2211 s j \u2208L(s i ) path(s i ,s j ) \u2211 w\u2208words(s i ) count(w),", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where path(s i , s j ) denotes the number of the paths from concept s i to its hyponym s j and L(s i ) denotes the leaf concepts below s i . Consequently, the transition probability can be calculated by dividing the frequencies of the hyponym by that of its hypernym.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "When word (a) appeared twice and word (b) appeared once, the transition probability from A to B, i.e., \u03b1 A,B is 1/2 because the frequencies of A and B are six 2 and three in Figure 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 174, |
| "end": 182, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Here, p(path s l ), i.e., a transition probability of an arbitrary path from the root node to a leaf concept, path s l , is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(path s l ) = f req(c 1 ) f req(s root ) f req(c 2 ) f req(c 1 ) . . . f req(c n ) f req(c n\u22121 ) f req(s l ) f req(c n ) = f req(s l ) f req(s root ) ,", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where c 1 c 2 . . . c n denote the concepts in path s l . Therefore, when we set the frequency of the word sense frequencies of s l , the surrounding words, as f req(s l ), p(path s l ) are proportional to the frequency. We eventually used the following transition probability parameter to avoid the zero frequency problem:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "S a \u03b1 a + S b \u03b1 s b ,", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where \u03b1 a denotes a transition probability parameter where all the leaf nodes have the same amount probability and \u03b1 s b denotes the transition probability parameter that is pre-trained using the above equations. S a and S b are constant numbers to control the effect of pre-processing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The transition probability parameter where all the leaf nodes have the same amount probability, \u03b1 a , is calculated by assuming that the frequencies of all the leaf nodes are as follows", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": ". 3 f req(s l ) = 1 path(s root , s l )", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "6 Sense Estimation using Gibbs Sampling SWSM estimates the word sense, s, using Gibbs sampling (Liu, 1994) . As described in Section 3, the conditional probability of the model is in Eq.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 106, |
| "text": "(Liu, 1994)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": ".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (s, c, \u03b8, \u03c6|w) = W \u220f k=1 P (\u03b8 k |\u03b3 k ) S \u220f j=1 P (\u03c6 j |\u03c4 j ) N \u220f i=1 P (s i |\u03b8 w i )P (c i |\u03c6 s j ,w)", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We calculate the conditional distribution that is necessary for sampling. We regard variants except those for word w i as constant numbers. The probability distribution, \u03c6, of the word sense is actually replaced by WORDNET-WALK in the word sense generation process and it will have plural 3 The reason we did not set the frequencies of all the leaf nodes to one (f req(s l ) = 1) is as follows. If so, all the probabilities of all the paths from the root node to each leaf node would have been the same. However, the more paths from the root node a leaf node has, the higher the probability the leaf node will have. We used Eq.(11) so that all the leaf nodes would have the same probability. multinomial distributions of the transitions to the hyponym concepts.", |
| "cite_spans": [ |
| { |
| "start": 289, |
| "end": 290, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We calculated the conditional distribution P (s i , c i |s \u2212i , c \u2212i , w) as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (s i = x, c i = y|s \u2212i , c \u2212i , w) \u221d (n \u2212i w i ,x +\u03b3)\u2022 |y| \u220f j=1 (n \u2212i x,y j +m y (j, y i )+\u03c4 x,y j ) \u2211 sen (n \u2212i x,sen +\u03c4 x,sen )+(j \u2212 1) ,", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where x and y correspond to the real values of word sense s i and the vector of the word senses of the surrounding words, c i . n \u2212i w i ,x denotes the number of x, i.e., the word senses that are assigned to word w i except for the i th variate, which is the sampling target now. n \u2212i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "x,y i denotes the frequency where y j appears around word sense x except for the i th variate. m y (j, y j ) is the frequency where word sense y j appear before the j th surrounding word sense in y and it can be ignored if y j appeared once in y. We approximately and determinately assign the sequence of the word senses to y, calculate each probability of s i , and determine s i , i.e., the word sense that corresponds to word w i .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "If the probability distributions of word senses are replaced with WORDNET-WALK, the last part of the right side of Eq. (13) will also be replaced. When r j,0 , r j,1 , . . . , r j,l denotes the path from the root concept of word sense y j in y, we obtain Eq. (14) by calculating the following values of all combinations from the root concept for all word senses, and summing them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "|y| \u220f j=1 l\u22121 \u220f p=1 {T \u2212i x,r j,p ,r j,p+1 + m y (j, r j,p , r j,p+1 ) + S a \u03b1 a,r j,p ,r j,p+1 + S b \u03b1 x b,r j,p ,r j,p+1 } /{ \u2211 r (T \u2212i x,r j,p ,r +m y (j, r j,p , r)+S b \u03b1 x b,r j,p ,r )+S a },", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where T \u2212i x,r j,p ,r j,p+1 denotes the frequency where the word sense of the surrounding words of word sense x pass the link from concept r j,p to concept r j,p+1 except for the i th variate. m y (j, r j,p , r j,p+1 ) denotes the frequency where the link from concept r j,p to concept r j,p+1 is passed before the j th path. The value of T s i should be updated after word sense s i is assigned. Thus, the paths of the word senses of the surrounding words are necessary. This time, we assign values proportional to each probability to each path. When path 1 ,path 2 , ,path n denote the paths from the root concept to word sense c i,j , i.e., a word sense of surrounding words c i of word sense s i , we added following value to T s i ,path k , which is the frequency where a link in path k is passed, for each word sense c i,j .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (path k |s i ) \u2211 n l=1 P (path l |s i )", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
"text": "The probability P (path k |s i ) is as follows, when r 1 , r 2 , \u2022 \u2022 \u2022 , r l denote the concepts that path k follows.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (path k |s i ) = l\u22121 \u2211 p=1 T \u2212i s i ,rp,r p+1 + S a \u03b1 a,rp,r p+1 + S b \u03b1 s i b,rp,r p+1 \u2211 r (T \u2212i s i ,rp,r + S b \u03b1 s i b,rp,r ) + S a", |
| "eq_num": "(16)" |
| } |
| ], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
"text": "Concepts that have many paths from the root concept are concepts that have many properties. Thus, we can view these cases as an appearance of word sense c i,j that was assigned to multiple properties. Algorithm 1 demonstrates the algorithm of one iteration in Gibbs Sampling of SWSM. Note that x and y are sampled according to Eq. (13) where the last part on the right side is replaced with Eq. (14) and each T s i ,path k is updated with Eq. (15).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Algorithm 1 Processes of One Iteration in Gibbs Sampling of SWSM Require: Disambiguate the word sense s i in text for each word w i in text do", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "n w i ,s i \u21d0 n w i ,s i \u2212 1 for each word sense c i,j in c i do for each path path k for c i,j do T s i ,path k \u21d0 T s i ,path k \u2212 P (path k |s i ) \u2211 n l=1 P (path l |s i ) end for end for c i \u21d0 y s i \u21d0 x n w i ,s i \u21d0 n w i ,s i + 1 for each word sense c i,j in c i do for each path path k for c i,j do T s i ,path k \u21d0 T s i ,path k + P (path k |s i ) \u2211 n l=1 P (path l |s i )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "end for end for end for", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Probability", |
| "sec_num": "5" |
| }, |
| { |
"text": "We used the Japanese word dictionary, the concept dictionary, and the Japanese corpus of the second version of the EDR electronic dictionary. All the nouns and verbs that could be followed from the root node in the concept dictionary were used for the experiments. In addition, we added some nouns by deleting \"\u3059\u308b (suru, the suffix that means do)\" from nominal verbs, to the concept dictionary. Consequently, the concept dictionary included 263,757 words and 406,710 leaf concepts, and 199,430 leaf concepts in them were used for the experiments. The internal nodes that were used for the experiments were 203,565 concepts. Most of the concepts that were not used were those that had no links to Japanese words. In addition, the concept dictionary included 13,846 concepts and 6,905 leaf concepts after concept abstraction. The threshold value we used was 5.0 \u00d7 10 \u22125 .",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The Japanese corpus consisted of seven subcorpora: the Nikkei, the Asahi Shimbun, AERA, Heibonsha World Encyclopedia, Encyclopedic Dictionary of Computer Science, Magazines, and Collections. They were annotated with word sense tags that were the concepts in the concept dictionary. Table 1 summarizes the numbers of documents and word tokens according to the type of text. The documents in this corpus only consisted of one sentence. We used the Nikkei for evaluation. The other six sub-corpora were used for pre-processing in an unsupervised manner. The EDR Japanese corpus did not include the basic forms of words. Thus we used a morphological analyzer, Mecab 4 , to identify the basic forms of words in the corpus. Shirai (2002) set up the three difficulty classes listed in Table 2. Tables 7 and 3 indicate the number of word types, noun tokens, and verb tokens according to difficulty and the average polysemy of target words according to difficulty. Only words that appeared more than four times in the corpus were classified based on difficulty.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 282, |
| "end": 289, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 778, |
| "end": 801, |
| "text": "Table 2. Tables 7 and 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "7" |
| }, |
| { |
"text": "Entropy Easy E(w) < 0.5 Normal 0.5 \u2264 E(w) < 1 Hard 1 \u2264 E(w) ",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Difficulty", |
| "sec_num": null |
| }, |
| { |
"text": "We used nouns and independent verbs in a local window whose size was 2N except for marks, as the surrounding words. We set N = 10 in this research. In addition, we deleted word senses that appeared only once through pre-processing. We performed experiments using the nine settings of the transition probability parameters: S a = {1.0, 5.0, 10.0} and S b = {10.0, 15.0, 20.0} in Eq.(10). We set the hyper-parameter \u03b3 = 0.1 in Eq.(2) for all experiments. Gibbs sampling was iterated 2,000 times and the most frequent senses of 100 samples in the latter 1,800 times were chosen for the answers. We performed experiments three times per setting for the transition probability parameters and calculated the average accuracies. Table 4 summarizes the results. It includes the micro-and macro-averaged accuracies of SWSM for the nine settings of the parameters, those of the random baseline, and those of LDAWN 5 . The experiments for the random baseline were performed 1,000 times. The best results are indicated in boldface. The table indicates that our model, SWSM, was better than both the random baseline and LDAWN. Although the macro-averaged accuracies of LDAWN were better than those of SWSM except when S a = 1 and S b = 10, both the micro-and macro-averaged accuracies of SWSM outperformed those of LDAWN when S a = 1 and S b = 10.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 722, |
| "end": 729, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Result", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Tables 5 and 6 summarize the micro-averaged accuracies of all words and the macro-averaged accuracies of all words. SWSM1 and SWSM2 in these tables denote the SWSMs with the setting when the best macro-averaged accuracy for all words was obtained (S a = 1 and S b = 10) and with the setting when the best micro-averaged accuracy for all words was obtained (S a = 5 and S b = 20). The best results in each table are indicated in boldface. These tables indicate that SWSM1 or SWSM2 was always better than both 5 The best results for the 13 settings. We changed the number of topics and the scale parameters according to (Boyd-Graber et al., 2007) . In addition, we tested that the effect of the size of a text, a sentence, or a whole daily publication because a document only consisted of a sentence in our Japanese corpus and there was no clues that indicated to what article the sentence belonged. Furthermore, we tested two kinds of transition probabilities, those that used priors and those where all the leaf nodes had the same amount probability. The best was the setting where there were 32 topics, scale parameter S was 10, the text size was a sentence, and the transition probabilities were those where all the leaf nodes had the same amount probability. The details are similar to those in (Sasaki et al., 2014) . However, we performed the experiments three times and calculated the accuracies but they only performed the experiments twice.", |
| "cite_spans": [ |
| { |
| "start": 508, |
| "end": 509, |
| "text": "5", |
| "ref_id": null |
| }, |
| { |
| "start": 618, |
| "end": 644, |
| "text": "(Boyd-Graber et al., 2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 1298, |
| "end": 1319, |
| "text": "(Sasaki et al., 2014)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Result", |
| "sec_num": "8" |
| }, |
| { |
| "text": "The leaf concepts below C, D, E, and F are omitted.PACLIC 29", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "It is the sum of twice from path ABD (a), twice from path AC (a), once from path ABE (b), and once from path ACE (b).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/jordwest/mecab-docs-en PACLIC 29", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "the random baseline and LDAWN.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| }, |
| { |
| "text": "All Easy Normal Hard Random 30.97 33.01 29.35 13.47 LDAWN 36.12 42.06 30.66 13.52 SWSM1 38.91 46.87 33.44 19.92 SWSM2 39.60 48.90 32.85 23.95 Table 7 : Macro-averaged accuracies for all words (%) Table 6 indicates that the macro averaged accuracies of LDAWN (42.51%) outperformed those of SWSM2 (42.09%) when all the words were evaluated. However, the same table reveals that the reason is due to the results for the easy class words, i.e., the words that almost always had the same sense. In addition, Tables 5 and 6 indicate that SWSM clearly outperformed the other systems for words in the normal and hard classes.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 141, |
| "text": "Random 30.97 33.01 29.35 13.47 LDAWN 36.12 42.06 30.66 13.52 SWSM1 38.91 46.87 33.44 19.92 SWSM2 39.60 48.90 32.85 23.95", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 142, |
| "end": 149, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 196, |
| "end": 203, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 503, |
| "end": 517, |
| "text": "Tables 5 and 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
"text": "The examples\" (possibility)\" and \" (wash)\" were cases where most senses were correctly predicted. \" (possibility)\" is a hardclass word and it appeared 18 times in the corpus. SWSM correctly predicted the senses of \u223c70% of them. It had three senses as described in Section 1: (1) the ability to do something well, (2) its feasibility, and (3) the certainty of something happening. First, SWSM could correctly predict the first sense. The words that surrounded them were, for instance, \" (both sides)\" and \" (human)\", and \" (research)\", \" (industrial complex)\", and \" (hereafter)\". Second, SWSM could correctly predict almost none of the words that had the second sense. The words surrounding an example were \" (every day)\", \" (various)\", \" (to face)\", and \" (people)\", and SWSM predicted the sense as sense (1). We think that \" (people)\" misled the answer. The words surrounding another example were \" (break through)\", \" (music)\", and \" (spread)\", and SWSM predicted the sense as sense (1). We think that \" (spread)\" could be a clue to predict the sense, but \" (music)\" misled the answer because it appeared many times in the corpus. Finally, SWSM could correctly predict the last sense. The words that surrounded them were, for instance, (1) \" (situation)\", \" (arise)\", and \" (appear)\", (2) \" (appreciation)\", \" (escalate)\", and \" (appear)\", and (3) \" (read)\" and \" (deny)\". \"(wash)\" is a normal-class word and it appeared five times in the corpus. SWSM correctly predicted the senses of \u223c80%, viz., four of them. It has two senses in the corpus: (1) sanctify (someone's heart) and (2) wash out a stain with water. The words surrounding the example that were incorrectly predicted were \" (tonight)\", \" (body)\", and \" (not)\", and SWSM answered the sense as (1) even though it was (2). The words surrounding the examples that were correctly predicted were (1) \" (islander)\", \" (tear)\", and \" (stone)\", (2) \" (look at)\" and \" (heart)\", (3) \" (limb)\", \" (face)\", \" (I)\", and \" (bath)\", (4) \" (body)\", \" (water)\", and \" (drain)\". These examples demonstrate that the surrounding words were good clues to disambiguate the word senses.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "9" |
| }, |
| { |
| "text": "We proposed the surrounding word sense model (SWSM), which used the word sense distribution around ambiguous words, and performed unsupervised all-words word sense disambiguation in the Japanese language. The system incorporated the EDR concept dictionary and we performed experiments using the EDR Japanese corpus. We evaluated the performance of the model using difficulty classes based on the entropy of senses in the corpus: easy, normal, and hard. We performed experiments with SWSM in nine settings for the transition probability parameters. The experiments revealed that SWSM outperformed the random baseline and LDAWN, which is a system that uses the topic model. The SWSM model clearly outperformed the other systems for senses in the normal and hard classes. Some examples that correctly predicted senses indicated that the surrounding words were good clues to disambiguate word senses even if we used unsupervised WSD.Jordan Boyd-Graber, David M. Blei, and Xiaojin Zhu. 2007 ", |
| "cite_spans": [ |
| { |
| "start": 958, |
| "end": 985, |
| "text": "Blei, and Xiaojin Zhu. 2007", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "10" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Mrd-based word sense disambiguation: Further extending lesk", |
| "authors": [ |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Su", |
| "middle": [ |
| "Nam" |
| ], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [], |
| "last": "Bond", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanae", |
| "middle": [], |
| "last": "Fujita", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Martinez", |
| "suffix": "" |
| }, |
| { |
| "first": "Takaaki", |
| "middle": [], |
| "last": "Tanaka", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 2008 International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "775--780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy Baldwin, Su Nam Kim, Francis Bond, Sanae Fujita, David Martinez, and Takaaki Tanaka. 2008. Mrd-based word sense disambiguation: Further ex- tending lesk. In Proceedings of the 2008 Interna- tional Joint Conference on Natural Language Pro- cessing, pages 775-780.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Latent dirichlet allocation", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Jordan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "1", |
| "issue": "3", |
| "pages": "993--1022", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Blei, Andrew Ng, and Michael Jordan. 2003. Latent dirichlet allocation. Journal of Machine Learning Research, 1(3):993-1022.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Example of WORNET-WALK2 Calculate the transition parameters using the sense frequencies" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Addition of Word Sense NodesThus, SWSM combines semantically similar concepts in the concept dictionary." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Example of Concept Structure concepts and (a)\u223c(c) represent the words, which indicates that word (a) is a polyseme that have two word senses, i.e., C and D. When word (a) appeared twice and word (b) appeared once, the probabilities are as illustrated inFigure 3. Note that C and D share the frequencies of word (a)." |
| }, |
| "TABREF1": { |
| "html": null, |
| "text": "Summary of Sub-corpora.", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "text": "", |
| "num": null, |
| "content": "<table><tr><td/><td colspan=\"3\">: Difficulty of disambiguation</td></tr><tr><td colspan=\"4\">Difficulty Word types Tokens(N) Tokens(V)</td></tr><tr><td>All</td><td>4,822</td><td>12,149</td><td>6,199</td></tr><tr><td>Easy</td><td>399</td><td>3,630</td><td>1,723</td></tr><tr><td>Normal</td><td>337</td><td>2,929</td><td>1,541</td></tr><tr><td>Hard</td><td>105</td><td>1,028</td><td>1,196</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "text": "Types and tokens of words according to difficulty", |
| "num": null, |
| "content": "<table><tr><td colspan=\"3\">Difficulty Noun polysemy Verb polysemy</td></tr><tr><td>All</td><td>4.2</td><td>5.5</td></tr><tr><td>Easy</td><td>3.9</td><td>4.0</td></tr><tr><td>Normal</td><td>4.4</td><td>5.3</td></tr><tr><td>Hard</td><td>8.6</td><td>10.3</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "html": null, |
| "text": "Average polysemy of target words according to difficulty", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "html": null, |
| "text": "Summary of result", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |