| { |
| "paper_id": "W06-0134", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:03:18.871847Z" |
| }, |
| "title": "A Pragmatic Chinese Word Segmentation System", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Harbin Institute of Technology", |
| "location": { |
| "addrLine": "Heilongjiang Province", |
| "postCode": "150001", |
| "country": "P.R.China" |
| } |
| }, |
| "email": "jiangwei@insun.hit.edu.cn" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Guan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Harbin Institute of Technology", |
| "location": { |
| "addrLine": "Heilongjiang Province", |
| "postCode": "150001", |
| "country": "P.R.China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xiao-Long", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Harbin Institute of Technology", |
| "location": { |
| "addrLine": "Heilongjiang Province", |
| "postCode": "150001", |
| "country": "P.R.China" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper presents our work for participation in the Third International Chinese Word Segmentation Bakeoff. We apply several processing approaches according to the corresponding sub-tasks, which are exhibited in real natural language. In our system, Trigram model with smoothing algorithm is the core module in word segmentation, and Maximum Entropy model is the basic model in Named Entity Recognition task. The experiment indicates that this system achieves F-measure 96.8% in MSRA open test in the third SIGHAN-2006 bakeoff.", |
| "pdf_parse": { |
| "paper_id": "W06-0134", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper presents our work for participation in the Third International Chinese Word Segmentation Bakeoff. We apply several processing approaches according to the corresponding sub-tasks, which are exhibited in real natural language. In our system, Trigram model with smoothing algorithm is the core module in word segmentation, and Maximum Entropy model is the basic model in Named Entity Recognition task. The experiment indicates that this system achieves F-measure 96.8% in MSRA open test in the third SIGHAN-2006 bakeoff.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Word is a logical semantic and syntactic unit in natural language. Unlike English, there is no delimiter to mark word boundaries in Chinese language, so in most Chinese NLP tasks, word segmentation is a foundation task, which transforms Chinese character string into word sequence. It is prerequisite to POS tagger, parser or further applications, such as Information Extraction, Question Answer system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our system participated in the Third International Chinese Word Segmentation Bakeoff, which was held in 2006. Compared with our system in the last bakeoff (Jiang 2005A) , the system in the third bakeoff is adjusted intending to have a better pragmatic performance. This paper mainly focuses on describing two sub-tasks: (1) The basic Word Segmentation; (2) Named entities recognition. We apply different approaches to solve above two tasks, and all the modules are integrated into a pragmatic system (ELUS).", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 168, |
| "text": "(Jiang 2005A)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "All the words in our system are categorized into five types: Lexicon words (LW), Factoid words (FT), Morphologically derived words (MDW), Named entities (NE), and New words (NW). Figure 1 demonstrates our system structure.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 179, |
| "end": 187, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The input character sequence is converted into one or several sentences, which are the basic processing units. The \"Basic Segmentation\" is used to identify the LW, FT, MDW words, and \"Named Entity Recognition\" is used to detect NE words. We don't adopt the New Word detection algorithm in our system in this bakeoff. The \"Disambiguation\" module performs to classify complicated ambiguous words, and all the above results are connected into the final result, which is denoted by XML format.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We apply the trigram model to the word segmentation task (Jiang 2005A) , and make use of Absolute Smoothing algorithm to overcome the sparse data problem.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 70, |
| "text": "(Jiang 2005A)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Trigram model is used to convert the sentence into a word sequence. Let w = w 1 w 2 \u2026w n be a word sequence, then the most likely word sequence w* in trigram is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "w^* = \\arg\\max_{w_1 w_2 \\cdots w_n} \\prod_{i=1}^{n} P(w_i | w_{i-2} w_{i-1}) \\qquad (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where let P(w 0 |w -2 w -1 ) be P(w 0 ) and let P(w 1 |w -1 w 0 ) be P(w 1 |w 0 ), and w i represents LW or a type of FT or MDW. In order to search the best segmentation way, all the word candidates are filled in the word lattice (Zhao 2005), and the Viterbi algorithm is used to search the best word segmentation path.", |
| "cite_spans": [ |
| { |
| "start": 230, |
| "end": 240, |
| "text": "(Zhao 2005", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "FT and MDW need to be detected when constructing word lattice (detailed in section 2.2). The data structure of lexicon can affect the efficiency of word segmentation, so we represent lexicon words as a set of TRIEs, which is a treelike structure. Words starting with the same character are represented as a TRIE, where the root represents the first Chinese character, and the children of the root represent the second characters, and so on (Gao 2004) .", |
| "cite_spans": [ |
| { |
| "start": 440, |
| "end": 450, |
| "text": "(Gao 2004)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "When searching a word lattice, there is the zero-probability phenomenon, due to the sparse data problem. For instance, if there is no cooccurrence pair \"\u6211\u4eec/\u5403/\u9999\u8549\"(we eat bananas) in the training corpus, then P(\u9999\u8549|\u6211\u4eec\uff0c\u5403) = 0. According to formula (1), the probability of the whole candidate path, which includes \"\u6211\u4eec/\u5403/ \u9999\u8549\" is zero, as a result of the local zero probability. In order to overcome the sparse data problem, our system has applied Absolute Discounting Smoothing algorithm (Chen, 1999) .", |
| "cite_spans": [ |
| { |
| "start": 482, |
| "end": 494, |
| "text": "(Chen, 1999)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "N_{1+}(w_{i-n+1}^{i-1} \\bullet) = |\\{ w_i : c(w_{i-n+1}^{i-1} w_i) > 0 \\}| \\qquad (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The notation N_{1+} is meant to evoke the number of words that have one or more counts, and the \u2022 is meant to evoke a free variable that is summed over. The function c(\u2022) represents the count of one word or the cooccurrence count of multiple words. In this case, the smoothing probability", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "p(w_i | w_{i-n+1}^{i-1}) = \\frac{\\max\\{ c(w_{i-n+1}^{i}) - D, 0 \\}}{\\sum_{w_i} c(w_{i-n+1}^{i})} + \\lambda \\, p(w_i | w_{i-n+2}^{i-1}) \\qquad (3), where \\lambda = \\frac{D}{\\sum_{w_i} c(w_{i-n+1}^{i})} N_{1+}(w_{i-n+1}^{i-1} \\bullet) \\qquad (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Because we use the trigram model, the maximum n may be 3. A fixed discount D (0 \u2264 D \u2264 1) can be set through deleted estimation on the training data. They arrive at the estimate", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "D = \\frac{n_1}{n_1 + 2 n_2} \\qquad (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where n 1 and n 2 are the total number of n-grams with exactly one and two counts, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "After the basic segmentation, some complicated ambiguous segmentation can be further disambiguated. In trigram model, only the previous two words are considered as context features, while in disambiguation processing, we can use the Maximum Entropy model fused more features (Jiang 2005B) or rule based method.", |
| "cite_spans": [ |
| { |
| "start": 275, |
| "end": 288, |
| "text": "(Jiang 2005B)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Trigram and Smoothing Algorithm", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "All the Factoid words can be represented as regular expressions. So the detection of factoid words can be achieved by Finite State Automaton(FSA). In our system, the following categories of factoid words can be detected, as shown in table 1. Deterministic FSA (DFA) is efficient because a unique \"next state\" is determined, when given an input symbol and the current state. While it is common for a linguist to write rules, which can be represented directly as a non-deterministic FSA (NFA), i.e. which allows several \"next states\" to follow a given input and state.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factoid and Morphological words", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Since every NFA has an equivalent DFA, we build a FT rule compiler to convert all the FT generative rules into a DFA. e.g.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factoid and Morphological words", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\"< digit > -> [0..9]; < year > ::= < digit >{< digit >+}\u5e74\"; < integer > ::= {< digit >+}; where \"->\" is a temporary generative rule, and \"::=\" is a real generative rule.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factoid and Morphological words", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "As for the morphological words, we erase the dealing module, because the word segmentation definition of our system adopts the PKU standard.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factoid and Morphological words", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We adopt Maximum Entropy model to perform the Named Entity Recognition. The extensive evaluation on NER systems in recent years (such as CoNLL-2002 and CoNLL-2003) indicates that the best statistical systems are typically achieved by using a linear (or log-linear) classification algorithm, such as Maximum Entropy model, together with a vast amount of carefully designed linguistic features. And this still seems true at present in terms of statistics based methods.", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 151, |
| "text": "CoNLL-2002 and", |
| "ref_id": null |
| }, |
| { |
| "start": 152, |
| "end": 163, |
| "text": "CoNLL-2003)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Maximum Entropy model (ME) is defined over H \u00d7 T in segmentation disambiguation, where H is the set of possible contexts around target word that will be tagged, and T is the set of allowable tags, such as B-PER, I-PER, B-LOC, I-LOC etc. in our NER task. Then the model's conditional probability is defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2211 \u2208 = T t t h p t h p h t p ' ) ' , ( ) , ( ) | (", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "p(h, t) = \\pi \\mu \\prod_{j=1}^{k} \\alpha_j^{f_j(h, t)} \\qquad (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where h is the current context and t is one of the possible tags.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The several typical kinds of features can be used in the NER system. They usually include the context feature, the entity feature, and the total resource or some additional resources. Table 2 shows the context feature templates.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 184, |
| "end": 191, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Table2 NER feature template 1 Type Feature Template One order feature", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "w i-2 , w i-1 , w i , w i+1 , w i+2 Two order feature w i-1:i , w i:i+1 NER tag feature t i-1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "While, we only point out the local feature template, some other feature templates, such as long distance dependency templates, are also helpful to NER performance. These trigger features can be collected by Average Mutual Information or Information Gain algorithm etc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Besides context features, entity features are another important factor, such as the suffix of Location or Organization. The following 8 kinds of dictionaries are usually useful (Zhao 2006): In addition, some external resources may improve the NER performance too, e.g. we collect a lot of entities for Chinese Daily Newspaper in 2000, and total some entity features. However, our system is based on Peking University (PKU) word segmentation definition and PKU NER definition, so we only used the basic features in table 2 in this bakeoff. Another effect is the corpus: our system was trained on the Chinese Peoples' Daily Newspaper corpora in 1998, which conforms to PKU NER definition. In section 4, we will give our system performance with the basic features in Chinese Peoples' Daily Newspaper corpora.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The performance of our system in the third bakeoff is presented in table 4 in terms of recall(R), precision(P) and F score in percentages. The score software is standard and open by SIGHAN. Our system has good performance in terms of R iv measure. The R iv measure in close test and in open test are 99.1% and 98.9% respectively. This good performance owes to class-based trigram with the absolute smoothing and word disambiguation algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Evaluation in Word Segmentation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In our system, the open test has a better performance than the close test for the following reasons:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Evaluation in Word Segmentation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(1) Named Entity Recognition module is added into the open test system. And Named Entities, including PER, LOC, ORG, occupy the most of the out-of-vocabulary words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Evaluation in Word Segmentation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(2) The system of close test can only use the dictionary that is collected from the given training corpus, while the system of open test can use a better dictionary, which includes the words that exist in MSRA training corpus in SIGHAN2005. And we know, the dictionary is one of the important factors that affects the performance, because the LW candidates in the word lattice are generated from the dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Evaluation in Word Segmentation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "As for the dictionary, we compare the two collections in SIGHAN2005 and SIGHAN2006, and evaluating in SIGHAN2005 MSRA close test. There are less training sentence in SIGHAN2006, as a result, there is at least 1.2% performance decrease. So this result indicates that the dictionary can bring an important impact in our system. Table 5 gives our system performance in the second bakeoff. We'll make brief comparison. Comparing table 4 with table 5 , we find that the OOV is 3.4 in the third bakeoff, which is higher than the value in the last bakeoff. Obviously, it is one of the reasons that affect our performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 326, |
| "end": 333, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 415, |
| "end": 445, |
| "text": "Comparing table 4 with table 5", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Evaluation in Word Segmentation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In addition, based on pragmatic considerations, our system has been simplified; for instance, we erased the new word detection algorithm and there is no morphological word detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Evaluation in Word Segmentation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In MSRA NER open test, our NER system was trained on prior six-month corpora of Chinese Peoples' Daily Newspaper in 1998, which were annotated by Peking University. Table 6 shows the NER performance in the MSRA open test. As a result of insufficiency in preparing bakeoff, our system is only trained in Chinese Peoples' Daily Newspaper, in which the NER is defined according to PKU standard. However, the NER definition of MSRA is different from that of PKU, e.g, \"\u4e2d\u534e/LOC \u6c11\u65cf\", \"\u9a6c/PER \u5217 /PER \u4e3b\u4e49\" in MSRA, are not entities in PKU. So the training corpus becomes a main handicap to decrease the performance of our system, and it also explains that there is much difference between the recall rate and the precision in table 6. Table 7 gives the evaluation of our NER system in Chinese Peoples' Daily Newspaper, trained on the prior five-month corpora and tested on the sixth-month corpus. We also use the feature templates in table 2, in order to make comparison with table 6. This experiment indicates that our system can have a good performance, if the test corpus and the training corpora conform to the condition of independent identically distributed attribution.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 165, |
| "end": 172, |
| "text": "Table 6", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 724, |
| "end": 731, |
| "text": "Table 7", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Some points need to be further considered:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "(1) The dictionary can bring a big impact to the performance, as the LW candidates come from the dictionary. However a big dictionary can be easily acquired in the real application.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "(2) Due to technical problems and insufficient preparation, we used the PKU NER definition; however, it does not seem unified with the MSRA definition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "(3) Our NER system is a word-based model, and we have found that the word segmentation with two different dictionaries can bring a big impact to the NER performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "(4) We erase the new word recognition algorithm in our system. While, we should explore the real annotated corpora, and add new word detection algorithm, if it has positive effect. e.g. \"\u8377\u82b1 \u5956\"(lotus prize) can be recognized as one word by the conditional random fields model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis and Discussion", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We have briefly described our word segmentation system and NER system. We use word-based features in the whole processing. Our system has a good performance in terms of R iv measure, so this means that the trigram model with the smoothing algorithm can deal with the basic segmentation task well. However, the result in the bakeoff indicates that detecting out-of-vocabulary words seems to be a harder task than dealing with the segmentation-ambiguity task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The work in the future will concentrate on two sides: improving the NER performance and adding New Word Detection Algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "w i -current word, w i-1 -previous word, t i -current tag. 2 Partial translation: \u5317\u4eac BeiJing,\u7ebd\u7ea6 New york;\u5f20 Zhang, \u738b Wang; \u8001 Old;\u5c71 mountain,\u6e56 lake;\u5c40 bureau.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Chinese Lexical Analysis Using Hierarchical Hidden Markov Model", |
| "authors": [ |
| { |
| "first": "Huaping", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Second SIGHAN workshop affiliated with 4th ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "63--70", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "HuaPing Zhang, Qun Liu etc. 2003. Chinese Lexical Analysis Using Hierarchical Hidden Markov Model, Second SIGHAN workshop affiliated with 4th ACL, Sapporo Japan, pp.63-70.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Chinese Word Segmentation: A Pragmatic Approach", |
| "authors": [ |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Mu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianfeng Gao, Mu Li et al. 2004. Chinese Word Seg- mentation: A Pragmatic Approach. MSR-TR-2004- 123, November 2004.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Chinese segmentation and new word detection using conditional random fields", |
| "authors": [ |
| { |
| "first": "Fangfang", |
| "middle": [], |
| "last": "Peng Fuchun", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng Fuchun, Fangfang Feng and Andrew McCallum. Chinese segmentation and new word detection us- ing conditional random fields. In:COLING 2004.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "An empirical study of smoothing techniques for language modeling", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Stanley", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Computer Speech and Language", |
| "volume": "13", |
| "issue": "", |
| "pages": "369--394", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stanley F.Chen and J. Goodman. 1999. An empirical study of smoothing techniques for language model- ing. Computer Speech and Language. 13:369-394.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Chinese Word Segmentation based on Mixing Model. 4th SIGHAN Workshop", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "180--182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Jiang, Jian Zhao et al. 2005A.Chinese Word Segmentation based on Mixing Model. 4th SIGHAN Workshop. pp. 180-182.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "applying rough sets in word segmentation disambiguation based on maximum entropy model", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiao-Long", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Guan", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "New Series)", |
| "volume": "13", |
| "issue": "1", |
| "pages": "94--98", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Jiang, Xiao-Long Wang, Yi Guan et al. 2005B. applying rough sets in word segmentation disam- biguation based on maximum entropy model. Jour- nal of Harbin Institute of Technology (New Series). 13(1): 94-98.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Research on Conditional Probabilistic Model and Its Application in Chinese Named Entity Recognition", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhao Jian", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhao Jian. 2006. Research on Conditional Probabilis- tic Model and Its Application in Chinese Named Entity Recognition. Ph.D. Thesis. Harbin Institute of Technology, China.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Research on Chinese Morpheme Analysis Based on Statistic Language Model", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhao Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhao Yan. 2005. Research on Chinese Morpheme Analysis Based on Statistic Language Model. Ph.D. Thesis. Harbin Institute of Technology, China.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "html": null, |
| "content": "<table><tr><td/><td colspan=\"2\">Factoid word categories</td></tr><tr><td>FT type</td><td>Factoid word</td><td>Example</td></tr><tr><td>Number</td><td>Integer, real, percent etc.</td><td>2910, 46.12%, \u4e8c\u5341 \u4e5d, \u4e09\u5343\u4e03\u767e\u4e8c\u5341</td></tr><tr><td>Date</td><td>Date</td><td>2005 \uf98e 5 \u6708 12 \u65e5</td></tr><tr><td>Time</td><td>Time</td><td>8:00, \u5341\u70b9\u4e8c\u5341\u5206</td></tr><tr><td colspan=\"2\">English English word,</td><td>How, are, you</td></tr><tr><td>www</td><td>Website, IP address</td><td>http://www.hit.edu.cn 192.168.140.133</td></tr><tr><td>email</td><td>Email</td><td>elus@google.com</td></tr><tr><td>phone</td><td>Phone, fax</td><td>0451-86413322</td></tr></table>", |
| "num": null, |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table><tr><td/><td colspan=\"2\">resource dictionary 2</td></tr><tr><td>List Type</td><td>Lexicon</td><td>Example</td></tr><tr><td>Word list</td><td>Place lexicon Chinese surname</td><td>\u5317\u4eac, \u7ebd\u7ea6, \u9a6c\u5bb6\u6c9f \u5f20, \u738b, \u8d75, \u6b27\u9633</td></tr><tr><td/><td>Prefix of PER</td><td>\u8001, \u963f, \u5c0f</td></tr><tr><td>String list</td><td>Suffix of PLA</td><td>\u5c71, \u6e56, \u5bfa, \u53f0, \u6d77</td></tr><tr><td/><td>Suffix of ORG</td><td>\u4f1a, \u8054\u76df, \u7ec4\u7ec7, \u5c40</td></tr><tr><td>Character list</td><td colspan=\"2\">Character for CPER \u519b,\u521a, \u83b2, \u8335, \u5029 Character for FPER \u79d1, \u66fc, \u65af, \u5a03, \u8d1d Rare character \u6ed7, \u80e8, \u8585</td></tr></table>", |
| "num": null, |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "content": "<table><tr><td/><td/><td/><td colspan=\"3\">test in SIGHAN2006 (%)</td></tr><tr><td>MSRA</td><td>R</td><td>P</td><td>F</td><td colspan=\"2\">OOV R oov</td><td>R iv</td></tr><tr><td>Close</td><td colspan=\"3\">96.3 91.8 94.0</td><td>3.4</td><td colspan=\"2\">17.5 99.1</td></tr><tr><td>Open</td><td colspan=\"3\">97.7 96.0 96.8</td><td>3.4</td><td colspan=\"2\">62.4 98.9</td></tr></table>", |
| "num": null, |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "html": null, |
| "content": "<table><tr><td/><td/><td colspan=\"4\">MSRA test in SIGHAN 2005 (%)</td></tr><tr><td>MSRA</td><td>R</td><td>P</td><td>F</td><td colspan=\"2\">OOV R oov</td><td>R iv</td></tr><tr><td>Close</td><td colspan=\"3\">97.3 94.5 95.9</td><td>2.6</td><td colspan=\"2\">32.3 99.1</td></tr><tr><td>Open</td><td colspan=\"3\">98.0 96.5 97.2</td><td>2.6</td><td colspan=\"2\">59.0 99.0</td></tr></table>", |
| "num": null, |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "html": null, |
| "content": "<table><tr><td colspan=\"4\">The NER performance in MSRA Open test</td></tr><tr><td>MSRA NER</td><td colspan=\"2\">Precision Recall</td><td>F Score</td></tr><tr><td>PER</td><td>93.68%</td><td>86.37%</td><td>89.87</td></tr><tr><td>LOC</td><td>85.50%</td><td>59.67%</td><td>70.29</td></tr><tr><td>ORG</td><td>75.87%</td><td>47.48%</td><td>58.41</td></tr><tr><td>Overall</td><td>86.97%</td><td>65.56%</td><td>74.76</td></tr></table>", |
| "num": null, |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "html": null, |
| "content": "<table><tr><td>MSRA NER</td><td colspan=\"2\">Precision Recall</td><td>F Score</td></tr><tr><td>CPN</td><td>93.56</td><td>90.96</td><td>92.24</td></tr><tr><td>FPN</td><td>90.42</td><td>86.47</td><td>88.40</td></tr><tr><td>LOC</td><td>91.94</td><td>90.52</td><td>91.22</td></tr><tr><td>ORG</td><td>88.38</td><td>84.52</td><td>86.40</td></tr><tr><td>Overall</td><td>91.35</td><td>88.85</td><td>90.08</td></tr></table>", |
| "num": null, |
| "text": "The NER test in Chinese Peoples' Daily", |
| "type_str": "table" |
| } |
| } |
| } |
| } |