| { |
| "paper_id": "W06-0130", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:03:49.233286Z" |
| }, |
| "title": "Chinese Named Entity Recognition with Conditional Probabilistic Models", |
| "authors": [ |
| { |
| "first": "Aitao", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Yahoo", |
| "location": { |
| "addrLine": "701 First Avenue Sunnyvale", |
| "postCode": "94089", |
| "region": "CA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Fuchun", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "fuchun@yahoo-inc.com" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Shan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Yahoo", |
| "location": { |
| "addrLine": "701 First Avenue Sunnyvale", |
| "postCode": "94089", |
| "region": "CA" |
| } |
| }, |
| "email": "rshan@yahoo-inc.com" |
| }, |
| { |
| "first": "Gordon", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "gzsun@yahoo-inc.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes the work on Chinese named entity recognition performed by Yahoo team at the third International Chinese Language Processing Bakeoff. We used two conditional probabilistic models for this task, including conditional random fields (CRFs) and maximum entropy models. In particular, we trained two conditional random field recognizers and one maximum entropy recognizer for identifying names of people, places, and organizations in unsegmented Chinese texts. Our best performance is 86.2% F-score on MSRA dataset, and 88.53% on CITYU dataset.", |
| "pdf_parse": { |
| "paper_id": "W06-0130", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes the work on Chinese named entity recognition performed by Yahoo team at the third International Chinese Language Processing Bakeoff. We used two conditional probabilistic models for this task, including conditional random fields (CRFs) and maximum entropy models. In particular, we trained two conditional random field recognizers and one maximum entropy recognizer for identifying names of people, places, and organizations in unsegmented Chinese texts. Our best performance is 86.2% F-score on MSRA dataset, and 88.53% on CITYU dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "At the third International Chinese Language Processing Bakeoff, we participated in the closed test in the Named Entity Recognition (NER) task using the MSRA corpus and the CITYU corpus. The named entity types include person, place, and organization. The training data consist of texts that are segmented into words with names of people, places, and organizations labeled. And the testing data consist of un-segmented Chinese texts, one sentence per line.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There are many well known models for English named recognition, among which Conditional Random Fields (Lafferty et al. 2001) and maximum entropy models (Berger et al. 2001) have achieved good performance in English in CoNLL NER tasks. To understand the performance of these two models on Chinese, we applied both models to the Chinese NER task on MSRA data and CITYU data.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 124, |
| "text": "(Lafferty et al. 2001)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 152, |
| "end": 172, |
| "text": "(Berger et al. 2001)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We trained two named entity recognizers based on conditional random field and one based on maximum entropy model. Both conditional random field and maximum entropy models are capable of modeling arbitrary features of the input, thus are well suit for many language processing tasks. However, there exist significant differences between these two models. To apply a maximum entropy model to NER task, we have to first train a maximum entropy classifier to classify each individual word and then build a dynamic programming for sequence decoding. While in CRFs, these two steps are integrated together. Thus, in theory, CRFs are superior to maximum entropy models in sequence modeling problem and this will also confirmed in our Chinese NER experiments. The superiority of CRFs on Chinese information processing was also demonstrated in word segmentation (Peng et al. 2004) . However, the training speed of CRFs is much slower than that of maximum entropy models since training CRFs requires expensive forward-backward algorithm to compute the partition function.", |
| "cite_spans": [ |
| { |
| "start": 853, |
| "end": 871, |
| "text": "(Peng et al. 2004)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We used Taku's CRF package 1 to train the first CRF recognizer, and the MALLET 2 package with BFGS optimization to train the second CRF recognizer. We used a C++ implementation 3 of maximum entropy modeling and wrote our own second order dynamic programming for decoding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The first CRF recognizer used the features C -2 , C -1 , C 0 , C -1 , C 2 , C -2 C -1 , C -1 C 0 , C 0 C -1 , C 1 C 2 , and C -1 C 1 , where C 0 is the current character, C 1 the next character, C 2 the second character after C 0 , C -1 the character preceding C 0 , and C -2 the second character before C 0 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The second CRF recognizer used the same set of basic features but the feature C 2 . In addition, the first CRF recognizer used the tag bigram feature, and the second CRF recognizer used word and character cluster features, obtained automatically from the training data only with distributional word clustering (Tishby and Lee, 1993) .", |
| "cite_spans": [ |
| { |
| "start": 310, |
| "end": 332, |
| "text": "(Tishby and Lee, 1993)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The maximum entropy recognizer used the following unigram, bigram features, and type features:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "C -2 , C -1 , C 0 , C 1 , C 2 , C -4 C -3 , C -3 C -2 , C -2 C -1 , C -1 C 0 , C 0 C 1 , C 1 C 2 , C 2 C 3 , C 3 C 4, and T -2 T -1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "When using the first CRF package, we found the labeling scheme OBIE performs better than the OBI scheme. In the OBI scheme, the first character of a named entity is labeled as \"B\", the remaining characters, including the last character, are all labeled as \"I\". And any character that is not part of a named entity is labeled as \"O\". In the OBIE scheme, the last character of a named entity is labeled as \"E\". The other characters are labeled in the same way as in the OBI scheme. The first CRF recognizer used the OBIE labeling scheme, and the second CRF recognizer used the OBI scheme.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We tried a window size of seven characters (three characters preceding the current character and three characters following the current character) with almost no difference in performance from using the window size of five characters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "When a named entity occurs frequently in the training data, there is a very good chance that it will be recognized when appearing in the testing data. However, for entity names of rare occurrence, they are much harder to recognize in the 1 Available from http://chasen.org/~taku/software/CRF++ 2 Available at http://mallet.cs.umass.edu 3 Available at http://homepages.inf.ed.ac.uk/s0450736/maxent_toolkit.htm testing data. Thus it may be beneficial to examine the testing data to identify the named entities that occur in the training data, and assign them the same label as in the training data. From the training data, we extracted the person names of at least three characters, the place names of at least four characters, and the organization names of at least four characters. We removed from the dictionary the named entities that are also common words. We did not include the short names in the dictionary because they may be part of long names. We produced a run first using one of the NER recognizers, and then replaced the labels of a named entity assigned by a recognizer with the labels of the same named entity in the training data without considering the contexts. Table 1 presents the official results of five runs in the closed test of the NER task on MSRA corpus. The first two runs, msra_a and msra_b, are produced using the first CRF recognizer; the next two runs, msra_f and msra_g, are produced using the second CRF recognizer which used randomly selected 90% of the MSRA training data. When we retrained the second CRF recognizer with the whole set of the MSRA training data, the overall F-Score is 85.00, precision 90.28%, and recall 80.31%. The last run, msra_r, is produced using the MaxEnt recognizer.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1179, |
| "end": 1186, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The msra_a run used the set of basic features with a window size of five characters. Slightly over eight millions features are generated from the MSRA training data, excluding features occurred only once. The training took 321 iterations to complete. The msra_b run is produced from the msra_a run by substituting the labels assigned by the recognizer to a named entity with the labels of the named entity in the training data if it occurs in the training data. For example, in the MSRA training data, the text \u6bd5\u52a0\uf96a\u6545\u5c45 in the sentence \u6211 \u8fd8 \u5230 \u6bd5 \u52a0 \uf96a \u6545 \u5c45 \u53bb \u77bb \u4ef0 is tagged as a place name. The same entity also appeared in MSRA testing data set. The first CRF recognizer failed to mark the text \u6bd5\u52a0\uf96a\u6545\u5c45 as a place name instead it tagged \u6bd5\u52a0\uf96a as a person name. In post-processing, the text \u6bd5\u52a0\uf96a\u6545 \u5c45 in the testing data is re-tagged as a place name. As another example, the person name \u7ae0\uf9a3\u751f appears both in the training data and in the testing data. The first CRF recognizer failed to recognize it as a person name. In post-processing the text \u7ae0\uf9a3\u751f is tagged as a person name because it appears in the training data as a person name. The text \"\u5168\u56fd\u4eba\u5927\u9999\u6e2f\u7279\u522b\ufa08\u653f\u533a\u7b79\u5907 \u59d4\u5458\u4f1a\" was correctly tagged as an organization name. It is not in the training data, but the texts \"\u5168\u56fd\u4eba\u5927\", \"\u9999\u6e2f\u7279\u522b\ufa08\u653f\u533a\", and \"\u7b79\u5907\u59d4 \u5458\u4f1a\" are present in the training data and are all labeled as organization names. In our postprocessing, the correctly tagged organization name is re-tagged incorrectly as three organization names. This is the main reason why the performance of the organization name got much worse than that without post-processing. 
Table 3 : The performance of the msra_b run broken down by entity type. Table 2 presents the performance of the msra_a run by entity type. Table 3 shows the performance of the msra_b run by entity type. While the post-processing improved the performance of person name recognition, but it degraded the performance of organization name recognition. Overall the performance was worse than that without post-processing. In our development testing, we saw large improvement in organization name recognition with post-processing. Table 4 : Official results in the closed test of the NER task on CITYU corpus. Table 4 presents the official results of four runs in the closed test of the NER task on CITYU corpus. The first two runs, msra_a and msra_b, are produced using the first CRF recognizer; the next two runs, msra_f and msra_g, are produced using the second CRF recognizer. The system configurations are the same as used on the MSRA corpus. The cityu_b run is produced from cityu_a run with post-processing, and the cityu_g run produced from cityu_f run with post-processing. We used the whole set of CITYU to train the first CRF model, and 80% of the CITYU training data to train the second CRF model. No results on full training data are available at the time of submission.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1581, |
| "end": 1588, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1653, |
| "end": 1660, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1720, |
| "end": 1727, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 2106, |
| "end": 2113, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 2185, |
| "end": 2192, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Run ID Precision", |
| "sec_num": null |
| }, |
| { |
| "text": "All the runs we submitted are based characters. We tried word-based approach but found it was not as effective as character-based approach. Table 4 shows the confusion matrix of the labels. The rows are the true labels and the columns are the predicted labels. An entry at row x and column y in the table is the number of characters that are predicted as y while the true label is x. Ideally, all entries except the diagonal should be zero.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 140, |
| "end": 147, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Run ID Precision", |
| "sec_num": null |
| }, |
| { |
| "text": "The table was obtained from the result of our development dataset for MSRA data, which are the last 9,364 sentences of the MSRA training data (we used the first 37,000 sentences for training in the model developing phase). As we can see, most of the errors lie in the first column, indicating many of the entities labels are predicated as O. This resulted low recall for entities. Another major error is on detecting the beginning of ORG (B-O). Many of them are mislabeled as O and beginning of location (B-L), resulting low recall and low precision for ORG. A second interesting thing to notice is the numbers presented in Table 2 . They may suggest that person name recognition is more difficult than location name recognition, which is contrary to what we believe, since Chinese person names are short and have strict structure and they should be easier to recognize than both location and organization names. We examined the MSRA testing data and found out that 617 out of 1,973 person names occur in a single sentence as a list of person names. In this case, a simple rule may be more effective. When we excluded the sentence with 617 person names, for person name recognition of our msra_a run, the F-score is 90.74, precision 93.44%, and recall 88.20%. Out of the 500 person names that were not recognized in our msra_a run, 340 occurred on the same line of 617 person names.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 624, |
| "end": 631, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We applied Conditional Random Fields and maximum entropy models to Chinese NER tasks and achieved satisfactory performance. Three systems with different implementations and different features are reported. Overall, CRFs are superior to maximum entropy models in Chinese NER tasks. Useful features include using BIOE tags instead of BIO tags and word and character clustering features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A Maximum Entropy Approach to Natural Language Processing", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Berger", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [ |
| "Della" |
| ], |
| "last": "Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [ |
| "Della" |
| ], |
| "last": "Pietra", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "22", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Berger, Stephen Della Pietra, and Vincent Della Pietra, A Maximum Entropy Approach to Natural Language Processing, Computational Lin- guistics, 22 (1)", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Lafferty", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proc. 18th International Conf. on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "282--289", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Lafferty, Andrew McCallum, and Fernando Pereira, Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In: Proc. 18th International Conf. on Machine Learning, Morgan Kaufmann, San Francisco, CA (2001) 282-289", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Chinese Segmentation and New Word Detection using Conditional Random Fields", |
| "authors": [ |
| { |
| "first": "Fuchun", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Fangfang", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of The 20th International Conference on Computational Linguistics (COLING 2004)", |
| "volume": "", |
| "issue": "", |
| "pages": "562--568", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fuchun Peng, Fangfang Feng, and Andrew McCallum, Chinese Segmentation and New Word Detection using Conditional Random Fields, In Proceedings of The 20th International Conference on Computational Linguistics (COLING 2004) , pages 562-568, August 23-27, 2004, Geneva, Swit- zerland", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Distributional Clustering of English Words", |
| "authors": [ |
| { |
| "first": "Naftali", |
| "middle": [], |
| "last": "Tishby", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Proceedings of the 31st Annual Conference of Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "183--190", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naftali Tishby and Lillian Lee, Distributional Cluster- ing of English Words, In Proceedings of the 31st Annual Conference of Association for Computa- tional Linguistics, pp 183--190, 1993.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "text": "Official results in the closed test of the NER task on MSRA corpus.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td/><td>Recall</td><td>F-Score</td></tr><tr><td>msra_a 91.22%</td><td>81.71%</td><td>86.20</td></tr><tr><td>msra_b 88.43%</td><td>82.88%</td><td>85.56</td></tr><tr><td>msra_f 88.45%</td><td>79.31%</td><td>83.63</td></tr><tr><td>msra_g 86.61%</td><td>80.32%</td><td>83.35</td></tr><tr><td>msra_r 87.48%</td><td>71.68%</td><td>78.80</td></tr><tr><td>Table 1</td><td/><td/></tr></table>", |
| "html": null |
| }, |
| "TABREF4": { |
| "text": "Confusion matrix of on the MSRA development dataset", |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |