| { |
| "paper_id": "W06-0131", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:06:30.836040Z" |
| }, |
| "title": "POC-NLW Template for Chinese Word Segmentation", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Pattern Recognition and Intelligent System Lab Beijing University of Posts and Telecommunications", |
| "institution": "", |
| "location": { |
| "postCode": "100876", |
| "settlement": "Beijing", |
| "country": "P. R. China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Weiran", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Pattern Recognition and Intelligent System Lab Beijing University of Posts and Telecommunications", |
| "institution": "", |
| "location": { |
| "postCode": "100876", |
| "settlement": "Beijing", |
| "country": "P. R. China" |
| } |
| }, |
| "email": "xuweiran@263.net" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Pattern Recognition and Intelligent System Lab Beijing University of Posts and Telecommunications", |
| "institution": "", |
| "location": { |
| "postCode": "100876", |
| "settlement": "Beijing", |
| "country": "P. R. China" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper, a language tagging template named POC-NLW (position of a character within an n-length word) is presented. Based on this template, a twostage statistical model for Chinese word segmentation is constructed. In this method, the basic word segmentation is based on n-gram language model, and a Hidden Markov tagger based on the POC-NLW template is used to implement the out-of-vocabulary (OOV) word identification. The system participated in the MSRA_Close and UPUC_Close word segmentation tracks at SIGHAN Bakeoff 2006. Results returned by this bakeoff are reported here.", |
| "pdf_parse": { |
| "paper_id": "W06-0131", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper, a language tagging template named POC-NLW (position of a character within an n-length word) is presented. Based on this template, a twostage statistical model for Chinese word segmentation is constructed. In this method, the basic word segmentation is based on n-gram language model, and a Hidden Markov tagger based on the POC-NLW template is used to implement the out-of-vocabulary (OOV) word identification. The system participated in the MSRA_Close and UPUC_Close word segmentation tracks at SIGHAN Bakeoff 2006. Results returned by this bakeoff are reported here.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In Chinese word segmentation, there are two problems that still remain: one is the resolution of ambiguity, and the other is the identification of so-called out-of-vocabulary (OOV) or unknown words. In order to resolve these two problems, a two-stage statistical word segmentation strategy is adopted in our system. The first stage is optional, and the whole segmentation can be accomplished in the second stage. In the first stage, the n-gram language model is employed to implement basic word segmentation including disambiguation. In the second stage, a language tagging template named POC-NLW (position of a character within an n-length word) is introduced to accomplish unknown word identification as template-based character tagging.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The remainder of this paper is organized as follows. In section 2 and section 3, a brief description of the main methods adopted in our system is given. Results of our system at this bakeoff are reported in section 4. At last, conclusions are derived in section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the first stage, the basic word segmentation is accomplished. The key issue in this stage is the ambiguity problem, which is mainly caused by the fact that a Chinese character can occur in different word internal positions in different words (Xue, 2003) . A lot of machine learning techniques have been applied to resolve this problem, the n-gram language model is one of the most popular ones among them (Fu and Luke, 2003; Li et al., 2005) . As such, we also employed the n-gram model in this stage.", |
| "cite_spans": [ |
| { |
| "start": 245, |
| "end": 256, |
| "text": "(Xue, 2003)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 408, |
| "end": 427, |
| "text": "(Fu and Luke, 2003;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 428, |
| "end": 444, |
| "text": "Li et al., 2005)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Word Segmentation Stage", |
| "sec_num": "2" |
| }, |
| { |
| "text": "When a sentence is inputted, it is first segmented into a sequence of individual characters (e.g. ASCII strings, basic Chinese characters, punctuations, numerals and so on), marked as C 1,n . According to the system's dictionary, several word sequences W 1,m will be constructed as candidates. The function of the n-gram model is to find out the best word sequence W * that corresponds to C 1,n , which has the maximum integrated probability, i.e.,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Word Segmentation Stage", |
| "sec_num": "2" |
| }, |
| { |
| "text": "W^{*} = \\arg\\max_{W_{1,m}} P(W_{1,m} \\mid C_{1,n}) \\cong \\arg\\max_{W_{1,m}} \\prod_{i=1}^{m} P(w_i \\mid w_{i-1}) \\text{ for bigram} \\cong \\arg\\max_{W_{1,m}} \\prod_{i=1}^{m} P(w_i \\mid w_{i-2}, w_{i-1}) \\text{ for trigram}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Word Segmentation Stage", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The Maximum Likelihood method was used to estimate the word n-gram probabilities used in our model, and the linear interpolation method (Jelinek and Mercer, 1980) was applied to smooth these estimated probabilities.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 162, |
| "text": "(Jelinek and Mercer, 1980)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Basic Word Segmentation Stage", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The n-gram method is based on the exiting grams in the model, so it is good at judging the connecting relationship among known words, but does not have the ability to deal with unknown words in substance. Therefore, another OOV word identification model is required.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The OOV Word Identification Stage", |
| "sec_num": "3" |
| }, |
| { |
| "text": "OOV words are regarded as words that do not exist in a system's machine-readable dictionary, and a more detailed definition can be found in (Wu and Jiang, 2000) . In general, Chinese word can be created through compounding or abbreviating of most of existing characters and words. Thus, the key to solve the OOV word identification lies on whether the new word creation mechanisms in Chinese language can be extracted. Therefore, a POC-NLW language tagging template is introduced to explore such information on the character-level within words.", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 160, |
| "text": "(Wu and Jiang, 2000)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The OOV Word Identification Stage", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Many character-level based works have been done for the Chinese word segmentation, including the LMR tagging methods (Xue, 2003; Nakagawa. 2004) , the IWP mechanism (Wu and Jiang, 2000) . Based on these previous works, this POC-NLW template was derived. Assume that the length of a word is the number of component characters in it, the template is consist of two component: L max and a Wl-Pn tag set. L max to denote the maximum length of a word expressed by the template; a Wl-Pn tag denotes that this tag is assigned to a character at the n-th position within a l-length word,", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 128, |
| "text": "(Xue, 2003;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 129, |
| "end": 144, |
| "text": "Nakagawa. 2004)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 165, |
| "end": 185, |
| "text": "(Wu and Jiang, 2000)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The POC-NLW Template", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": ". Apparently, the size of this tag set is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The POC-NLW Template", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "n = 1, 2, \\ldots, l; L_{\\max} \\times (L_{\\max} + 1) / 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The POC-NLW Template", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For example, the Chinese word \" \u4eba \u6c11 \" is tagged as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The POC-NLW Template", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u4eba W2P1, \u6c11 W2P2 and \"\u4e2d\u56fd\u4eba\" is tagged as: \u4e2d W3P1, \u56fd W3P2, \u4eba W3P3 In the example, two words are tagged by the template respectively, and the Chinese character \"\u4eba\" has been assigned two different tags.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The POC-NLW Template", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In a sense, the Chinese word creation mechanisms could be extracted through statistics of the tags for each character on a certain large corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The POC-NLW Template", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "On the other hand, while a character sequence in a sentence is tagged by this template, the word boundaries are obvious. Meanwhile, the word segmentation is implemented.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The POC-NLW Template", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In addition, in this template, known words and unknown words are both regarded as sequences of individual characters. Thus, the basic word segmentation process, the disambiguation process and the OOV word identification process can be accomplished in a unified process. Thereby, this model can also be used alone to implement the word segmentation task. This characteristic will make the word segmentation system much more efficient.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The POC-NLW Template", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Form the description of POC-NLW template, it can be found that the word segmentation could be implemented as POC-NLW tagging, which is similar to the so-called part-of-speech (POS) tagging problem. In POS tagging, Hidden Markov Model (HMM) was applied as one of the most significant methods, as described in detail in (Brants, 2000) . The HMM method can achieve high accuracy in tagging with low processing costs, so it was adopted in our model.", |
| "cite_spans": [ |
| { |
| "start": 318, |
| "end": 332, |
| "text": "(Brants, 2000)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The HMM Tagger", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "According to the definition of POC-NLW template, the state set of HMM corresponds to the Wl-Pn tag set, and the symbol set is composed of all characters. However, the initial state probability matrix and the state transition probability matrix are not composed of all of the tags in the state set. To express more clearly, we define two subset of the state set:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The HMM Tagger", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 Begin Tag Set (BTS): this set is consisted of tag which can occur in the begging position in a word. Apparently, these tags must have the Wl-P1 form.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The HMM Tagger", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 End Tag Set (ETS): correspond to BTS, tags in this set should occur in the end position, and their form should be like Wl-Pl.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The HMM Tagger", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Apparently, the size of BTS is L max as well as of ETS. Thus, the initial state probability matrix corresponds to BTS instead of the whole state set. On the other hand, because of the word internal continuity, if the current tag Wl-Pn is not in ETS, then the next tag must be Wl-P(n+1). In other words, the case in which the transition probability is needed is when the current tag is in ETS and the next tag belongs to BTS. So, the state transition matrix in our model corresponds to ETS \u00d7 BTS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The HMM Tagger", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The probabilities used in HMM were defined similarly to those in POS tagging, and were estimated using the Maximum Likelihood method from the training corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The HMM Tagger", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the two-stage strategy, the output word sequence of the first stage is transferred into the second stage. The items in the sequence, including individual characters and words, which do not have a bigram or trigram relationship with the surrounding items, are picked out with its surrounding items to compose several sequences of items. These item sequences are processed by the HMM tagger to form new item sequences. At last, these processed items sequences are combined into the whole word sequence as the final output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The HMM Tagger", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The system submitted at this bakeoff was a twostage one, as describe at beginning of this paper. The model used in the first stage was trigram, and the L max of the template used in the second stage was set to 7.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In addition to the tags defined in the template before, a special tag is introduced into our Wl-Pn tag set to indicate all those characters that occur after the L max -th position in an extremely long (longer than L max ) word., formulized as WL max -P(L max +1). And then, there are 28 basic tags (from W1-P1 to W7-P7) and the special one W7-P8.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For instance, using the special tag, the word \" \u4e2d\u56fd\u5171\u4ea7\u515a\u4e2d\u592e\u59d4\u5458\u4f1a\" (from the MSRA Corpus) is tagged as: The performances of our system on the two corpuses can rank in the half-top group among the participated systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u4e2d W7-P1 \u56fd W7-P2 \u5171 W7-P3 \u4ea7 W7-P4 \u515a W7-P5 \u4e2d W7-P6 \u592e W7-P7 \u59d4 W7-P8 \u5458 W7-P8 \u4f1a W7-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We notice that the accuracies on known word segmentation are relatively better than on OOV words segmentation. This appears somewhat unexpected. In the close experiments we had done on the PKU and MSR corpuses of SIGHAN Bakeoff 2005, the relative performance of OOV Recall was much more outstanding than of the Fmeasure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We think this is due to the inappropriate parameters used in n-gram model, which overguarantees the performance of basic word segmentation. It can be seen on the IV Recall (highest in UPUC_Close track). For only the best output sequence of the n-gram model is transferred to the HMM tagger, some potential unknown words may be miss-split in the early stage. Thus, the OOV Recall is not very good, and this also affects the overall performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "On the other hand, the performances of OOV identification on UPUC are much better than on MSRA, while the performances of overall segmentation accuracy on UPUC are worse than on MSRA. This phenomenon also happened in our experiments on the Bakeoff 2005 corpuses of PKU and MSR. In the PKU test data, the rate of OOV words according is 0.058 while in MSR is 0.026. Thus, it can be conclude that the more unknown words occur, the more significant ability of OOV words identification appears.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In addition, the relative performance of OOV Precision are much better. This demonstrates that the OOV identification ability of our system is appreciable. In other words, the POC-NLW tagging method introduced is effective to some extent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In this paper, a POC-NLW template is presented for word segmentation, which aims at exploring the word creation mechanisms in Chinese language by utilizing the character-level information. A two-stage strategy was applied in our system to combine the n-gram model based word segmentation and OOV word identification implemented by a HMM tagger. Test results show that the method achieved high performance on word segmentation, especially on unknown words identification. Therefore, the method is a practical one that can be implemented as an integral component in actual Chinese NLP applications. From the results, it can safely be concluded that the method introduced here does find some character-level information, and the information could effectively conduct the word segmentation and unknown words identification. As this is the first time we participate in this bakeoff, and the work has been done as an integral part of another system during the past two months, the implementation of the segmentation system we submitted is coarse. A lot of improvements, on either theoretical methods or implementation techniques, are required in our future work, including the smoothing techniques in the n-gram model and the HMM model, the refinement of the feature extraction method and the POC-NLW template itself, the more harmonious integration strategy and so on.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CONCLUSION AND FURTHER WORK", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Statisticallyenhanced new word identification in a rule-based Chinese system", |
| "authors": [ |
| { |
| "first": "Andi", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zixin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the 2nd Chinese Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "46--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andi Wu, and Zixin Jiang. 2000. Statistically- enhanced new word identification in a rule-based Chinese system. Proceedings of the 2nd Chinese Language Processing Workshop, 46-51.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Interpolated Estimation of Markov Source Parameters from Sparse Data", |
| "authors": [ |
| { |
| "first": "Frederick", |
| "middle": [], |
| "last": "Jelinek", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "L" |
| ], |
| "last": "Mercer", |
| "suffix": "" |
| } |
| ], |
| "year": 1980, |
| "venue": "Proceedings of Workshop on Pattern Recognition in Practice", |
| "volume": "", |
| "issue": "", |
| "pages": "381--397", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frederick Jelinek, and Robert L. Mercer. 1980. Inter- polated Estimation of Markov Source Parameters from Sparse Data. Proceedings of Workshop on Pattern Recognition in Practice, Amsterdam, 381- 397.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A Twostage Statistical Word Segmentation System for Chinese", |
| "authors": [ |
| { |
| "first": "Guohong", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kang-Kwong", |
| "middle": [], |
| "last": "Luke", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Second SIGHAN Workshop on Chinese Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "156--159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guohong Fu, and Kang-Kwong Luke. 2003. A Two- stage Statistical Word Segmentation System for Chinese. Proceedings of the Second SIGHAN Workshop on Chinese Language Processing, 156- 159.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Chinese Word Segmentation in FTRD Beijing", |
| "authors": [ |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinnian", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Haila", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wu", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Fourth SIGHAN Workshop on Chinese Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "150--153", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heng Li, Yuan Dong, Xinnian Mao, Haila Wang, and Wu Liu. 2005. Chinese Word Segmentation in FTRD Beijing. Proceedings of the Fourth SIGHAN Workshop on Chinese Language Processing, 150- 153.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Chinese Word Segmentation as Character Tagging", |
| "authors": [ |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "International Journal of Computational Linguistics and Chinese Language Procession", |
| "volume": "8", |
| "issue": "1", |
| "pages": "29--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nianwen Xue. 2003. Chinese Word Segmentation as Character Tagging. International Journal of Com- putational Linguistics and Chinese Language Pro- cession, 8(1):29-48.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Chinese and Japanese Word Segmentation Using Word-Level and Character-Level Information", |
| "authors": [ |
| { |
| "first": "Tetsuji", |
| "middle": [], |
| "last": "Nakagawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 20th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "466--472", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tetsuji Nakagawa. 2004. Chinese and Japanese Word Segmentation Using Word-Level and Character- Level Information. Proceedings of the 20th Inter- national Conference on Computational Linguistics, 466-472.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "TnT -A Statistical Part-of-Speech Tagger", |
| "authors": [ |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Brants", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the Sixth Conference on Applied Natural Language Processing ANLP-2000", |
| "volume": "", |
| "issue": "", |
| "pages": "224--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thorsten Brants. 2000. TnT -A Statistical Part-of- Speech Tagger. Proceedings of the Sixth Confer- ence on Applied Natural Language Processing ANLP-2000, 224-231.", |
| "links": null |
| } |
| }, |
| "ref_entries": {} |
| } |
| } |