| { |
| "paper_id": "O98-3002", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:08:45.031897Z" |
| }, |
| "title": "Unknown Word Detection for Chinese by a Corpus-based Learning Method", |
| "authors": [ |
| { |
| "first": "Keh-Jiann", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Ming-Hong", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "One of the most prominent problems in computer processing of the Chinese language is identification of the words in a sentence. Since there are no blanks to mark word boundaries, identifying words is difficult because of segmentation ambiguities and occurrences of out-of-vocabulary words (i.e., unknown words). In this paper, a corpus-based learning method is proposed which derives sets of syntactic rules that are applied to distinguish monosyllabic words from monosyllabic morphemes which may be parts of unknown words or typographical errors. The corpus-based learning approach has the advantages of: 1. automatic rule learning, 2. automatic evaluation of the performance of each rule, and 3. balancing of recall and precision rates through dynamic rule set selection. The experimental results show that the rule set derived using the proposed method outperformed hand-crafted rules produced by human experts in detecting unknown words.", |
| "pdf_parse": { |
| "paper_id": "O98-3002", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "One of the most prominent problems in computer processing of the Chinese language is identification of the words in a sentence. Since there are no blanks to mark word boundaries, identifying words is difficult because of segmentation ambiguities and occurrences of out-of-vocabulary words (i.e., unknown words). In this paper, a corpus-based learning method is proposed which derives sets of syntactic rules that are applied to distinguish monosyllabic words from monosyllabic morphemes which may be parts of unknown words or typographical errors. The corpus-based learning approach has the advantages of: 1. automatic rule learning, 2. automatic evaluation of the performance of each rule, and 3. balancing of recall and precision rates through dynamic rule set selection. The experimental results show that the rule set derived using the proposed method outperformed hand-crafted rules produced by human experts in detecting unknown words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "One of the most prominent problems in computer processing of Chinese language is the identification of the words in a sentence. There are no blanks to mark word boundaries in Chinese text. As a result, identifying words is difficult because of segmentation ambiguities and occurrences of out-of-vocabulary words ( i.e., unknown words). For instance, in (1), the proper name AE\u00ce 'Wang, Ying-Xiong' is a typical example of an unknown word, and it has ambiguous segmentation of AE 'king' \u00ce 'hero'. Another example in 1UR3 'university student in Taiwan' also has ambiguous segmentations of 'Taiwan' UR3 'university student' , UR 'National Taiwan University' 3 'give birth to' ,and 'Taiwan' UR 'university' 3 'give birth to' etc.:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "(1) AE\u00ce\"+!\"\u00c01 UR3'", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Ying-Xiong Wang is a typical university student in Taiwan.' * Institute of Information Science, Academia Sinica, Taipei, Taiwan, R. O. C. E-mail: {kchen, evan}@iis.sinica.edu.tw", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Most of the papers dealing with the problem of word segmentation have focused only on the resolution of ambiguous segmentation. The problem of unknown word identification is considered to be more difficult and needs to be further investigated. According to an inspection of the Sinica corpus [Chen et al., 1996] , which is a balanced Chinese corpus with words segmented based on the Chinese word segmentation standard for information processing proposed by ROCLING [Huang et al., 1997] , the most productive unknown words are of the following types.", |
| "cite_spans": [ |
| { |
| "start": 292, |
| "end": 311, |
| "text": "[Chen et al., 1996]", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 465, |
| "end": 485, |
| "text": "[Huang et al., 1997]", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Unknown words are defined as the words which are not in the lexicon. The following types of unknown words most frequently occur in the Sinica corpus. Table 1 shows the frequency distribution of unknown words of the most frequent 14 categories by examining 3 million-word data from the Sinica corpus.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 150, |
| "end": 157, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "(a) abbreviation (acronym): e.g., m 'China-fuel' (Nb) and \u00b3 'Taiwan-bus' (Nb).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "(Please refer to table 1 for the meaning of each category name; for instance, Nb denotes proper names.)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "It is difficult to identify abbreviations since their morphological structures are very irregular. Their affixes more or less reflect the conventions of the selection of meaning components [Huang 94 ]. However, the affixes of abbreviations are common words which are least informative for indicating the existence of unknown words.", |
| "cite_spans": [ |
| { |
| "start": 189, |
| "end": 198, |
| "text": "[Huang 94", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "(b) proper names: e.g., \u00ab 'Chen-So' (Nb) , \u00b3\u00c4 'Champaign-city' (Nc), and \u00de 'micro-soft' (Nb).", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 40, |
| "text": "'Chen-So' (Nb)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "Proper names can be further classified into 3 sub-categories, i.e., names of people, names of place, and names of organizations. Certain key words are indicators for each different sub-category. For instance, there are about 100 common surnames which are prefix characters of Chinese personal names. The district names, such as 'city', b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "'country' etc., frequently occur as suffixes of the names of places. Identification of company names is as difficult as that of abbreviations since there is no restriction on the choice of morpheme components. (c) derived words: e.g., =\u00b0 'computer-ize' (Vh) .", |
| "cite_spans": [ |
| { |
| "start": 253, |
| "end": 257, |
| "text": "(Vh)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "Derived words have affix morphemes which are strong indicators.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "(d) compounds: e.g., \u00e5 'turn-go'(VCL), \u00e7 'receive-permission' (VE), \u00fc 'search-method' (Na) , and =\u00b0\u00cb 'computer-desk' (Na) .", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 90, |
| "text": "(Na)", |
| "ref_id": null |
| }, |
| { |
| "start": 117, |
| "end": 121, |
| "text": "(Na)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "A compound is a very productive type of unknown word. Nominal and verbal compounds are easily coined by combining two words/characters. Since there are more than 5000 commonly used Chinese characters, each with idiosyncratic syntactic behavior, it is hard to derive a set of morphological rules to generate the set of Chinese compounds. To identify Chinese compounds is, thus, also difficult.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "(e) numeric type compounds: e.g., 1986 \u00a3 '1986-year' (Nd) , ?O .", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 57, |
| "text": "'1986-year' (Nd)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "The characteristic of numeric compounds is that they contain numbers as major components. For instances, dates, time, phone numbers, addresses, numbers, determiner-measure compounds etc. belong to this type. Since digital numbers are the major components of unknown words of this type and their morphological structures are more regular, they can be identified using the morphological rules. Table 1 . The frequency distribution of unknown words in the most frequent categories.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 392, |
| "end": 399, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "From the above discussion, it is seen that identification for each different type of unknown word is difficult and might require adopting different approaches. However, the processes for detecting the occurrences of each different type of unknown word are almost the same since they are all composed of morphemes of characters. In this paper, we focus only on the detection processes and leave the complete identification problem for future research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Unknown Words", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "Unknown words cause segmentation errors because out-of-vocabulary words in an input text normally are incorrectly segmented into pieces of single character word or shorter words. For instance, example (1) would be segmented into (2) after dictionary look-up and resolution of ambiguous segmentation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unknown Word Detection", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "(2) AE \u00ce \" + ! \"\u00c0 1 UR3k ing hero be DET CL typical DE Taiwan university-student", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unknown Word Detection", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "It is difficult to know when an unknown word is encountered since all Chinese characters can be either morphemes or words and there are no blanks to mark word boundaries. Therefore, without (or even with) syntactic or semantic checking, it is difficult to tell whether a character in a particular context is a part of an unknown word or whether it stands alone as a word. As mentioned in section 1.1, compound words and proper names are the two major types of unknown words. It is not possible to list all of the compounds in the lexicon nor possible to write simple rules which can enumerate the compounds without over-generation or under-generation. Each different type of compound must be identified using either content or context dependent rules. Proper names and their abbreviations have less content regularity. Identifying them relies more on contextual information. The occurrence of typographical errors makes the problem even more complicated. There is currently no satisfactory algorithm for identifying both unknown words and typographical errors, but researchers are separately working on each different type of problem.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unknown Word Detection", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "Chang et al. [Chang et al., 94] used statistical methods to identify personal names in Chinese text and achieved a recall rate of 80% and a precision rate of 90%. Similar experiments were reported in [Sun et al., 94] . Their recall rate was 99.77% but with a lower precision of 70.06%. Both papers default with the recognition of Chinese personal names only. Chen & Lee [Chen & Lee 94] used morphological rules and contextual information to identify the names of organizations. Since organizational names are much more irregular than personal names in Chinese, they achieved a recall rate of 54.50% and a precision rate of 61.79%. A pilot study on automatic correction of Chinese spelling errors was done by Chang [Chang 94] . He used mutual information between a character and its neighboring words to detect spelling errors and to then automatically make the necessary corrections. The error detection process achieved a recall rate of 76.64% and a precision rate of 51.72%. Lin et al. [Lin et al., 93 ] did a preliminary study of the problem of unknown word identification. They used 17 morphological rules to recognize regular compounds and a statistical model to deal with irregular unknown words, such as proper names etc. With this unknown word resolution procedure, an error reduction rate of 78.34% was obtained for the word segmentation process. Since there is no standard reference data, the accuracy rates claimed in different papers vary due to different segmentation standards. In this study, we used the Sinica corpus as a standard reference data. As mentioned before, the Sinica corpus is a word-segmented corpus based on the Chinese word segmentation standard for information processing proposed by ROCLING. Therefore, it contains both known words and unknown words which are properly segmented, i.e., separated by blanks. The corpus was utilized for the purposes of training and testing. \nFor unknown word and typographical error identification, the following two steps are proposed. The first step is to detect the existence of unknown words and typographical errors. The second step is the recognition process, which determines the type and boundaries of each unknown word and recognizes typographical errors. The reasons for separating the detection process from the recognition process are as follows:", |
| "cite_spans": [ |
| { |
| "start": 13, |
| "end": 27, |
| "text": "[Chang et al.,", |
| "ref_id": null |
| }, |
| { |
| "start": 28, |
| "end": 31, |
| "text": "94]", |
| "ref_id": null |
| }, |
| { |
| "start": 200, |
| "end": 212, |
| "text": "[Sun et al.,", |
| "ref_id": null |
| }, |
| { |
| "start": 213, |
| "end": 216, |
| "text": "94]", |
| "ref_id": null |
| }, |
| { |
| "start": 370, |
| "end": 385, |
| "text": "[Chen & Lee 94]", |
| "ref_id": null |
| }, |
| { |
| "start": 714, |
| "end": 724, |
| "text": "[Chang 94]", |
| "ref_id": null |
| }, |
| { |
| "start": 988, |
| "end": 1000, |
| "text": "[Lin et al.,", |
| "ref_id": null |
| }, |
| { |
| "start": 1001, |
| "end": 1003, |
| "text": "93", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unknown Word Detection", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "a. For different types of unknown words and typographical errors, they may share the same detection process but have different recognition processes. b. If the common method for spell checking is followed, an unknown word would be detected first, and a search for the best matching words performed next. Recognizing a Chinese word is somewhat different from spell checking, but they have a lot in common. c.If the detection process performs well, the recognition process is better focused, making the total performance more efficient.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unknown Word Detection", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "This paper focuses on the unknown word detection problem only. ( Note that a typographical error is considered as a special kind of unknown word.) The unknown word detection problem and the dictionary-word detection problem are complementary problems since if all known words in an input text can be detected, then the rest of the character string will be unknown words. However, this is not a simple task since there are no blanks to delimit known words from unknown words. Therefore, the word segmentation process is applied first, and known words are delimited by blanks. Since unknown words are not listed in the dictionary, they will be segmented into shorter character/word sequences after a conventional dictionary-look-up word segmentation process. Sentence (3.b) shows the result of the word segmentation process on (3.a):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unknown Word Detection", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "(3) a. \u00e4UR\u00ee.?\u00a3-ae\u00ff\"\u00fbR{S\u00cc\u00c28\u00e0Z\u00c0o\u00db According to an examination of a group of testing data which is a part of the Sinica corpus, 4572 occurrences out of 4632 unknowns were incorrectly segmented into sequences of shorter words, and each sequence contained at least one monosyllabic word. That is, 60 of the unknown words were segmented into sequences of multi-syllabic words only. Therefore, occurrences of monosyllabic words (i.e., single character words) in the segmented input text may denote the possible existence of unknown words. This is reasonable since it is very rare for compounds or proper names to be composed of several multi-syllabic words. Therefore, the process of detecting unknown words is equivalent to making a distinction between monosyllabic words and monosyllabic morphemes which are parts of unknown words. Hence, the complementary problem of unknown word detection is the problem of monosyllabic known-word detection. If all of the occurrences of monosyllabic words are considered as possible morphemes of unknown words, the precision of prediction is very low. When the word segmentation process was applied to the testing data taken from the Sinica corpus using a conventional dictionary look-up method, 69733 occurrences of monosyllabic words were found, but only 9343 were parts of unknown words, a precision of 13.40%. In order to improve the precision, monosyllabic words, which properly fit the contextual environment, should be identified and should not be considered as possible morphemes of unknown words. In the next section, the corpus-based learning approach to identification of contextually-proper monosyllabic words is introduced. In section 3, experimental results are presented, including a performance comparison between a hand-crafted method and the proposed corpus-based learning method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unknown Word Detection", |
| "sec_num": "1.2" |
| }, |
| { |
| "text": "The procedure for detecting unknown words is roughly divided into three steps: 1. word segmentation, 2. part-of-speech tagging, and 3. identification of contextually-proper monosyllabic words. The word segmentation procedure identifies words using a dictionary look-up method and resolves segmentation ambiguities by maximizing the probability of a segmented word sequence [Chiang 92, Chang 91, Sproat 96] or by using heuristic methods [Chen 92, Lee 91] . Either method can achieve very satisfactory results.", |
| "cite_spans": [ |
| { |
| "start": 373, |
| "end": 384, |
| "text": "[Chiang 92,", |
| "ref_id": null |
| }, |
| { |
| "start": 385, |
| "end": 394, |
| "text": "Chang 91,", |
| "ref_id": null |
| }, |
| { |
| "start": 395, |
| "end": 405, |
| "text": "Sproat 96]", |
| "ref_id": null |
| }, |
| { |
| "start": 436, |
| "end": 445, |
| "text": "[Chen 92,", |
| "ref_id": null |
| }, |
| { |
| "start": 446, |
| "end": 453, |
| "text": "Lee 91]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Both have an accuracy rate of over 99%. For the purpose of unknown word identification, some regular types of compounds, such as numbers, determinant-measure compounds, and reduplication, which have regular morphological structures, are also identified by means of their respective morphological rules during the word segmentation process [Chen 92, Lin 93] . The second step, part-of-speech (pos) tagging, is carried out to support step3 and the later process of unknown word identification. After pos tagging, sentence (3.b) becomes sentence (4); each word contains a unique pos:", |
| "cite_spans": [ |
| { |
| "start": 339, |
| "end": 348, |
| "text": "[Chen 92,", |
| "ref_id": null |
| }, |
| { |
| "start": 349, |
| "end": 356, |
| "text": "Lin 93]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "(4) \u00e4 (BOUND) (Nf) UR (Nb) \u00ee (VC) .?\u00a3 (DM) -ae\u00ff (Nb) Tsuku -ba university invite '73 Nobel", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 33, |
| "text": "(VC)", |
| "ref_id": null |
| }, |
| { |
| "start": 48, |
| "end": 52, |
| "text": "(Nb)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\"\u00fbR (Na) { (Na) S\u00cc (Na) \u00c2 (Na) 8 (BOUND) \u00e0Z (VG) physics award winner Esa -ki be \u00c0o (Na)p rincipal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Although the pos sequence may not be 100% correct, it is the most probable pos sequence in terms of pos bi-gram statistics [Liu 95] . The details of the first two steps are not the major concern of this paper. The focus here is on the step of identifying contextually-proper monosyllabic words. Hereafter, for simplicity, the term 'proper-character' will denote a contextually-proper monosyllabic word, and the term 'improper-character' will be used to denote a contextually-improper monosyllabic word which might be part of an unknown word. The way to identify proper-characters is to check the following properties:", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 131, |
| "text": "[Liu 95]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "(1) a proper-character should not be a bound-morpheme, and (2) the context of a proper-character should be grammatical.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Hence, if a character is a bound-morpheme, it will be considered as possibly being an unknown word. However, almost any Chinese character can function either as a word or as a bound morpheme. A character's functional role is contextually dependent. Therefore, every monosyllabic word should be checked in its context for grammaticality by means of syntactic or semantic rules. For processing efficiency, such rules should be simple and have only local dependencies. It is not feasible to parse whole sentences in order to check whether or not characters are proper-characters. The task is then to derive a set of rules which can be used to check the grammaticality of characters in context. If the rules are too stringent, then too many proper-characters will be considered as improper-characters, resulting in a low precision rate. On the other hand, if the rules are too relaxed, then too many improper-characters will be considered as proper-characters, resulting in a low recall rate. Therefore, there is a tradeoff between recall and precision. In the case of unknown word detection, a higher recall rate and an acceptable precision rate is preferred. Writing handcrafted rules is difficult because there are more than 5000 commonly used Chinese characters, and each of them may behave differently. A corpus-based learning approach is adapted to derive the set of contextual rules and to select the best set of rules by evaluating the performance of each individual rule. The approach is very similar to the error-driven learning method proposed by Brill [Brill 95] . Before the learning method is introduced, two commonly used measures for unknown word detection are defined. These two performance measures will be used throughout the paper:", |
| "cite_spans": [ |
| { |
| "start": 1560, |
| "end": 1570, |
| "text": "[Brill 95]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Recall Rate = # of unknown word detected / total number of unknowns;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Precision Rate = # of correctly detected improper-characters / total # of guesses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "There are two types of unknown words. Type one unknown words include monosyllabic morphemes. Type two unknown words are composed with multi-syllabic words only. Only the detection of type one unknown words is considered here since type two unknown words occur very rarely as we mentioned before. An unknown word is considered successfully detected if any one of its components is detected as an improper-character. It is noted that the numerators for the recall rate and the precision rate are different since if two (or more) components of an unknown word are detected as improper-characters, it is reasonable to count only one word detection but two improper-character detections. For the corpus-based learning method, a training corpus with all the words segmented and pos-tagged is used. The monosyllabic words in the training corpus are instances of proper-characters, and the words in the training corpus which are not in the dictionary are instances of unknown words. Segmenting the unknown words using a dictionary look-up method produces instances of improper-characters. By examining the instances of proper and improper characters and their contexts, the rule patterns and their performance evaluations can be derived and can be represented as triplets (rule pattern, total # of matched instances, # of improper instances). Examples are shown in Appendix1. A contextual dependent rule may be: a uni-gram pattern, such as '{ 1 }', '{ }', '{(Nh)}', '{(T)}', a bi-gram patterns, such as '{ } S ', '{ }(VH)', '(Na){ B }', '{(Dfa)}(Vh)', '(Ve){(Vj)}', or a tri-gram patterns, such as '{ }(VH)(T)', '(Na)(Dfa){ \u00cf }', where the string in the curly brackets will match a proper-character and the other parts will match its context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "A good rule pattern has high applicability and high discrimination value ( i.e., it occurs frequently and matches either proper-characters or improper-characters only, but not both). In fact, no rule has perfect discriminating ability. For instance, the rule '(Na){(Nb)}' can be applied to ' \u00e0 (Na) \u00e5 (Nb)' in (5) and ' \u00c3S (Na) ] (Nb)' in (6). The results are correct in (5) and incorrect in (6): (5) \u00e0 (Na) 3I (Na) \u00e5 (Nb) [f (Na) accounting staff Liu Miss.", |
| "cite_spans": [ |
| { |
| "start": 294, |
| "end": 298, |
| "text": "(Na)", |
| "ref_id": null |
| }, |
| { |
| "start": 310, |
| "end": 313, |
| "text": "(5)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 323, |
| "end": 327, |
| "text": "(Na)", |
| "ref_id": null |
| }, |
| { |
| "start": 371, |
| "end": 374, |
| "text": "(5)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 397, |
| "end": 400, |
| "text": "(5)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 403, |
| "end": 407, |
| "text": "(Na)", |
| "ref_id": null |
| }, |
| { |
| "start": 411, |
| "end": 415, |
| "text": "(Na)", |
| "ref_id": null |
| }, |
| { |
| "start": 418, |
| "end": 422, |
| "text": "(Nb)", |
| "ref_id": null |
| }, |
| { |
| "start": 426, |
| "end": 430, |
| "text": "(Na)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "(6) \u00c3S (Na) ] (Nb) \u00a1 (VC) c3 (Na) academician Yan -strengthen Mr.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 11, |
| "text": "(Na)", |
| "ref_id": null |
| }, |
| { |
| "start": 29, |
| "end": 33, |
| "text": "(Na)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Therefore, a greedy method is adopted in selecting the best set of unknown word detection rules. A set of rules which can identify proper-characters with high accuracy is selected. The rules with applicability greater than a threshold value are sequentially chosen according to the order of their accuracy. The rules for identifying improper-characters was not used because most improper-characters are of low frequency. Conversely, the selected rules were used as the recognition rules for proper-characters. A character matched by any one of the selected rules is considered a proper-character. Characters which are not matched by any one of the rules are considered candidates of improper-characters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus-based Rule Learning for Identifying Monosyllabic Words", |
| "sec_num": "2." |
| }, |
| { |
| "text": "1. Determine the threshold values for rule accuracy and applicability. For each rule Ri, when applied on the training corpus, the rule accuracy(Ri) = Mi / Ti, where Mi is the # of instances of matches of Ri with proper characters; Ti is the total # of matches of Ri. The rule applicability(Ri) = Ti.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule selection algorithm:", |
| "sec_num": null |
| }, |
| { |
| "text": "2. Sequentially select the rules with the highest rule accuracy and applicability greater than the threshold value until there are no rules satisfying both threshold values.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule selection algorithm:", |
| "sec_num": null |
| }, |
| { |
| "text": "The threshold value for rule accuracy controls the precision and recall performance of the final selected rule set. A higher accuracy requirement means fewer improper-characters will be wrongly recognized as proper-characters. Therefore, the performance of such a rule set will have a higher recall value. However, those proper-characters not matched with any rules will be mistaken as improper-characters, which lowers precision. On the other hand, if a lower accuracy threshold value is used, then most of the proper-characters will be recognized, and many of the improper-characters will also be mistakenly recognized as proper-characters, resulting in a lower recall rate and possibly a higher precision rate before reaching the maximal precision value. Therefore, if a detection rule set with a high recall rate is desired, the threshold value of rule accuracy must be set high. If precision is more important, then the threshold value must be properly lowered to an optimal point. A balance between recall and precision should be considered.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule selection algorithm:", |
| "sec_num": null |
| }, |
| { |
| "text": "In the next section, the experimental results of different threshold values are presented. The threshold value for rule applicability controls the number of rules to be selected and ensures that only useful rules are selected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule selection algorithm:", |
| "sec_num": null |
| }, |
| { |
| "text": "The selected rule type may subsume another. Shorter rule patterns are usually more general than longer ones. There are redundant rules in the initial rule selection. A further screening process is needed to remove the redundant rules. The screening process is based on the following fact: if a rule Ri is subsumed by rule Rj, then the pattern of Ri is a sub-string of the pattern of Rj. For example, the rule '{ 1 }' is more general than the rule '{ 1 } (Na)'. If the rule '{ 1 }' is selected, then the rule '{ 1 } (Na)' is redundant and can be removed from the rule set. Since a character matched by any one of the selected rules is considered a proper-character, more specific rules will be redundant and only the most general rules will remain after the screening process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule selection algorithm:", |
| "sec_num": null |
| }, |
| { |
| "text": "Screening Algorithm: 1. Sort the rules according to their string patterns in increasing order, resulting in rules R1...Rn.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule selection algorithm:", |
| "sec_num": null |
| }, |
| { |
| "text": "2. For i from 1 to n, if there is a j such that j < i, and Rj is a sub-string of Ri, then remove Ri.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule selection algorithm:", |
| "sec_num": null |
| }, |
| { |
| "text": "The corpus-based learning method for unknown word detection was tested on the Sinica corpus. The Sinica corpus version 2.0 contains 3.5 million words. 3 million words were used as the training corpus and 0.15 million words for the testing corpus. The word entries in the CKIP lexicon were considered as known words. The CKIP lexicon contains about 80,000 entries of Chinese words with their syntactic categories and grammatical information [CKIP 93] . A word is considered as an unknown word if it is not in the CKIP lexicon and is not identified by the word segmentation program as a foreign word (for instance, English), a number, or a reduplicated compound. There were 93285 unknown words in the training corpus and 4632 unknown words in the testing corpus. A few bi-word compounds were deliberately ignored as unknowns, such as \u00eb R", |
| "cite_spans": [ |
| { |
| "start": 440, |
| "end": 449, |
| "text": "[CKIP 93]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "3." |
| }, |
| { |
| "text": "'analytical chemistry' and~l3I 'technical member', since they are not identifiable by any algorithm which does not incorporate real world knowledge. In addition, whether these are single compounds or noun phrases made up of two words is debatable. In fact, ignoring bi-word compounds did not affect the results very much since the fact that there were only 60 such unknown words out of 4632 shows that they rarely occurred in the corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The following types of rule patterns were generated from the training corpus. Each rule contains a token within curly brackets and its contextual tokens without brackets. For some rules, there may be no contextual dependencies. Rules of the 10 different types of patterns above were generated automatically by extracting each instance of monosyllabic words in the training corpus. Every generated rule pattern was checked for redundancy, and the frequencies of proper and improper occurrences were tallied. For instance, the pattern '{ }' occurred 165980 times in the training corpus; 165916 of these were proper instances and 64 of these were improper instances (i.e., ' ' occurred 64 times as part of an unknown word). Appendix 1 shows some of the rule patterns and their total occurrences counts as well as the number of improper instances. In the initial stage, 1455633 rules were found. After eliminating rules with frequency less than 3, 215817 rules remained. In the next stage, different rule selection threshold values were used to generate 10 different sets of rules. These rule sets were used to detect unknown words in the testing corpus. The testing corpus contained 152560 words. In the first step, the running text of the testing corpus was segmented into words using a dictionary look-up method which were then tagged with their part-of-speech by an automatic tagging process. Each different rule set was applied to detect the unknown words in the testing corpus. A character without a match was considered as part of an unknown word. Appendix 2 shows some examples. The performance results of different rule sets are shown in Table 2 . The experimental results of unknown word detection on the testing corpus.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1643, |
| "end": 1650, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The results show that there is a tradeoff between precision and recall rate, but that the overall performance was much better than that of the handcraft rules written by human experts. They examined the training corpus and wrote up a rule set for proper-characters to the best of their ability. The handcraft rules had a precision rate of 39.11% and a recall rate of 81.45%, which are much lower than the rule set, made using the corpus-based rule learning method. The syntactic complexity of monosyllabic words was the reason for the lower coverage of the handcraft rules. Some handcraft rules are shown in Appendix 4. It is clearly shown that the handcraft rules suffer from low accuracy because a limited number of rules can be derived and the rules are usually too general to achieve high precision rates. There were only 139 hand-crafted rules while the proposed method generated thousands of rules as shown in Table 2 . The number of rules selected increased with the decrement of the accuracy of the rule selection criteria because more rules satisfied the lower accuracy requirement. However, the number of rules after the screening process was lower in accordance with the decrement of the accuracy of the rule selection criteria. For instance, 207059 and 210552 rules were selected, respectively, for the rule accuracy criterion of 98% and 95%, but after the screening process, the number of rules was 45839 and 31309. The reason for this is that achieving a higher accuracy requires more contextual dependency rules to discriminate between proper-characters and improper-characters. On the other hand, a lower accuracy requirement may cause the inclusion of many more short rules. This causes a lot of long rules to be subsumed by shorter rules eliminated during the screening process.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 916, |
| "end": 923, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The corpus-based learning approach proved to be an effective and easy method for finding unknown word detection rules. The advantages of using a corpus-based method are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Research", |
| "sec_num": "4." |
| }, |
| { |
| "text": "a. The syntactic patterns of proper-characters are complicated and numerous. It is hard to hand-code each different pattern, yet most high frequency patterns are extractable from the corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Research", |
| "sec_num": "4." |
| }, |
| { |
| "text": "b. The corpus provides standard reference data not only for rule generation, but also for rule evaluation. The hand-crafted rules can also be evaluated automatically and incorporated into the final detection rule set if the rule has a high accuracy rate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Research", |
| "sec_num": "4." |
| }, |
| { |
| "text": "c. It is easy to control the balance between the precision and the recall of the detection algorithm since we know the performance of each detection rule based on the training corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Research", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Different types of unknown words have different levels of difficulty in identification. The detection of compounds is the most difficult because some of their morphological structures are similar to common syntactic structures. The detection of proper names and typographical errors is believed to be easier because of their irregular syntactic patterns. The results with respect to different types of syntactic categories were checked. Appendix 3 shows that the recall rates of proper names ( i.e., category Nb) were less affected by the higher precision requirement. There was no data for typographical errors, but the detection of typographical errors is believed to be similar to the detection of proper names; that is, a higher precision can be achieved without sacrificing the recall rate. If parallel corpora with and without typographical errors are available, the corpus-based rule learning method can also be applied to the detection of typographical errors in Chinese.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Research", |
| "sec_num": "4." |
| }, |
| { |
| "text": "After the unknown word detection process, an identification algorithm will be required to find the exact boundaries and the part-of-speech of each unknown word. This will require future research. Different types of rules will be required in identifying different compounds and proper names. The corpus can still play an essential role in the generation of rules and in their evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Research", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Sproat, R., C. Shih, W. Gale, & N. Chang, \"A Stochastic Finite-State Word-Segmentation Algorithm for Chinese,\" Computational Linguistics, 22.3 (1996) ", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 149, |
| "text": "Chinese,\" Computational Linguistics, 22.3 (1996)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Research", |
| "sec_num": "4." |
| }, |
| { |
| "text": "K. J. Chen, M. H. Bai", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Unknown Word Detection by Corpus-based Method", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors wish to thank Dr. Charles Lee and the anonymous reviewers for their useful comments on this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": "The first line contains the original text. The second line shows the result of word segmentation and pos tagging. The third line is the result of unknown word detection, where improper-characters are marked with '(?)'. ********************************** \u00b6\u00b4\"z\u00e3~ \u00b6 (Nepa)\u00b4\" (Na) (Na) \u00f1\u00d0 ()(D) ()(D) 7 ()(VH) ()(Na) \u00a2\u00a6 ()(VC) z! ()(DM) \u00fdI ()(A) 3 Q () (Na) The first column shows the categories of unknown words. The second column is the number of occurrences of the unknown words in the category shown in column one. The third column is the recall rates of the unknown words detected under different rule sets. The items in the curly brackets match a proper-character and the items in the round brackets match its context according to their linear order. The symbol ',' in the rules denotes an 'or' relation.", |
| "cite_spans": [ |
| { |
| "start": 272, |
| "end": 276, |
| "text": "(Na)", |
| "ref_id": null |
| }, |
| { |
| "start": 277, |
| "end": 281, |
| "text": "(Na)", |
| "ref_id": null |
| }, |
| { |
| "start": 349, |
| "end": 353, |
| "text": "(Na)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Appendix 2. Samples of testing results.", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Transformation-Based Error-Driven Learning and Natural Language Processing: A Case Study in Part-of-Speech Tagging", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Brill", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Computational Linguistics", |
| "volume": "21", |
| "issue": "", |
| "pages": "543--566", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brill, Eric, \"Transformation-Based Error-Driven Learning and Natural Language Processing: A Case Study in Part-of-Speech Tagging,\" Computational Linguistics 21.4 (1995), pp.543-566.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Word Segmentation through Constraint Satisfaction and Statistical Optimization", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "S" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "&", |
| "middle": [ |
| "S D" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Proceedings of ROCLING IV", |
| "volume": "", |
| "issue": "", |
| "pages": "147--165", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang, J. S., C. D. Chen, & S. D. Chen, \"Word Segmentation through Constraint Satisfaction and Statistical Optimization,\" Proceedings of ROCLING IV (1991), pp. 147-165.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A Pilot Study on Automatic Chinese Spelling Error Correction", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "143--149", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang, C. H., \"A Pilot Study on Automatic Chinese Spelling Error Correction\" Communication of COLIPS, 4.2 (1994), pp.143-149.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A Multiple-Corpus Approach to Recognition of Proper Names in Chinese Texts", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "S" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "D" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "J" |
| ], |
| "last": "Ker", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "&", |
| "middle": [ |
| "J" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Computer Processing of Chinese and Oriental Languages", |
| "volume": "8", |
| "issue": "1", |
| "pages": "75--85", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang J. S.,S.D. Chen, S. J. Ker, Y. Chen, & J. Liu,1994 \"A Multiple-Corpus Approach to Recognition of Proper Names in Chinese Texts\", Computer Processing of Chinese and Oriental Languages, 8.1(1994), pp.75-85.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The Identification of Organization Names in Chinese Texts", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "C" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "131--142", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen, H.H., & J.C. Lee, \"The Identification of Organization Names in Chinese Texts\", Communication of COLIPS, 4.2(1994), pp. 131-142.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "SINICA CORPUS: Design Methodology for Balanced Corpora", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [ |
| "J" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "R" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "P L" |
| ], |
| "last": "Chang & H", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of PACLIC 11th Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "167--176", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen, K.J., C.R. Huang, L. P. Chang & H.L. Hsu, \"SINICA CORPUS: Design Methodology for Balanced Corpora,\" Proceedings of PACLIC 11th Conference (1996), pp.167-176.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Word Identification for Mandarin Chinese Sentences", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [ |
| "J S H" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Proceedings of 14th Coling", |
| "volume": "", |
| "issue": "", |
| "pages": "101--107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen, K.J. & S.H. Liu, \"Word Identification for Mandarin Chinese Sentences,\" Proceedings of 14th Coling (1992), pp. 101-107.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Statistical Models for Word Segmentation and Unknown Word Resolution", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chiang", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "&", |
| "middle": [ |
| "K Y" |
| ], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Proceedings of ROCLING V", |
| "volume": "", |
| "issue": "", |
| "pages": "121--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chiang, T. H., M. Y. Lin, & K. Y. Su, \"Statistical Models for Word Segmentation and Unknown Word Resolution,\" Proceedings of ROCLING V (1992), pp. 121-146.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "The Introduction of Sinica Corpus", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "R" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of ROCLING VIII", |
| "volume": "", |
| "issue": "", |
| "pages": "81--89", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huang, C. R. Et al.,\"The Introduction of Sinica Corpus,\" Proceedings of ROCLING VIII (1995), pp. 81-89.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Segmentation Standard for Chinese Natural Language Processing", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "R" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "J" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Li-Li", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "International Journal of Computational Linguistics and Chinese Language Processing", |
| "volume": "2", |
| "issue": "2", |
| "pages": "47--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huang, C.R., K.J. Chen, & Li-Li Chang, \"Segmentation Standard for Chinese Natural Language Processing,\" International Journal of Computational Linguistics and Chinese Language Processing 2.2 (1997), pp. 74-62.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Rule-based Word Identification for Mandarin Chinese Sentences-A Unification Approach", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "J C L" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Yeh", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "", |
| "volume": "5", |
| "issue": "", |
| "pages": "97--118", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lee,H.J. & C.L. Yeh, \"Rule-based Word Identification for Mandarin Chinese Sentences-A Unification Approach,\" Computer Processing of Chinese and Oriental Languages, 5.1 (1991), pp. 97-118.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A Preliminary Study on Unknown Word Problem in Chinese Word Segmentation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chiang", |
| "suffix": "" |
| }, |
| { |
| "first": "&", |
| "middle": [ |
| "K Y" |
| ], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Proceedings of ROCLING VI", |
| "volume": "", |
| "issue": "", |
| "pages": "119--137", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin, M. Y., T. H. Chiang, & K. Y. Su, \"A Preliminary Study on Unknown Word Problem in Chinese Word Segmentation,\" Proceedings of ROCLING VI (1993), pp. 119-137.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Computer Processing of Chinese and Oriental Languages", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "H" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "J" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "P" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chin", |
| "suffix": "" |
| }, |
| { |
| "first": "; P", |
| "middle": [], |
| "last": "Da", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Dk", |
| "suffix": "" |
| }, |
| { |
| "first": "Dm ){", |
| "middle": [], |
| "last": "Neqa", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Va", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vac", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vb", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vcl", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vd", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ve", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vf", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vg", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vhc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vj", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vk", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vl", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "V_2", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Shi }", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "9", |
| "issue": "", |
| "pages": "31--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liu S. H., K. J. Chen, L.P. Chang, & Y.H. Chin, \"Automatic Part-of-Speech Tagging for Chinese Corpora,\" Computer Processing of Chinese and Oriental Languages, 9.1(1995), pp.31-48. 1. ( F , ){ Na, Nc } 2. ( Di ){ Na, Nc }( DE ) 3. ( F ){ VH } 4. ( P, Da, Dk, D, Neqa, DM ){ VA, VAC, VB, VC, VCL, VD, VE, VF, VG, VH, VHC, VI, VJ, VK, VL, V_2, SHI }", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Unknown Word Detection by Corpus-based Method VL", |
| "authors": [ |
| { |
| "first": "{", |
| "middle": [], |
| "last": "Da", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Dk", |
| "suffix": "" |
| }, |
| { |
| "first": "}(", |
| "middle": [], |
| "last": "Neqa", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Va", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vac", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vb", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vcl", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vd", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ve", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vf", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vg", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vhc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vj", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vk", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "{ Da, Dk, D, Neqa }( VA, VAC, VB, VC, VCL, VD, VE, VF, VG, VH, VHC, VI, VJ, VK, Unknown Word Detection by Corpus-based Method VL, V_2, SHI )", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "content": "<table><tr><td colspan=\"2\">Tsuku -ba university invite \u00c2 8 \u00e0Z \u00c0o\u00ca '73</td><td>Nobel</td><td>physics award winner</td></tr><tr><td>sa -ki be</td><td>principal.</td><td/></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "nknown Word Detection by Corpus-based Method 'The University of Tsukuba invited the winner of the '73 Nobel Award in Physics, Esaki, to be the Principal.' b. \u00e4 UR \u00ee .?\u00a3 -ae\u00ff \"\u00fbR { S\u00cc" |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>, and the detail statistics are</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "" |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td>Appendix rule</td><td>frequency</td><td>error</td><td>accuracy</td></tr><tr><td colspan=\"4\">========================================</td></tr><tr><td>{1}</td><td>165980</td><td>64</td><td>99.71 %</td></tr><tr><td>{}</td><td>41089</td><td>78</td><td>98.10 %</td></tr><tr><td>{H}</td><td>16066</td><td>11</td><td>99.31 %</td></tr><tr><td>{}</td><td>6185</td><td>4</td><td>99.35 %</td></tr><tr><td>{}</td><td>5046</td><td>1</td><td>99.80 %</td></tr><tr><td>{\u00a8}</td><td>4582</td><td>3</td><td>99.34 %</td></tr><tr><td>{\u00e0}</td><td>2302</td><td>2</td><td>99.13 %</td></tr><tr><td>{(T)}</td><td>177641</td><td>177</td><td>99.00 %</td></tr><tr><td>{(Nh)}</td><td>73034</td><td>344</td><td>99.53 %</td></tr><tr><td>{(Caa)}</td><td>46659</td><td>392</td><td>99.16 %</td></tr><tr><td>{(SHI)}</td><td>41089</td><td>78</td><td>99.81%</td></tr><tr><td>{(Dfa)}(VH)</td><td>11037</td><td>39</td><td>99.65 %</td></tr><tr><td>{(Nh)}(Na)</td><td>6640</td><td>62</td><td>99.07 %</td></tr><tr><td>{(P)}(Nh)</td><td>6247</td><td>52</td><td>99.17 %</td></tr><tr><td>{(Nep)}(Na)</td><td>4030</td><td>26</td><td>99.35 %</td></tr><tr><td>(Na){(VCL)}</td><td>8062</td><td>299</td><td>96.30 %</td></tr><tr><td>(VC){(Di)}</td><td>4155</td><td>76</td><td>98.18 %</td></tr><tr><td>(VE){(VJ)}</td><td>1884</td><td>46</td><td>97.56 %</td></tr><tr><td>(VJ){(VJ)}</td><td>1489</td><td>53</td><td>96.44 %</td></tr><tr><td>(VJ){(Dfa)}</td><td>1004</td><td>5</td><td>99.50 %</td></tr><tr><td>{8}(Na)</td><td>3933</td><td>6</td><td>99.85 %</td></tr><tr><td>{}(Na)</td><td>2831</td><td>18</td><td>99.36 %</td></tr><tr><td>{}(VC)</td><td>2451</td><td>2</td><td>99.92 %</td></tr><tr><td>(VH){}</td><td>1787</td><td>14</td><td>99.22 %</td></tr><tr><td>(VC){C}</td><td>1731</td><td>1</td><td>99.94 %</td></tr><tr><td>(Na){\u00f0}</td><td>1172</td><td>0</td><td>100 %</td></tr><tr><td colspan=\"2\">{f}(VC)(Na) 221</td><td>0</td><td>100 %</td></tr><tr><td colspan=\"2\">{\u00d7}(Na)(VH) 
200</td><td>0</td><td>100 %</td></tr><tr><td>{z}(Na)(Na)</td><td>190</td><td>3</td><td>98.42 %</td></tr><tr><td>{}(VH)(T)</td><td>187</td><td>1</td><td>99.47 %</td></tr><tr><td colspan=\"2\">(Na)(Dfa){\u00cf} 263</td><td>0</td><td>100 %</td></tr><tr><td colspan=\"2\">(Na)(VH){} 248</td><td>1</td><td>99.60 %</td></tr><tr><td>(Na)(Na){\u00b4}</td><td>231</td><td>2</td><td>99.14 %</td></tr><tr><td>(T)(Na){}</td><td>174</td><td>0</td><td>100 %</td></tr><tr><td>{}5</td><td>139</td><td>1</td><td>99.28 %</td></tr><tr><td>{i}5</td><td>124</td><td>0</td><td>100 %</td></tr><tr><td>{}</td><td>121</td><td>0</td><td>100 %</td></tr><tr><td colspan=\"2\">{\u00f1}~117 1{\u00e9} 1406</td><td>0 2</td><td>100 % 99.86 %</td></tr><tr><td>{z}</td><td>319</td><td>0</td><td>100 %</td></tr><tr><td colspan=\"4\">========================================</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": ", pp.377-404. Sun, M. S., C.N. Huang, H.Y. Gao, & Jie Fang, \"Identifying Chinese Names in UnrestrictedTexts\", Communication ofCOLIPS, 4.2 (1994), pp. 113-122." |
| } |
| } |
| } |
| } |