| { |
| "paper_id": "O05-1017", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:58:27.240558Z" |
| }, |
| "title": "Applying Maximum Entropy to Robust Chinese Shallow Parsing", |
| "authors": [ |
| { |
| "first": "Shih-Hung", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Academia Sinica", |
| "location": { |
| "country": "Taiwan, R.O.C" |
| } |
| }, |
| "email": "shwu@iis.sinica.edu.tw" |
| }, |
| { |
| "first": "Cheng-Wei", |
| "middle": [], |
| "last": "Shih", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Academia Sinica", |
| "location": { |
| "country": "Taiwan, R.O.C" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chia-Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Academia Sinica", |
| "location": { |
| "country": "Taiwan, R.O.C" |
| } |
| }, |
| "email": "cwwu@iis.sinica.edu.tw" |
| }, |
| { |
| "first": "Tzong-Han", |
| "middle": [], |
| "last": "Tsai", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Academia Sinica", |
| "location": { |
| "country": "Taiwan, R.O.C" |
| } |
| }, |
| "email": "thtsai@iis.sinica.edu.tw" |
| }, |
| { |
| "first": "Wen-Lian", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Academia Sinica", |
| "location": { |
| "country": "Taiwan, R.O.C" |
| } |
| }, |
| "email": "hsu@iis.sinica.edu.tw" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recently, shallow parsing has been applied to various information processing systems, such as information retrieval, information extraction, question answering, and automatic document summarization. A shallow parser is suitable for online applications, because it is much more efficient and less demanding than a full parser. In this research, we formulate shallow parsing as a sequential tagging problem and use a supervised machine learning technique, Maximum Entropy (ME), to build a Chinese shallow parser. The major features of the ME-based shallow parser are POSs and the context words in a sentence. We adopt the shallow parsing results of Sinica Treebank as our standard, and select 30,000 and 10,000 sentences from Sinica Treebank as the training set and test set respectively. We then test the robustness of the shallow parser with noisy data. The experiment results show that the proposed shallow parser is quite robust for sentences with unknown proper nouns.", |
| "pdf_parse": { |
| "paper_id": "O05-1017", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recently, shallow parsing has been applied to various information processing systems, such as information retrieval, information extraction, question answering, and automatic document summarization. A shallow parser is suitable for online applications, because it is much more efficient and less demanding than a full parser. In this research, we formulate shallow parsing as a sequential tagging problem and use a supervised machine learning technique, Maximum Entropy (ME), to build a Chinese shallow parser. The major features of the ME-based shallow parser are POSs and the context words in a sentence. We adopt the shallow parsing results of Sinica Treebank as our standard, and select 30,000 and 10,000 sentences from Sinica Treebank as the training set and test set respectively. We then test the robustness of the shallow parser with noisy data. The experiment results show that the proposed shallow parser is quite robust for sentences with unknown proper nouns.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Parsing is a basic technique in natural language processing; however, a full parser is usually costly and slow. Recently, shallow parsing has been applied to various information processing systems [12] . Compared to the performance of full parsers, a shallow parser is much faster and the parsing result is more useful for various applications, such as information retrieval and extraction, question answering, and automatic document summarization. In this paper, we adopt a machine learning approach to the Chinese shallow parsing problem.", |
| "cite_spans": [ |
| { |
| "start": 197, |
| "end": 201, |
| "text": "[12]", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Chinese full parsing is very challenging, [18, 22] because it is difficult to achieve high accuracy, and the performance is not suitable for online applications. Shallow parsing of Chinese, on the other hand, is promising and desirable in terms of efficiency. Researchers in Beijing, Harbin, Shenyang, and Hong Kong have also developed related techniques [10, 15, 16, 20, 21] . Most of these works use machine learning approaches, instead of the rule-based approach used in full parsing. Popular machine learning methods such as SVM, CRF, and ME, have been tested. The parsing speed of each approach is fast and the parsing accuracy is acceptable.", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 46, |
| "text": "[18,", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 47, |
| "end": 50, |
| "text": "22]", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 355, |
| "end": 359, |
| "text": "[10,", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 360, |
| "end": 363, |
| "text": "15,", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 364, |
| "end": 367, |
| "text": "16,", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 368, |
| "end": 371, |
| "text": "20,", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 372, |
| "end": 375, |
| "text": "21]", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Currently, there is no standard for Chinese shallow parsing. Li et. al. [9] developed a Chinese shallow parsed treebank to extract Chinese collocations automatically and built a large collocation bank.", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 75, |
| "text": "[9]", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "There are also some works on a standard for Chinese shallow parsing [9, 19, 20] . Nevertheless, the POS standard and vocabulary in each approach are different; thus, between simplified Chinese and traditional Chinese, we cannot adopt their standard for simplified Chinese to traditional Chinese.", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 71, |
| "text": "[9,", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 72, |
| "end": 75, |
| "text": "19,", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 76, |
| "end": 79, |
| "text": "20]", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Instead, we use the first level of the parsing results of Sinica Treebank as our shallow parsing standard [4] . Originally, Sinica Treebank was designed to provide full parsing results, whereby sentences could be labeled with POS tags and the full parsing structure. There are 54,000 sentences in Sinica Treebank, from which we randomly selected 30,000 and 10,000 sentences as the training set and test set respectively.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 109, |
| "text": "[4]", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Since there are many unknown words in Chinese [11] , a Chinese shallow parser must be robust against such words [22] . For example, it is not hard to correctly chunk the sentence \"\u9ad8\u6f38\uf9ea/\u64ca\u7b51/\u7684/ \u97f3\u8abf/\u5ffd\u7136/\u6025\u8f49\u6210/\u60b2\u58ef\" into \"\u9ad8\u6f38\uf9ea\u64ca\u7b51\u7684\u97f3\u8abf/NP \u5ffd\u7136/Dd \u6025\u8f49\u6210/DM \u60b2\u58ef/VP\", if we know that \"\u9ad8\u6f38\uf9ea\" is a proper noun. However, if the name is unknown, it could be split into three single characters and tagged with the three POS of the single characters, i.e., \"\u9ad8/\u6f38/\uf9ea [VH13/Dd/P15]\". It might then be incorrectly chunked as \"\u9ad8\u6f38/NP \uf9ea\u64ca\u7b51\u7684\u97f3\u8abf/PP \u5ffd\u7136/Dd \u6025\u8f49\u6210/DM \u60b2\u58ef/VP\".", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 50, |
| "text": "[11]", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 112, |
| "end": 116, |
| "text": "[22]", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "In this research, we simulate unknown words by adding some noises to the corpus in order to test the robustness of the shallow parser. Since new proper nouns are normally unknown, we design three ways to add noises to the training and testing sets by treating proper nouns as unknown words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Sinica Treebank provides a full parse tree for each sentence. Here, we use the first-layer parsing results of Sinica Treebank as the standard for shallow parsing. Instead of using all the phrase tags in Sinica Treebank, we annotate five of them for chunking; all other phrases (including single words not in any phrase) are tagged as others (X). The five tags, namely, noun phrase (NP), verb phrase (VP), preposition phrase (PP), geographic phrase (GP), and clause (S), are the major tags in Sinica Treebank, and therefore play significant syntactical roles. Thus, the constituents of the root node of a parse tree are NP, VP, PP, GP, S, and X. Table 1 lists examples of the six types of constituent. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 645, |
| "end": 652, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Shallow Parsing Standard", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Parsing is a fundamental technique in natural language processing, the results of which can be used to improve various natural language tasks, such as word-sense disambiguation (WSD) [3] and part-of-speech (POS) tagging [12] .", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 186, |
| "text": "[3]", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 220, |
| "end": 224, |
| "text": "[12]", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Maximum Entropy-based Shallow Parser", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Many natural language processing tasks, such as part-of-speech tagging, named-entity recognition, and shallow parsing, can be viewed as sequence analysis tasks. Shallow parsing identifies the non-recursive core of each phrase type in a text as a precursor to full parsing or information extraction [1, 6] . The paradigmatic shallow parsing problem is called NP chunking, which finds the non-recursive cores of noun phrases called base NPs. Ramshaw and Marcus introduced NP chunking as a machine-learning problem [14] .", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 301, |
| "text": "[1,", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 302, |
| "end": 304, |
| "text": "6]", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 512, |
| "end": 516, |
| "text": "[14]", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Maximum Entropy-based Shallow Parser", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Machine learning techniques, such as maximum entropy (ME) and conditional random fields (CRF), are quite popular for sequential tagging. We adopt ME to build a robust Chinese shallow parser.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Maximum Entropy-based Shallow Parser", |
| "sec_num": "3." |
| }, |
| { |
| "text": "In this work, we regard each word as a token, and consider a test corpus and a set of n phrase categories. Since a phrase can have more than one token, we associate two tags, Given a set of features and a training corpus, the ME estimation process produces a model in which every feature f i has a weight \u03b1 i . This allows us to compute the conditional probability as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The B-I-O Scheme of Our Shallow Parser", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u220f = i o h f i i h Z h o p ) , ( ) ( 1 ) | ( \u03b1 ,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "The B-I-O Scheme of Our Shallow Parser", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where Z(h) is a normalization factor. Intuitively, the probability is the multiplication of the weights of active features (i.e., those f i (h,o) = 1). The weight \u03b1 i is estimated by means of a procedure called Generalized Iterative Scaling (GIS) [8] , which improves the estimation of the weights at each iteration.", |
| "cite_spans": [ |
| { |
| "start": 247, |
| "end": 250, |
| "text": "[8]", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The B-I-O Scheme of Our Shallow Parser", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The ME estimation technique guarantees that, for every feature f i , the expected value of \u03b1 i will be equal to the empirical expectation of \u03b1 i in the training corpus. ME allows the designer to concentrate on finding the features that characterize the problem, while letting the ME estimation routine deal with assigning relative weights to the features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The B-I-O Scheme of Our Shallow Parser", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "After an ME model has been trained and the proper weight \u03b1 i has been assigned to each feature f i , decoding (i.e., marking up) a new piece of text becomes a simple task. First, the model tokenizes the text and preprocesses the test sentence. Then, for each token, it checks which features are active and combines the \u03b1 i of the active features according to Equation 2. Finally, a Viterbi search is run to find the highest probability path through the lattice of conditional probabilities that does not produce any invalid tag sequences. Further details of the Viterbi search can be found in [17] .", |
| "cite_spans": [ |
| { |
| "start": 593, |
| "end": 597, |
| "text": "[17]", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "By comparing models with and without noisy training data, we can determine whether our Chinese shallow parser is noisy-data-tolerant. In this section, we describe how we add noisy data to maximum entropy models and evaluate the tolerance of our system to Chinese chunking.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Sinica Treebank contains more than 54,000 sentences, from which we randomly extract 30,000", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data and Features", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "for training and 10,000 for testing. The tokenized results and the corresponding part-of-speech sequences of these sentences are extracted into a feature file, and the top-level chunks of the parsing tree structure can be taken as the standard for training and evaluation. The information in the feature file is translated into machine learning features by ME model in both the training and testing phrases. The accuracy of chunking in this model is then compared with that of models containing noise to observe the difference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data and Features", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The most important issue in noisy model generation is how to mix noisy features with correct features as smoothly as in a real parsing system. We design three methods for adding noise to generate different types of models with noisy tokenization and POS sequences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Noise Model Generation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The first two approaches are based on unknown word replacement. We find that unknown words are one of the major causes of noisy data in real world system processing, because most unknown words are proper nouns. Theoretically, we can pick a certain number of proper nouns in the selected data and substitute them with noisy data to simulate real world input. In our experiment, \"Nb\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Noise Model Generation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "and \"Nc\", which are defined as \"proper nouns\" and \"proper location nouns\" respectively in the Sinica Treebank tagging guideline [5] , are chosen as replacement targets. Words with these two target POS are regarded as replacement target strings and replaced by noisy data.", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 131, |
| "text": "[5]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Noise Model Generation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We adopt two types of noisy data for unknown word replacement. The first is the split character sequence of a replacement target string in a sentence. Initially, we extract the correct tokenization results and POS sequences of all data in the Sinica Treebank with \"Nb\" and \"Nc\". Then, wherever applicable, we split the replacement target string in a sentence into single Chinese characters. The corresponding POS tag of each split character is re-assigned by selecting the most frequent POS tags of these single characters in Sinica Treebank. For example, \"\u99ac\uf92d\u897f\u4e9e\" (Malaysia) would be split into \"\u99ac\", \"\uf92d\", \"\u897f\", and \"\u4e9e\", and the original POS tag \"Nca\" would be replaced by the pos tags of four single characters: \"Nab\", \"Dbab\", \"Ncda\", and \"Nca\". In this experiment, we control the amount of noisy data in models to observe the relation between the percentage of imprecise data and the chunking performance. The model generated by this approach is called a Type 1 noise model. Another approach, called the Type 2 noise model, tokenizes the replacement target with AUTOTAG, which may produce segmenting boundaries and POS tags that differ from those in Sinica Treebank. The information is then used as noisy features and replaces the target string. For instance, the replacement target string \"\u592a\u767d\uf90a \u661f\" with POS tag \"Nb\" would be tagged by AUTOTAG as \"\u592a\u767d/Nb\" and \"\uf90a\u661f/Nb\". The above noise-adding approaches are used to generate training data, as well as various kinds of noisy information in the test sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Noise Model Generation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In addition, we adopt an automatic tool, CKIP AUTOTAG [7] , to obtain the tokenization information and POS features for generating models. This is a Chinese tokenizing tool that can deal with word segmentation in both the training and testing sets. CKIP AUTOTAG provides the POS sequences of the sentences. The tokenized sentences and POS sequences produced by AUTOTAG are used to generate feature files for ME processing.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 57, |
| "text": "[7]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Noise Model Generation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In our experiment, we adopt the B-I-O scheme to identify the boundaries of Chinese chunks and the position of each element word in the chunks. In addition, we employ the following four standards when calculating the accuracy of Chinese shallow parsing: evaluation by token, by chunk boundary, by chunk category sequence, and by chunks. Token evaluation is based on the number of Chinese words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "All words in the test data can be verified independently to determine if they have the correct boundaries and belong to the right chunks. Evaluation by chunk boundary only checks the boundaries of each chunk, while evaluation by chunk category sequence only checks if all the chunks in a sentence can be identified successfully and disregards the constituents. By contrast, in chunk evaluation, the basic unit is the whole chunk, and only a chunk with the right constituents and tagged with proper categories can be considered correct. We use an example to demonstrate the evaluation process. The input sentence is \"\u5c0f\u670b\u53cb \u63db\u6210 \u4f60 \uf92d \u8a66\u8a66\u770b\", which consists of five tokens; and the standard parsing result is \"\u5c0f\u670b \u53cb/NP \u63db\u6210/VC \u4f60/NP \uf92d-\u8a66\u8a66\u770b/VP\", which contains four chunks. The parsing result we obtain from the system is \"\u5c0f\u670b\u53cb/NP \u63db\u6210/VC \u4f60/NP \uf92d/Db \u8a66\u8a66\u770b/VE\", which contains five chunks. In this case, the accuracy of the chunk boundary and the chunk category are both 3/4=0.75, because the first three chunks in the sentence have the correct boundaries and phrase tags, and the last VP chunk is separated by two units. The token number in this sentence is 5 and the last two tokens have incorrect phrase category tags. Therefore, the accuracy of the token is 3/5=0.6. In chunk evaluation, three of the four chunks are identified successfully and the chunk accuracy is 3/4=0.75. We adopt these evaluation methods in all the experiment configurations in Tables 2 to 5. Tables 2 and 3 . We can observe the trends in the experiment results more intuitively. Table 5 shows the accuracy rates using the model generated by AUTOTAG-parsed data and Sinica Treebank chunking tags. Both the training and the test sets are preprocessed by AUTOTAG. 
This experiment is designed for open testing; thus, we can use the AUTOTAG program to tokenize any sentence and give it POS tags. However, compared to the standard model, the chunking accuracy is lower. The parsing results of the AUTOTAG-parsed model and the Type 2 noise models are shown in Figure 5 . Table 6 shows the parsing examples with Type 1 noise. The shallow parsing results of the first two sentences are correct, while those of the last two sentences are incorrect. Table 7 shows the parsing examples with Type 2 noise. The shallow parsing results of the first and the last sentences are correct, while those of the second and the third sentences are incorrect. Table 8 shows the parsing results using AUTOTAG-parsed training data and test data. The results of the first and last sentences are correct, while those of the second and the third sentences are incorrect.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1447, |
| "end": 1461, |
| "text": "Tables 2 and 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 1534, |
| "end": 1541, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 2008, |
| "end": 2016, |
| "text": "Figure 5", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 2019, |
| "end": 2026, |
| "text": "Table 6", |
| "ref_id": "TABREF9" |
| }, |
| { |
| "start": 2194, |
| "end": 2201, |
| "text": "Table 7", |
| "ref_id": "TABREF10" |
| }, |
| { |
| "start": 2390, |
| "end": 2397, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5." |
| }, |
| { |
| "text": "We replace the original word segmentation and POS tags of all the sentences with AUTOTAG-parsed word segmentation and POS tags. The word segmentation of the last sentence provided by AUTOTAG is incorrect; however, the chunking result is correct. The experiment results show the noise-tolerance of our Chinese shallow parser with two different kinds of noise from unknown proper nouns. The system's performance is only degraded slightly when noisy data is added. Most sentences, such as \"\uf9d1\u5341\uf98e\u4ee3\u7684\u53f0\u7063\u662f\u600e\u6a23\u7684\u5f62\u8c8c\uff02 in which \"\u53f0 \u7063\uff02 is split into two characters and assigned with incorrect POS tags, can still be identified. However, the token accuracy is a little lower than the chunk accuracy, which indicates that our system needs to be improved for chunking longer phrases. In contrast, the chunking accuracy obviously decreases if models fully generated by AUTOTAG-parsed data are used. The difference between the AUTOTAG and Sinica Treebank tag sets probably causes the accuracy to decrease. Furthermore, this suggests that, while the shallow parsing system can deal with unknown nouns, it has difficulty dealing with other kinds of noisy data. For example, data preprocessing errors, such as, incorrect tokenization or wrong tagging in other POS categories, affect the performance of shallow parsing substantially We can not comment on which part-of-speech tags are the major factors in Chinese chunking without conducting additional experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance on Noisy Data", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "For the first application of our shallow parser, we collect some news articles as the test set. The articles did not have standard word segmentation, POS tagging, and parsing results; therefore, we cannot report on the accuracy. However, we find the results interesting. Some examples are given in Table 9 . The left column shows the original sentences tokenized and tagged with POS tags by AUTOTAG. The right column shows the shallow parsing results using our system.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 298, |
| "end": 305, |
| "text": "Table 9", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Use of Our Shallow Parser on News Articles", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "One interesting point is that the shallow parser tends to group named entities into a phrase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Use of Our Shallow Parser on News Articles", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Therefore, the shallow parsing result can be used as a feature for boundary detection in named entity recognition (NER). In sentence 1, \"\u4e2d\u92fc\u516c\u53f8\" is grouped as one phrase, and in sentence 9, \"\u4e2d\u92fc\u516c\u53f8 88 \uf98e\u76c8\u9918\" is grouped as one phrase, without first recognizing that \"\u4e2d\u92fc\u516c\u53f8\" is an entity by NER.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Use of Our Shallow Parser on News Articles", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Another example, in sentence 2 is that \"\u76ca\u83ef\u5728\u82b1\uf999\u7684\u4e09\u68df\u5927\uf94c\" is grouped as one phrase, without first recognizing that \"\u76ca\u83ef\" is a company name. 2 \u76ca/\u83ef/\u5728/\u82b1\uf999/\u7684/\u4e09\u68df/\u5927\uf94c/\u6709/\u4e8c/\u68df/\u662f/\u4e03\u5c64/\u5efa\u7bc9 ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Use of Our Shallow Parser on News Articles", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "[", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Use of Our Shallow Parser on News Articles", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In this paper, we propose a Chinese shallow parser that can chunk Chinese sentences into five chunk types. We test the noise tolerance of the shallow parser and found that the accuracy of data with simulated unknown words only decreases slightly in chunk parsing. We also test our Chinese shallow", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Works", |
| "sec_num": "6." |
| }, |
| { |
| "text": "parser on an open corpus, and found that it yields interesting chunking results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Works", |
| "sec_num": "6." |
| }, |
| { |
| "text": "Tolerance of unknown words is an essential characteristic of a Chinese shallow parser. In this paper, we demonstrate our parser's robustness in handling noisy data from proper nouns. However, we could not verify the robustness of chunking noisy data from other kinds of POS. Thus, adopting other POS systems, such as the Penn Chinese Treebank tagset, for Chinese shallow parsing could prove both interesting and useful. In the future, we will improve our model by adding more types of noise, such as random noise, filled noise, and repeated noise proposed by Osborne [13] . In addition to Sinica", |
| "cite_spans": [ |
| { |
| "start": 567, |
| "end": 571, |
| "text": "[13]", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Works", |
| "sec_num": "6." |
| }, |
| { |
| "text": "Treebank, we will extend our training corpus by incorporating other corpora, such as Penn's Chinese", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Works", |
| "sec_num": "6." |
| }, |
| { |
| "text": "Treebank.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Works", |
| "sec_num": "6." |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research was supported in part by the National Science Council under GRANT NSC94-2752-E-001-001-PAE.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Parsing by Chunks", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "P" |
| ], |
| "last": "Abney", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abney, S.P. Parsing by Chunks. in Berwick, R.C., Abney, S.P. and Tenny, C. eds.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Principle-Based Parsing: Computation and Psycholinguistics", |
| "authors": [], |
| "year": 1991, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "257--278", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Principle-Based Parsing: Computation and Psycholinguistics, Kluwer, Dordrecht, 1991, 257-278.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A maximum entropy approach to natural language processing", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Berger", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "A" |
| ], |
| "last": "Della Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [ |
| "J" |
| ], |
| "last": "Della Pietra", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Computational Linguistics", |
| "volume": "22", |
| "issue": "", |
| "pages": "39--71", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Berger, A., Della Pietra, S.A. and Della Pietra, V.J. A maximum entropy approach to natural language processing. Computational Linguistics, 22. 39-71.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A Statistical Model for Parsing and Word-Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bikel", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "the Joint SIGDAT Conference on Empirical Methods in Natural Language Processing and Very Large Corpora", |
| "volume": "", |
| "issue": "", |
| "pages": "155--168", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bikel, D.M., A Statistical Model for Parsing and Word-Sense Disambiguation. in the Joint SIGDAT Conference on Empirical Methods in Natural Language Processing and Very Large Corpora, (Hong Kong, 2000), 155-168.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Computational Linguistics and Chinese Language Processing", |
| "authors": [ |
| { |
| "first": "F.-Y", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "P.-F", |
| "middle": [], |
| "last": "Tsai", |
| "suffix": "" |
| }, |
| { |
| "first": "K.-J", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "-R. \u4e2d \u6587 \uf906 \u7d50 \u69cb \u6a39 \u8cc7 \uf9be \u5eab \u7684 \u69cb \u5efa", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "4", |
| "issue": "", |
| "pages": "87--104", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen, F.-Y., Tsai, P.-F., Chen, K.-J. and Huang, C.-R. \u4e2d \u6587 \uf906 \u7d50 \u69cb \u6a39 \u8cc7 \uf9be \u5eab \u7684 \u69cb \u5efa . Computational Linguistics and Chinese Language Processing, 4 (2). 87-104.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Sinica Treebank: Design Criteria, Representational Issues and Implementation", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sinica Treebank: Design Criteria, Representational Issues and Implementation. in Abeille, A. ed.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Treebanks Building and Using Parsed Corpora. Language and Speech series", |
| "authors": [], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "231--248", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Treebanks Building and Using Parsed Corpora. Language and Speech series, Kluwer, Dordrecht, 2003, 231-248.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A Stochastic Parts Program and Noun Phrase Parser for Unrestricted Text", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [ |
| "W" |
| ], |
| "last": "Church", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "the Second Conference on Applied Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "136--143", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Church, K.W., A Stochastic Parts Program and Noun Phrase Parser for Unrestricted Text. in the Second Conference on Applied Natural Language Processing, (1988), 136-143.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Generalized iterative scaling for log-linear models", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "N" |
| ], |
| "last": "Darroch", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Ratcliff", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Annals of Mathematical Statistics", |
| "volume": "43", |
| "issue": "", |
| "pages": "1470--1480", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Darroch, J.N. and Ratcliff, D. Generalized iterative scaling for log-linear models. Annals of Mathematical Statistics, 43. 1470-1480.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Building a Chinese Shallow Parsed TreeBank for Collocation Extraction", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "CICLing", |
| "volume": "", |
| "issue": "", |
| "pages": "402--405", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li, B., Lu, Q. and Li, Y., Building a Chinese Shallow Parsed TreeBank for Collocation Extraction. in CICLing, (2003), 402-405.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Machine Learning Approaches for Chinese Shallow Parsers", |
| "authors": [ |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "R.-F", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "International Conference On Machine Learning And Cybernetics", |
| "volume": "", |
| "issue": "", |
| "pages": "2309--2314", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lu, Q., Zhou, J. and Xu, R.-F., Machine Learning Approaches for Chinese Shallow Parsers. in International Conference On Machine Learning And Cybernetics, (Xi'an, 2003), 2309-2314.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A Bottom-up Merging Algorithm for Chinese Unknown Word Extraction", |
| "authors": [ |
| { |
| "first": "W.-Y", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "K.-J", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "the Second SIGHAN Workshop on Chinese Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "31--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ma, W.-Y. and Chen, K.-J., A Bottom-up Merging Algorithm for Chinese Unknown Word Extraction. in the Second SIGHAN Workshop on Chinese Language Processing, (2003), 31-38.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Annotating topological fields and chunks -and revising POS tags at the same time", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [ |
| "H" |
| ], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ule", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Nineteenth International Conference on Computational Linguistics (COLING 2002)", |
| "volume": "", |
| "issue": "", |
| "pages": "695--701", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M\u00fcller, F.H. and Ule, T., Annotating topological fields and chunks -and revising POS tags at the same time. in Nineteenth International Conference on Computational Linguistics (COLING 2002), (Taipei, Taiwan, 2002), ACM, 695-701.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Shallow Parsing using Noisy and Non-Stationary Training Material", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "2", |
| "issue": "", |
| "pages": "695--719", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Osborne, M. Shallow Parsing using Noisy and Non-Stationary Training Material. Journal of Machine Learning Research, 2. 695-719.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Text chunking using transformation-based learning", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "A" |
| ], |
| "last": "Ramshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "P" |
| ], |
| "last": "Marcus", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "The ACL Third Workshop on Very Large Corpora", |
| "volume": "", |
| "issue": "", |
| "pages": "82--94", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramshaw, L.A. and Marcus, M.P., Text chunking using transformation-based learning. in The ACL Third Workshop on Very Large Corpora, (1995), 82-94.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Applying Conditional Random Fields to Chinese Shallow Parsing", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "CICLing", |
| "volume": "", |
| "issue": "", |
| "pages": "167--176", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tan, Y., Yao, T., Chen, Q. and Zhu, J., Applying Conditional Random Fields to Chinese Shallow Parsing. in CICLing, (2005), 167-176.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Chinese Chunk Identification Using SVMs plus Sigmoid", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "The First International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "527--536", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tan, Y., Yao, T., Chen, Q. and Zhu, J., Chinese Chunk Identification Using SVMs plus Sigmoid. in The First International Joint Conference on Natural Language Processing (IJCNLP-04), (2004), 527-536.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Error Bounds for Convolutional Codes and an Asymptotically Optimum Decoding Algorithm", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "J" |
| ], |
| "last": "Viterbi", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "IEEE Transactions on Information Theory", |
| "volume": "", |
| "issue": "", |
| "pages": "260--269", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Viterbi, A.J. Error Bounds for Convolutional Codes and an Asymptotically Optimum Decoding Algorithm. IEEE Transactions on Information Theory, IT. 260-269.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Parsing Chinese with an almost-context-free grammar", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "EMNLP-96, Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "XIA, X. and WU, D., Parsing Chinese with an almost-context-free grammar. in EMNLP-96, Conference on Empirical Methods in Natural Language Processing, (Philadelphia, 1996).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "The Construction of A Chinese Shallow Treebank", |
| "authors": [ |
| { |
| "first": "R.-F", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "the Third SIGHAN Workshop on Chinese Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "94--101", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xu, R.-F., Lu, Q., Li, Y. and Li, W., The Construction of A Chinese Shallow Treebank. in the Third SIGHAN Workshop on Chinese Language Processing, (2004), 94-101.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Extract Chinese Chunk Candidates from Large Corpora", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "To", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "20th International Conference on Computer Processing of Oriental Languages (ICCPOL03)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, L., An Approach to Extract Chinese Chunk Candidates from Large Corpora. in 20th International Conference on Computer Processing of Oriental Languages (ICCPOL03), (ShenYang, P.R.China, 2003).", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Statistics Based Hybrid Approach to Chinese Base Phrase Identification", |
| "authors": [ |
| { |
| "first": "T.-J", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-Y", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "J.-M", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Second Chinese Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "73--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhao, T.-J., Yang, M.-Y., Liu, F., Yao, J.-M. and Yu, H., Statistics Based Hybrid Approach to Chinese Base Phrase Identification. in Second Chinese Language Processing Workshop, (Hong Kong, China, 2001), 73-77.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A block-based robust dependency parser for unrestricted Chinese text", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "The second Chinese Language Processing Workshop attached to ACL2000", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhou, M., A block-based robust dependency parser for unrestricted Chinese text. in The second Chinese Language Processing Workshop attached to ACL2000, (Hong Kong, 2000).", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "The features we adopted are: words, adjacent characters, prefixes of words(1 and 2characters), suffixes of words (1 and 2 characters), word length, POS of words, adjacent POS tags, and the word's location in the chunk it belongs to. To analyze the performance of our shallow parser under noisy conditions, we build a standard model and various noisy models. Training data consisting of the tokenization and POS information derived from the manually annotated Sinica Treebank is used as the standard model in our experiments." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Evaluation of the chunking category in different experiment configurations" |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Evaluation of tokens in different experiment configurations" |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Evaluation of chunks in different experiment configurations" |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Comparison of various experiment configurations using tokenized string noisy data (the Type 2 noise model) and the AUTOTAG-parsed model In Tables 6, 7, and 8, we give examples of the correct and incorrect shallow parsing results of four sentences. In each table, the left column contains the original sentences tokenized and tagged with POS tags; the center column shows the standard chunking result from Sinica Treebank; and the right column shows the shallow parsing result of our system." |
| }, |
| "FIGREF5": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "/\u5728/\u53f0/\u7063/\u548c/\u4e2d/\u570b/\u5927/\uf9d3/\u5c0f \uf96f/\u662f/\u89e3\u653e/\u7684/\u904e\u7a0b [Nab/Nac/P21/Nca/Nab/Caa/Ng/Ncb/ VH13/Nab/Nac/V_11/VC2/DE/Nac] \uf981\u6027\u5f62\u8c61/NP \u5728\u53f0\u7063\u548c \u4e2d\u570b\u5927\uf9d3\u5c0f\uf96f/PP \u662f/V_ \u89e3\u653e\u7684\u904e\u7a0b/NP \uf981\u6027\u5f62\u8c61/NP \u5728\u53f0\u7063\u548c\u4e2d \u570b\u5927\uf9d3\u5c0f\uf96f/PP \u662f/V_ \u89e3 \u653e\u7684\u904e\u7a0b/NP \u8207/\u4e2d/\u83ef/\u53ca/\u65e5/\u672c/\u968a/\u5728/\u4f2f\u4ef2/\u4e4b\u9593 [P35/Ng/Nca/Caa/Nca/Nes/Nab/VC1/ Nhac/Ng] \u8207\u4e2d\u83ef\u53ca\u65e5\u672c\u968a/PP \u5728 /VC \u4f2f\u4ef2\u4e4b\u9593/GP \u8207 /P3 \u4e2d \u83ef \u53ca \u65e5 \u672c \u968a /NP \u5728/VC \u4f2f\u4ef2\u4e4b\u9593/GP \u9996\u5148/\u7fa9\u8ce3/\u7684/\u662f/\u9ed1/\u5c07/\u8ecd/\u53f2/\u6771/\u7684/\u624b \u5957 [Cbbb/VC31/DE/V_11/VH11/Dd/Nab/ Nad/Ncda/DE/Nab] \u9996 \u5148 \u7fa9 \u8ce3 \u7684 /NP \u662f /V_ \u9ed1\u5c07\u8ecd\u53f2\u6771\u7684\u624b\u5957/NP \u9996\u5148/Cb \u7fa9\u8ce3\u7684/NP \u662f/V_ \u9ed1\u5c07\u8ecd\u53f2\u6771\u7684\u624b\u5957/NP * \u5b78\u85dd/\u80a1\u9577/\u738b/\u6587/\u661f/\u7ad9\u8d77\uf92d/\uf96f [Nad/Nab/Nbc/Nab/Nab/VA11/VE2]" |
| }, |
| "FIGREF6": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "VJ/ Nc/P/Nc/DE/Nb/Na/V_2/Neu/Nf/SHI/Na/Na] \u76ca\u83ef\u5728\u82b1\uf999\u7684\u4e09\u68df\u5927\uf94c/NP \u6709 \u4e8c \u68df/NP \u662f \u4e03\u5c64\u5efa\u7bc9/NP 3 \u5927\uf9d3/\u4ecd/\u6709/\u5ee3\u95ca/\u767c\u5c55/\u7a7a\u9593 [Nc/D/V_2/VH/VC/Na] \u5927\uf9d3/NP \u4ecd \u6709 \u5ee3\u95ca\u767c\u5c55\u7a7a\u9593/NP4 \u5149 / \u662f / \u4e2d \u5171 / \u570b \u5bb6 / \u4e3b \u5e2d / \u6c5f \u6fa4 \u6c11 / \u5c31 / \u51fa / \u8a2a \u4e94 \u6b21 [Da/SHI/Nb/Na/Na/Nb/D/VC/Na] \u5149/NP \u662f \u4e2d\u5171\u570b\u5bb6\u4e3b\u5e2d\u6c5f\u6fa4\u6c11/NP \u5c31\u51fa\u8a2a\u4e94\u6b21/PP 5 \u8a31 \u591a / \u5730 \u5340 / \u90fd / \u51fa \u73fe / \u65b0 \u820a / \u5171 \u5b58 / \u7684 / \u666f \u89c0 [Neqa/Nc/D/VH/Na/VH/DE/Na] \u8a31\u591a\u5730\u5340/NP \u90fd \u51fa\u73fe \u65b0\u820a\u5171\u5b58\u7684 \u666f\u89c0/NP 6 \u904e\u53bb/\u4e00\uf98e/\u662f/\uf978\u5cb8/\u95dc\u4fc2/\u6bd4\u8f03/\u56f0\u96e3/\u3001/\u4e14/\u5e0c\u671b/\uf918\u7a7a/ \u7684/\u4e00\uf98e [Nd/Nd/SHI/Nc/Na/Dfa/VH/PAUSECATEGORY/Cbb /VK/VH/DE/Nd] \u904e\u53bb\u4e00\uf98e/NP \u662f \uf978\u5cb8\u95dc\u4fc2\u6bd4\u8f03\u56f0 \u96e3\u3001\u4e14\u5e0c\u671b\uf918\u7a7a\u7684\u4e00\uf98e/NP 7 \u6046\u751f/\u6307\uf969/\u5275\u4e0b/\uf98c\u53f2/\u65b0\u9ad8 [Nb/Na/VC/Na/VH] \u6046\u751f \u6307\uf969/NP \u5275\u4e0b \uf98c\u53f2\u65b0\u9ad8/NP 8 \u5c07 / \u8cc7 \u672c \u4e3b \u7fa9 / \u53ca / \u6295 \u6a5f / \u6c23 \u606f / \u5e36 \u5165 / \u5927 \uf9d3 / \u5167\u90e8 [P/Na/Caa/VH /Na/VCL/Nc/Ncd] \u5c07\u8cc7\u672c\u4e3b\u7fa9\u53ca\u6295\u6a5f\u6c23\u606f/PP \u5e36\u5165 \u5927\uf9d3\u5167\u90e8/NP 9 \u4e2d\u92fc/\u516c\u53f8/88/\uf98e\u76c8\u9918/\u53ef\u671b/\u9054\u5230/140 \u5104 /\u5143/\u5de6\u53f3 [Nc/Nc/Neu/Na/VK/VJ/Neu/Nf/Ng] \u4e2d\u92fc\u516c\u53f8 88 \uf98e\u76c8\u9918/NP \u53ef\u671b\u9054\u5230 140 \u5104\u5143/VP \u5de6\u53f3 10 \u4f01\u696d\u754c/\u5df2/\u958b\u59cb/\u5c3e\u7259/\u805a\u9910 [Nc/D/VL/Nd/VA] \u4f01\u696d\u754c/NP \u5df2 \u958b\u59cb \u5c3e\u7259\u805a\u9910/VP \n11 \u6295\u8cc7/\u4eba/\u975c\u5019/\u7f8e\u570b/\uf997\u90a6/\u6e96\u5099/\uf9e4\u4e8b\u6703/(/Fed/)/21 \u65e5/ \u7684/\uf9dd\uf961/\u6c7a\u7b56 [Nc/Na/VJ/Nc/Na/VC/Na/PARENTHESISCATEGOR Y/FW /PARENTHESISCATEGORY/Nd/DE/Na/Na] \u6295\u8cc7\u4eba/NP \u975c\u5019 \u7f8e\u570b\uf997\u90a6\u6e96\u5099\uf9e4 \u4e8b\u6703(Fed)21 \u65e5\u7684\uf9dd\uf961\u6c7a\u7b56/NP 12 \u592e \ufa08 / \u7e3d \u88c1 / \u53ca / \uf9e4 \u76e3 \u4e8b / \u90fd / \u6709 / \u4e00 \u5b9a / \u7684 / \u4efb \u671f [Nc/Na/Caa/Na/D/V_2/A/DE/Na] \u592e\ufa08\u7e3d\u88c1\u53ca\uf9e4\u76e3\u4e8b/NP \u90fd \u6709 \u4e00\u5b9a \u7684\u4efb\u671f/NP" |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td>Chunk Tag</td><td>Description</td><td>Example</td></tr><tr><td>NP</td><td>Noun Phrase</td><td>\u524d\u5341\u540d / \u7684 / \u9078\u624b [DM / DE / Nab]</td></tr><tr><td>VP</td><td>Verb Phrase</td><td>\u50b3\u905e / \u5340\u904b / \u8056\u706b [VD1 / Nad / Nac]</td></tr><tr><td>PP</td><td>Preposition Phrase</td><td>\u5728 / \uf983\u5ba2 / \u53e3 / \u4e2d [P21 / Nab / Nab / Ncda]</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">x: x_begin and</td></tr><tr><td colspan=\"20\">x_continue, with each category. In addition, we use the tag others to indicate that a token is not part of</td></tr><tr><td colspan=\"20\">a phrase. The shallow parsing problem can then be redefined as a problem of assigning one of 2n + 1</td></tr><tr><td colspan=\"20\">tags to each token. This is called the B-I-O scheme. There are 5 named entity categories and 11 tags:</td></tr><tr><td colspan=\"20\">NP_begin, NP_continue, VP_begin, VP_continue, PP_begin, PP_continue, GP_begin, GP_continue,</td></tr><tr><td colspan=\"7\">S_begin, S_continue, and X(others).</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"20\">3.2 The computation of p(o|h) in ME depends on a set of binary-valued features, which is helpful in</td></tr><tr><td colspan=\"20\">making a prediction about the outcome. For instance, one of our features is as follows: when the</td></tr><tr><td colspan=\"20\">current token is a verb, it is likely to be the leading character of a verb phrase. More formally, we can</td></tr><tr><td colspan=\"7\">represent this feature as</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>f</td><td>(</td><td>, h</td><td>o</td><td>)</td><td>=</td><td>\u23a9 \u23a8 \u23a7 0 : : if else Current 1</td><td>-</td><td>token</td><td>-</td><td>verb(h)</td><td>=</td><td>true</td><td>and</td><td>o</td><td>=</td><td>VP</td><td>_</td><td>begin</td><td>(1)</td></tr><tr><td colspan=\"20\">Here, Current-token-verb(h) is a binary function that returns the value true if the current token of the</td></tr><tr><td colspan=\"5\">history h is a verb.</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "a flexible statistical model that assigns an outcome to each token based on its history and features[2]. The outcome space is comprised of the tags for an ME formulation. ME computes the probability p(o|h) for any o from the space of all possible outcomes, O, and for every h from the space of all possible histories, H. A history is composed of all the conditioning data that enables one to assign probabilities to the space of outcomes. In shallow parsing, history can be viewed as all the information derived from the test corpus relevant to the current token.", |
| "num": null |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td/><td>Boundary</td><td>Category</td><td>Tokens</td><td>Chunks</td></tr><tr><td>0 (%)</td><td>84.83</td><td>70.10</td><td>69.14</td><td>70.47</td></tr><tr><td>10 (%)</td><td>84.74</td><td>69.92</td><td>69.04</td><td>70.30</td></tr><tr><td>20 (%)</td><td>84.80</td><td>69.94</td><td>69.03</td><td>70.26</td></tr><tr><td>30 (%)</td><td>84.77</td><td>69.88</td><td>69.10</td><td>70.20</td></tr><tr><td>40 (%)</td><td>84.70</td><td>69.77</td><td>68.97</td><td>70.13</td></tr><tr><td>50 (%)</td><td>84.64</td><td>69.65</td><td>69.02</td><td>70.00</td></tr><tr><td>60 (%)</td><td>84.56</td><td>69.57</td><td>68.78</td><td>69.82</td></tr><tr><td>70 (%)</td><td>84.42</td><td>69.39</td><td>68.76</td><td>69.59</td></tr><tr><td>80 (%)</td><td>84.53</td><td>69.67</td><td>68.99</td><td>69.77</td></tr><tr><td>90 (%)</td><td>84.38</td><td>69.44</td><td>68.58</td><td>69.72</td></tr><tr><td>100 (%)</td><td>84.26</td><td>69.51</td><td>68.57</td><td>69.75</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table", |
| "text": "shows the accuracy rates using the Type 1 model with different scales of noisy data for chunking test data with single character noise (Type 1). It is quite interesting that the curve is not monotonically increasing or decreasing. This indicates that the accuracy in this series decreases until the percentage of noise reaches 60%, and then it increases.Figures 1 to 4show the differences between the clean test data and the noisy test data in", |
| "num": null |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td/><td colspan=\"2\">Boundary</td><td/><td/><td>Category</td><td/><td/><td>Tokens</td><td/><td colspan=\"2\">Chunks</td></tr><tr><td/><td>0 (%)</td><td>83.73</td><td/><td/><td>69.09</td><td/><td/><td>65.16</td><td/><td>66.51</td></tr><tr><td colspan=\"2\">10 (%)</td><td>83.65</td><td/><td/><td>69.00</td><td/><td/><td>65.33</td><td/><td>66.36</td></tr><tr><td colspan=\"2\">20 (%)</td><td>83.72</td><td/><td/><td>69.11</td><td/><td/><td>65.34</td><td/><td>66.30</td></tr><tr><td colspan=\"2\">30 (%)</td><td>83.69</td><td/><td/><td>69.20</td><td/><td/><td>65.37</td><td/><td>66.27</td></tr><tr><td colspan=\"2\">40 (%)</td><td>83.62</td><td/><td/><td>69.14</td><td/><td/><td>65.42</td><td/><td>66.25</td></tr><tr><td colspan=\"2\">50 (%)</td><td>83.58</td><td/><td/><td>69.05</td><td/><td/><td>65.52</td><td/><td>66.13</td></tr><tr><td colspan=\"2\">60 (%)</td><td>83.57</td><td/><td/><td>69.07</td><td/><td/><td>65.36</td><td/><td>66.00</td></tr><tr><td colspan=\"2\">70 (%)</td><td>83.52</td><td/><td/><td>69.24</td><td/><td/><td>65.70</td><td/><td>66.07</td></tr><tr><td colspan=\"2\">80 (%)</td><td>83.63</td><td/><td/><td>69.46</td><td/><td/><td>65.83</td><td/><td>66.25</td></tr><tr><td colspan=\"2\">90 (%)</td><td>83.65</td><td/><td/><td>69.49</td><td/><td/><td>65.69</td><td/><td>69.30</td></tr><tr><td colspan=\"2\">100 (%)</td><td>83.77</td><td/><td/><td>69.67</td><td/><td/><td>65.85</td><td/><td>66.42</td></tr><tr><td/><td/><td/><td/><td colspan=\"4\">Evaluation on Boundary</td><td/><td/><td/></tr><tr><td/><td>84.9</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>84.7</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>Accuracy (%)</td><td>83.9 84.1 84.3 \n84.5</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>83.7</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>83.5</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>0</td><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td></tr><tr><td/><td/><td/><td/><td colspan=\"5\">Noisy Data Percentage in Training Data (%)</td><td/><td/></tr><tr><td/><td colspan=\"2\">Clean Test Data</td><td colspan=\"4\">Test Data With Character Noise</td><td/><td/><td/><td/></tr><tr><td/><td>Figure 1.</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF5": { |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF6": { |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table", |
| "text": "also shows that noisy training data yields better accuracy for both clean and noisy test data, although the difference is quite small.", |
| "num": null |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td/><td>Boundary</td><td>Category</td><td>Tokens</td><td>Chunks</td></tr><tr><td>C-C</td><td>84.83</td><td>70.10</td><td>69.14</td><td>70.47</td></tr><tr><td>C-N</td><td>84.84</td><td>70.09</td><td>69.04</td><td>70.37</td></tr><tr><td>N-C</td><td>84.89</td><td>70.13</td><td>69.15</td><td>70.51</td></tr><tr><td>N-N</td><td>84.90</td><td>70.11</td><td>69.02</td><td>70.38</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF8": { |
| "content": "<table><tr><td/><td>Boundary</td><td>Category</td><td>Tokens</td><td>Chunks</td></tr><tr><td>Fully AUTOTAG</td><td>81.42</td><td>64.81</td><td>61.80</td><td>61.30</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF9": { |
| "content": "<table><tr><td>Sentence and POS sequences with Type</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF10": { |
| "content": "<table><tr><td>Sentence and POS sequences with Type</td><td>Chunking standard from</td><td>Chunking results of our</td></tr><tr><td>2 noise</td><td>Sinica Treebank</td><td>system</td></tr><tr><td>\uf981\u6027/\u5f62\u8c61/\u5728/\u53f0\u7063/\u548c/\u4e2d\u570b/\u5927\uf9d3/\u5c0f\uf96f</td><td>\uf981\u6027\u5f62\u8c61/NP \u5728\u53f0\u7063\u548c</td><td>\uf981\u6027\u5f62\u8c61/NP \u5728\u53f0\u7063\u548c\u4e2d</td></tr><tr><td>/\u662f/\u89e3\u653e/\u7684/\u904e\u7a0b</td><td>\u4e2d\u570b\u5927\uf9d3\u5c0f\uf96f/PP \u662f/V_</td><td>\u570b\u5927\uf9d3\u5c0f\uf96f/PP \u662f/V_ \u89e3</td></tr><tr><td>[Nab/Nac/P21/Nca/Caa/Nc/Nc/Nac/V_</td><td>\u89e3\u653e\u7684\u904e\u7a0b/NP</td><td>\u653e\u7684\u904e\u7a0b/NP</td></tr><tr><td>11/VC2/DE/Nac]</td><td/><td/></tr><tr><td>\u8207/\u4e2d\u83ef/\u53ca/\u65e5\u672c/\u968a/\u5728/\u4f2f\u4ef2/\u4e4b\u9593</td><td>\u8207\u4e2d\u83ef\u53ca\u65e5\u672c\u968a/PP \u5728</td><td>\u8207 /P3 \u4e2d \u83ef \u53ca \u65e5 \u672c \u968a /NP</td></tr><tr><td>[P35/Nba/Caa/Nc/Na/VC1/Nhac/Ng]</td><td>/VC \u4f2f\u4ef2\u4e4b\u9593/GP</td><td>\u5728/VC \u4f2f\u4ef2\u4e4b\u9593/GP *</td></tr><tr><td>\u9996\u5148/\u7fa9\u8ce3/\u7684/\u662f/\u9ed1/\u5c07\u8ecd/\u53f2\u6771/\u7684/\u624b</td><td>\u9996 \u5148 \u7fa9 \u8ce3 \u7684 /NP \u662f /V_</td><td>\u9996\u5148/Cb \u7fa9\u8ce3\u7684/NP \u662f/V_</td></tr><tr><td>\u5957</td><td>\u9ed1\u5c07\u8ecd\u53f2\u6771\u7684\u624b\u5957/NP</td><td>\u9ed1\u5c07\u8ecd\u53f2\u6771\u7684\u624b\u5957/NP *</td></tr><tr><td>[Cbbb/VC31/DE/V_11/VH/Na/Nba/D</td><td/><td/></tr><tr><td>E/Nab]</td><td/><td/></tr><tr><td>\u5b78\u85dd/\u80a1\u9577/\u738b/\u6587\u661f/\u7ad9\u8d77\uf92d/\uf96f</td><td>\u5b78\u85dd\u80a1\u9577\u738b\u6587\u661f/NP \u7ad9</td><td>\u5b78\u85dd\u80a1\u9577\u738b\u6587\u661f/NP \u7ad9\u8d77</td></tr><tr><td>[Nad/Nab/Nb/Nb/VA11/VE2]</td><td>\u8d77\uf92d/VA \uf96f/VP</td><td>\uf92d/VA \uf96f/VP</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF11": { |
| "content": "<table><tr><td>AUTOTAG-parsed Sentence and POS</td><td>Chunking standard from</td><td>Chunking results of our</td></tr><tr><td>sequences</td><td>Sinica Treebank</td><td>system</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null |
| }, |
| "TABREF12": { |
| "content": "<table><tr><td/><td>\u4e2d\u92fc\u516c\u53f8/NP \u662f \u53f0\u7063\u92fc\u9435\u696d\uf9c4\u982d</td></tr><tr><td>[Nc/Nc/SHI/Nc/Na/Na]</td><td>/NP</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "text": "Tokenization and POS of SentencesShallow Parsing Result1 \u4e2d \u92fc / \u516c \u53f8 / \u662f / \u53f0 \u7063 / \u92fc \u9435 \u696d / \uf9c4 \u982d", |
| "num": null |
| } |
| } |
| } |
| } |