{
"paper_id": "Y09-2003",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T13:42:52.820423Z"
},
"title": "Incorporate Web Search Technology to Solve Out-of-Vocabulary Words in Chinese Word Segmentation",
"authors": [
{
"first": "Wei",
"middle": [],
"last": "Qiao",
"suffix": "",
"affiliation": {
"laboratory": "State Key Laboratory of Intelligent Technology and Systems",
"institution": "Tsinghua University",
"location": {
"postCode": "100084",
"settlement": "Beijing",
"country": "China"
}
},
"email": ""
},
{
"first": "Maosong",
"middle": [],
"last": "Sun",
"suffix": "",
"affiliation": {
"laboratory": "State Key Laboratory of Intelligent Technology and Systems",
"institution": "Tsinghua University",
"location": {
"postCode": "100084",
"settlement": "Beijing",
"country": "China"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Chinese word segmentation (CWS) is the fundamental technology for many NLP-related applications. It is reported that more than 60% of segmentation errors is caused by the out-of-vocabulary (OOV) words. Recent studies in CWS show that, statistical machine learning method is, to some extent, effective on solving OOV words. But labeled data is limited in size and unbalanced in content which makes it impossible to obtain all the required knowledge to recognize OOV words. In this paper, large scaled web data is incorporated as knowledge supplement. A framework which combines using web search technology and machine learning method is proposed. For each sentence, basic segmentation is performed using linear-chain Conditional Random Fields (CRF) model. Substrings which CRF model gives low confidence decisions are extracted and sent to search engine to perform web search based word segmentation. Final decision is made by considering both CRF model based segmentation result and that of web search based result. Evaluations are conducted on SIGHAN Bakeoff 2005 and 2006 datasets, showing the effectiveness of the proposed framework on dealing with OOV words.",
"pdf_parse": {
"paper_id": "Y09-2003",
"_pdf_hash": "",
"abstract": [
{
"text": "Chinese word segmentation (CWS) is the fundamental technology for many NLP-related applications. It is reported that more than 60% of segmentation errors is caused by the out-of-vocabulary (OOV) words. Recent studies in CWS show that, statistical machine learning method is, to some extent, effective on solving OOV words. But labeled data is limited in size and unbalanced in content which makes it impossible to obtain all the required knowledge to recognize OOV words. In this paper, large scaled web data is incorporated as knowledge supplement. A framework which combines using web search technology and machine learning method is proposed. For each sentence, basic segmentation is performed using linear-chain Conditional Random Fields (CRF) model. Substrings which CRF model gives low confidence decisions are extracted and sent to search engine to perform web search based word segmentation. Final decision is made by considering both CRF model based segmentation result and that of web search based result. Evaluations are conducted on SIGHAN Bakeoff 2005 and 2006 datasets, showing the effectiveness of the proposed framework on dealing with OOV words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Chinese word segmentation plays an important role in many Chinese language processing tasks. In the past decade it has drawn a large body of research in Chinese language processing community. A variety of methods have been exploited ranging from rule-based (Palmer, 1997; Cheng et al., 1999) to statistics-based (Sproat et al., 1996) , word-based (Sun et al., 1998) to character-based (Xue, 2003) , supervised learning-based (Peng et al., 2004; Low et al., 2005) to unsupervised learning-based (Goldwater et al., 2006; Zhao and Kit, 2008) , as well as their hybrid (Gao et al., 2005) . It is reported in SIGHAN Bakeoff-2005 (Emerson, 2005) and SIGHAN Bakeoff-2006 (Levow, 2006 that the highest F1-measure achieved on open tracks is 97.9% while the OOV recall rate is only 84%. This performance is achieved on the test sets of which OOV rates only ranging from 2% to 8%. When facing Chinese running text with much higher OOV rate, the performance will drop dramatically. It is reported that performance loss caused by out-of-vocabulary (OOV) words is at least five times greater than that of segmentation ambiguities (Huang and Zhao, 2007) . So, OOV problem is the main factor which extremely influences the performance of CWS system and there still has some room to improve.",
"cite_spans": [
{
"start": 257,
"end": 271,
"text": "(Palmer, 1997;",
"ref_id": "BIBREF9"
},
{
"start": 272,
"end": 291,
"text": "Cheng et al., 1999)",
"ref_id": "BIBREF0"
},
{
"start": 312,
"end": 333,
"text": "(Sproat et al., 1996)",
"ref_id": "BIBREF11"
},
{
"start": 347,
"end": 365,
"text": "(Sun et al., 1998)",
"ref_id": "BIBREF12"
},
{
"start": 385,
"end": 396,
"text": "(Xue, 2003)",
"ref_id": "BIBREF14"
},
{
"start": 425,
"end": 444,
"text": "(Peng et al., 2004;",
"ref_id": "BIBREF10"
},
{
"start": 445,
"end": 462,
"text": "Low et al., 2005)",
"ref_id": "BIBREF8"
},
{
"start": 494,
"end": 518,
"text": "(Goldwater et al., 2006;",
"ref_id": "BIBREF4"
},
{
"start": 519,
"end": 538,
"text": "Zhao and Kit, 2008)",
"ref_id": "BIBREF15"
},
{
"start": 565,
"end": 583,
"text": "(Gao et al., 2005)",
"ref_id": "BIBREF2"
},
{
"start": 604,
"end": 623,
"text": "SIGHAN Bakeoff-2005",
"ref_id": null
},
{
"start": 624,
"end": 639,
"text": "(Emerson, 2005)",
"ref_id": "BIBREF1"
},
{
"start": 644,
"end": 663,
"text": "SIGHAN Bakeoff-2006",
"ref_id": null
},
{
"start": 664,
"end": 676,
"text": "(Levow, 2006",
"ref_id": "BIBREF7"
},
{
"start": 1116,
"end": 1138,
"text": "(Huang and Zhao, 2007)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Recent studies in CWS focus on statistic machine learning methods. Regarding CWS task as sequence labeling problem (Xue, 2003; Goh et al., 2005) , various machine learning methods can be adopted to do this task. Features derived from labeled corpora are taken to train the model. The performance of this kind of method much depends on the size and the quality of the training data. As labeled corpus is usually limited in size and unbalanced in content, it can not provide enough knowledge to train a model which is robust enough when facing large scaled running text which contains large majority of OOV words.",
"cite_spans": [
{
"start": 115,
"end": 126,
"text": "(Xue, 2003;",
"ref_id": "BIBREF14"
},
{
"start": 127,
"end": 144,
"text": "Goh et al., 2005)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Nowadays the number of web pages grows very fast. The web text can be considered as a very large scaled knowledge database which seldom has OOV problem. So, one way that can supplement the knowledge is to incorporate web knowledge database. There already have some works which are motivated by this idea. The most related one is (Wang et al., 2007) , they proposed a search-based CWS method which is entirely unsupervised. They perform word segmentation as a search procedure by using search engine to directly find answer on web. First, sub-sentences are extracted from sentences using punctuation as delimiters. Second, these sub-sentences are directly sent to search engine as user queries. At last, the highlight parts in the returned snippets are used to construct the final word segmentation. Experimental result shows performance improvement on OOV recall rate but the reported F-measure is only about 87% which is much worse than supervised machine learning method. Motivated by taking both advantages of web-search method and supervised machine learning method, a new framework combines using web search and CRF model is proposed. For every sentence, segmentation candidates are collected and organized as lattice. Instead of sending sub-sentences as queries, specific small segments derived from the lattice are sent to the search engine. Search based segmentation is constructed using the highlighted parts of returned snippets. Final decision is made by measuring the distance of the search-based segmentation with the CRF segmentation candidates.",
"cite_spans": [
{
"start": 329,
"end": 348,
"text": "(Wang et al., 2007)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The rest of the paper is organized as follows. We introduce our specific implementation of linear-chain CRF model based word segmenter in Section 2. In Section 3, we propose the new segmentation framework which combines using search technology and supervised machine learning method. Experimental results are given in Section 4 and in Section 5, we conclude our work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Recent studies show that linear-chain structured CRF model (Lafferty et al., 2001) , which was first applied to CWS task in the year 2004 (Peng et al., 2004) , has been proved to be the most effective one for sequence labeling problem. In this paper, CRF-based word segmenter is selected as our basic word segmenter. In subsection 2.1, we introduce the specific implementation of our CRFbased Chinese word segmenter. Error analysis and performance evaluation is done in subsection 2.2 and 2.3 separately.",
"cite_spans": [
{
"start": 59,
"end": 82,
"text": "(Lafferty et al., 2001)",
"ref_id": "BIBREF6"
},
{
"start": 138,
"end": 157,
"text": "(Peng et al., 2004)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "CRF-based Chinese word segmenter",
"sec_num": "2"
},
{
"text": "In this paper, the specific implementation of CRF-based word segmenter uses the CRF++ toolkit version 0.53 provided by Taku Kudo 1 . Four tags, denoted as S(single-character word), L(the most left character of a word), M(middle character of a word) and R(the most right character of a word), are used to distinguish the position of a character in a word. The window size is set as five to extract features to train the model. This means when we consider current character, the adjacent four characters (the two ahead of it and the two after it) are taken as local features. The basic feature template adopted from (Low et al., 2005) is used, here we restate them to make the paper self-contained:",
"cite_spans": [
{
"start": 614,
"end": 632,
"text": "(Low et al., 2005)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Implementation of CRF-based word segmenter",
"sec_num": "2.1"
},
{
"text": "(a) C n , n = \u22122, \u22121, 0, 1, 2 (b) C n C n+1 , n = \u22122, \u22121, 0, 1 (c) C \u22121 C 1 (d) P u(C 0 ) (e) T (C \u22122 )T (C \u22121 )T (C 0 )T (C 1 )T (C 2 )",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Implementation of CRF-based word segmenter",
"sec_num": "2.1"
},
{
"text": "Where C n refers to a Chinese character, here, n indicates the relative distance to current character C 0 . For example, C 1 indicates the character next to C 0 while C \u2212 1 refers to the character previous to C 0 . P u(C 0 ) represents whether current character is a punctuation. T (C n ) represents what type the character C n belongs to. Here, four types are defined as Numbers, Dates (the Chinese characters for \"day\", \"month\", \"year\", respectively), English letters and Others. See more detailed illustration in (Low et al., 2005) .",
"cite_spans": [
{
"start": 516,
"end": 534,
"text": "(Low et al., 2005)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Implementation of CRF-based word segmenter",
"sec_num": "2.1"
},
{
"text": "The second International Chinese Word Segmentation Bakeoff (SIGHAN-bakeoff 2005) provides four datasets for Chinese word segmentation competition. Every set has training set and corresponding test set in it. In this paper, the one constructed by Microsoft Research center in Asia, denoted as MSRA05, is used to do error analysis.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Error analysis",
"sec_num": "2.2"
},
{
"text": "A CRF-based word segmenter is trained on the training set of MSRA05. Segmentation is performed on the corresponding test set. Compared with gold standard, totally 2,908 segmentation errors are found. They are manually classified into four groups according to their error types: the segmentation error caused by OOV words fall into type A; those caused by ambiguity problem are classified into type B; for strings whose segmentation way provided by gold-standard and by CRFbased segmenter are both acceptable, fell under type C; errors caused by inaccurate Gold-standard fall into type D. Table 1 shows the error distribution. From Table 1 we can see that, when using CRF-based word segmenter, OOV words causes over 54% segmentation errors. So, how to appropriately process OOV words is the key point to improve the entire performance.",
"cite_spans": [],
"ref_spans": [
{
"start": 588,
"end": 595,
"text": "Table 1",
"ref_id": "TABREF0"
},
{
"start": 631,
"end": 638,
"text": "Table 1",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Error analysis",
"sec_num": "2.2"
},
{
"text": "The conclusion obtained above is under the case that the one with the highest probability is selected as final segmentation result. What if we consider more candidates? Is there any possibility that the best answer be ranked behind or, in other words, with lower probability? In order to answer these questions, top 10 candidates are recorded, according to gold standard the best one is chosen from 10 candidates as final segmentation result. This constructs the upper bound of our improvement. Word segmentation is performed on all the four datasets provided by the SIGHAN-bakeoff 2005, denoted as MSRA05, PKU05, CITYU05 and AS05 respectively 2 . The strategy using 10best candidates are compared with the one-best strategy. Table 2 shows the comparison results which shows the improvement potential. From Table 2 we can see that the improvement on F1-measure is about 2% while it is ranging from 7.3% to 13.5% on OOV recall. The improvement is statistical significant, thus is worthy of further investigation. Experiment is done to see how many sentences have the case that the final selected segmentation is not the one with the highest probability. Figure 1 shows the distribution. The horizontal axis represents the Nth (N=0,. . . ,9) candidate while the vertical axis represents the number of sentences whose segmentation are provided by the Nth candidate: From Figure 1 we can see, there are still some exceptions that the best answer falls into the candidates with relatively lower confidence. And the amount of this kind of cases can't be ignored.",
"cite_spans": [],
"ref_spans": [
{
"start": 726,
"end": 733,
"text": "Table 2",
"ref_id": "TABREF1"
},
{
"start": 807,
"end": 814,
"text": "Table 2",
"ref_id": "TABREF1"
},
{
"start": 1153,
"end": 1161,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 1368,
"end": 1376,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "The improvement potential when consider 10-best segmentation candidates",
"sec_num": "2.3"
},
{
"text": "Experiment result shows that considering more candidates (20-best or more) doesn't provide significant improvement. So, we consider maximal top 10 candidates. We will illustrate how to decide the exact number of candidates automatically according to different cases of sentences.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The improvement potential when consider 10-best segmentation candidates",
"sec_num": "2.3"
},
{
"text": "In this section we propose the new framework for Chinese word segmentation. The basic idea is mining information from web to perform web search based word segmentation. By using search based segmentation result, we re-rank the candidates provided by basic word segmenter.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The proposed framework",
"sec_num": "3"
},
{
"text": "What we want to benefit from web is it seldom has OOV problem, and OOV words are often given low confidence by CRF model. This motivates us to distinguish high confidence segmentation and low confidence segmentation. For substrings in a sentence, the segmentation way will be adopted if CRF model provides high confidence on them. Otherwise they will be recorded for further processing. In order to do this task, lattice is constructed based on segmentation candidates. Substrings with lower confidence can be easily extracted from the lattice and sent to search engine as queries. Search-based segmentation is performed on these substrings. Final segmentation is reconstructed through similarity measurement between the search-based segmentation and the candidates. Figure 2 shows the whole flow chart of the proposed framework. It consists of three modules which will be introduced one by one in the following subsections. ",
"cite_spans": [],
"ref_spans": [
{
"start": 767,
"end": 775,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "The proposed framework",
"sec_num": "3"
},
{
"text": "In this subsection, in order to extract low confidence parts, lattice is constructed according to segmentation candidates provided by CRF model. This corresponds to module 1 in the flow chart shown in Figure 2 .",
"cite_spans": [],
"ref_spans": [
{
"start": 201,
"end": 209,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Module 1: Lattice construction and low confidence substrings extraction",
"sec_num": "3.1"
},
{
"text": "Here we give out the formal description of the construction process of lattice: Given a Chinese string S to be segmented, with length l, then there exists l + 1 segmentation positions denoted as p 0 , p 1 , .., p l . Each specific segmentation way s for S can be represented by a sequence of positions",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 1: Lattice construction and low confidence substrings extraction",
"sec_num": "3.1"
},
{
"text": "{p s 0 , p s 1 , ..., p s l } which satisfies: (a) s 0 = 0, s l = l (b) s i \u2208 {0, 1, ..., l} (c) s i < s i+1",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 1: Lattice construction and low confidence substrings extraction",
"sec_num": "3.1"
},
{
"text": "Also s can be viewed as a total order, and different segmentations can be combined into a partial order relationship, with p 0 as source and p l as destination. Take Figure 3 for example, the position 0 is the source and 13 is the destination. If a segmentation position p i is unanimous point, then in the partial order they define, unanimous positions can be viewed as joint nodes, just like the position 1, 8 and 10 which are emphasized in Figure 3 .",
"cite_spans": [],
"ref_spans": [
{
"start": 166,
"end": 174,
"text": "Figure 3",
"ref_id": "FIGREF2"
},
{
"start": 443,
"end": 451,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Module 1: Lattice construction and low confidence substrings extraction",
"sec_num": "3.1"
},
{
"text": "A sequence of unanimous positions can be retrieved from the partial order, recorded as p u . If the sub graph between two positions in p u , i.e., p u i and p u j , is inherently a total order, then the substring between p u i and p u j contains unanimous segmentations. For example, substring \" (ex-)\" between position 0 and 1, \" (take on)\" between position 8 and 10 and \" (model)\" between position 10 and 13 in Figure 3 . If not, then there exists several possible segmentations and the substring defined by the pair of positions, such as \" (Miss HongKong Biyi Jia accepts the invitation)\" defined by the pair of positions 1 and 8 in Figure 3 , will be delivered for further diagnosis by search engines.",
"cite_spans": [],
"ref_spans": [
{
"start": 413,
"end": 421,
"text": "Figure 3",
"ref_id": "FIGREF2"
},
{
"start": 636,
"end": 644,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Module 1: Lattice construction and low confidence substrings extraction",
"sec_num": "3.1"
},
{
"text": "By now the low confidence strings are extracted which compose a set denoted as S u . In subsection 3.2 we will describe the procedure of segmentation reconstruction for strings in S u . ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 1: Lattice construction and low confidence substrings extraction",
"sec_num": "3.1"
},
{
"text": "The search-based segmentation in this paper is an unsupervised one. The idea is motivated by the work of (Wang et al., 2007) while the method is a very different one. Briefly, the segmentation is implemented by using the highlighted parts in the snippets returned by search engine. Specifically in this paper, we use Sogou 3 search engine to do this task. The detailed implementation of our search-based segmentation can be divided into two parts. In the followings, we will introduce them one by one. The first part is segments collection. String in S u are automatically submitted to search engine as user queries. The returned snippets are collected for further processing. Figure 4 shows an example of the returned snippet when using string \" (Miss HongKong Biyi Jia accepts invitation)\" as query. Every highlighted (red) part in the snippet is said as a segment. For each substring, in total one hundred snippets are used to do segments collecting and at meanwhile, the frequency of each segment is recorded. We then rank the segments in descending order of their frequency and organize the data in the form of shown in Figure 5 .",
"cite_spans": [
{
"start": 105,
"end": 124,
"text": "(Wang et al., 2007)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [
{
"start": 677,
"end": 685,
"text": "Figure 4",
"ref_id": "FIGREF3"
},
{
"start": 1125,
"end": 1133,
"text": "Figure 5",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Module 2: Search-based segmentation for low confidence substrings",
"sec_num": "3.2"
},
{
"text": "The second part is segmentation reconstructed. For every substring, we iteratively select the segment with currently the highest frequency as a segmentation unit, and tag the corresponding characters in the original string until all the characters in the substring is tagged. For example in Figure5, the segment \" (Miss HongKong)\" will first be selected as a segmentation unit. Thus the two characters \" (Miss HongKong)\" in substring \"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 2: Search-based segmentation for low confidence substrings",
"sec_num": "3.2"
},
{
"text": "(Miss HongKong Biyi Jia accepts the invitation)\" will be tagged as L and R respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 2: Search-based segmentation for low confidence substrings",
"sec_num": "3.2"
},
{
"text": "One would suspect of using the local segmenter of search engine. Here, we argue that although search engines generally have their own local segmenters, the returned highlighted parts which we use are quite different from that generated by the segmenters. We have investigated the segmentation strategy of Sogou search engine, it is a \"reduplicate\" one. In other words, they make redundant segmentation. Take segmentation result of \" (Miss HongKong Biyi Jia accepts the invitation)\" as an example, by checking the HTML source code, we could find that both \" (Miss HongKong Biyi Jia)\", \" (Miss HongKong)\" and \" (Biyi Jia)\" are taken as segmentation units. So, our search-based segmentation results are generally independent to the local segmenters of the search engine.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 2: Search-based segmentation for low confidence substrings",
"sec_num": "3.2"
},
{
"text": "Now, we have search-based segmentation result and the segmentation candidates. There are two ways to reconstruct the final segmentation:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 3: Segmentation reconstruction",
"sec_num": "3.3"
},
{
"text": "1. For low confidence part, directly take place the segmentation provided by basic word segmenter by the search-based segmentation result, thus we can reconstruct segmentation result;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 3: Segmentation reconstruction",
"sec_num": "3.3"
},
{
"text": "2. After we finish 1, do similarity measurement between reconstructed one and the candidates. The candidate which has highest score will be taken as final segmentation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 3: Segmentation reconstruction",
"sec_num": "3.3"
},
{
"text": "In experiment part, we will compare the two strategies (with and without similarity measurement).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 3: Segmentation reconstruction",
"sec_num": "3.3"
},
{
"text": "In the following, we introduce the similarity measurement method used in this paper. Inspired by Edit distance, Segmentation Distance (SD) is proposed here to measure the similarity between two segmented strings: For two segmented strings S 1 and S 2 the segmentation distance is defined as the minimum number of boundary insertions and boundary deletions to transform one segmentation way to the other which can be represented as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 3: Segmentation reconstruction",
"sec_num": "3.3"
},
{
"text": "SD(S 1 , S 2 ) = min{\u03a3(Insertion(S 1 \u2192 S 2 ) + Deletion(S 1 \u2192 S 2 ))}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 3: Segmentation reconstruction",
"sec_num": "3.3"
},
{
"text": "Dynamic programming algorithm is used to calculate SD value.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Module 3: Segmentation reconstruction",
"sec_num": "3.3"
},
{
"text": "In experiment part, we firstly determine the number of candidates and rules for lattice construction. Then, performance evaluation for the proposed framework will be performed on five corpora provided by SIGHAN-bakeoff 2005 (Emerson, 2005 and SIGHAN-bakeoff 2006 (Levow, 2006 . Statistics of the five datasets 4 is listed in Table 3 . The performance is evaluated by F1-Measure and OOV recall rate (R OOV).",
"cite_spans": [
{
"start": 204,
"end": 223,
"text": "SIGHAN-bakeoff 2005",
"ref_id": null
},
{
"start": 224,
"end": 238,
"text": "(Emerson, 2005",
"ref_id": "BIBREF1"
},
{
"start": 243,
"end": 262,
"text": "SIGHAN-bakeoff 2006",
"ref_id": null
},
{
"start": 263,
"end": 275,
"text": "(Levow, 2006",
"ref_id": "BIBREF7"
}
],
"ref_spans": [
{
"start": 325,
"end": 332,
"text": "Table 3",
"ref_id": "TABREF4"
}
],
"eq_spans": [],
"section": "Experimental results",
"sec_num": "4"
},
{
"text": "If all the 10-best candidates are used to construct lattice, there will be many small segments in S u (see section 3.1) which only contains one character. In order to bring down the complexity of lattice, experiment is done to determine the number of candidates we adopted to construct the ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Determine the number of candidates for lattice construction",
"sec_num": "4.1"
},
{
"text": "The hit rate Figure 6 : The relation of the threshold and the hit rate lattice. Candidates are selected out one by one according to the priority of high probability until the accumulate probability value reaches the threshold. During this process, we aim to have hit rate as high as possible. Here \"hit rate\" is defined as: The number of gold-standard segmentation candidates which are selected out divided by the total number of candidates we selected out.",
"cite_spans": [],
"ref_spans": [
{
"start": 13,
"end": 21,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "The threshold",
"sec_num": null
},
{
"text": "From Figure 6 we can see, distinguished by the threshold value of \"0.7\", the curve grows steeply before that while it turns flat after that. The hit rate reaches 80% at 0.7. In the latter part of this paper we take 0.7 as the threshold to determine number of candidates we adopted.",
"cite_spans": [],
"ref_spans": [
{
"start": 5,
"end": 13,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "The threshold",
"sec_num": null
},
{
"text": "Firstly, we compare the performance between baseline word segmenter (CRF-based word segmenter) and our proposed framework including with and without similarity measurement. The comparison result is given in Table 4 . It shows that, compared with baseline, the proposed scheme without similarity measurement achieves improvement on both OOV recall rate (3.2% to 8.4%) and F1 (0.4% to 1.9%). Further more, the strategy with similarity measurement shows even better performance on OOV recall rate. Secondly, the best performance we achieved is taken to do comparison with the best reported result of SIGHAN. Table 5 gives out the result. We can see, in most cases our proposed scheme achieves improvement on R OOV. Since OOV rate of SIGHAN datasets ranging only from 2.6% to 8.8%, although R OOV is significantly improved the F-measure does not improve much. Here, we manually construct a small test set (denoted as C Web) which is extracted from web pages (about 100 pages). It includes various topics such as Sports, Medical science, Mechanical, etc., with 4,000 words in it. MSRA05 training set is taken to train CRF model. Table 6 gives out the performance comparison between baseline and our proposed method. It shows, for high OOV test set, that the proposed method achieves significant improvement. We investigate the segmentation result and select some typical sentences to do test. Table 7 shows three sentences which are wrongly (the underlined parts) segmented by using ICTCLAS1.0 5 and MSRSeg1.0 6 . It shows that our proposed scheme is effective on particular OOV types such as new words (\" \"(Mac daddy star)), loanwords (\" \"(Disco)) and name entity (\" \"(Biyi Jia)) while well known high-quality word segmenter, such as MSRSeg1.0 and ICT-CLAS1.0 fail on processing these kinds of OOV words. Table 7 : Three typical segmentation errors derived from MSRSeg1.0 and ICTCLAS1.0",
"cite_spans": [],
"ref_spans": [
{
"start": 207,
"end": 214,
"text": "Table 4",
"ref_id": "TABREF5"
},
{
"start": 605,
"end": 612,
"text": "Table 5",
"ref_id": "TABREF6"
},
{
"start": 1124,
"end": 1131,
"text": "Table 6",
"ref_id": "TABREF7"
},
{
"start": 1388,
"end": 1395,
"text": "Table 7",
"ref_id": null
},
{
"start": 1801,
"end": 1808,
"text": "Table 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Performance Evaluation",
"sec_num": "4.2"
},
{
"text": "Output of ICTCLAS1.0 and MSRSeg1.0 \" \" (Mac daddy star burst sex scandal again) \" \" (Miss HongKong Jiabi Yi accepts the invitation of being a model) \" \" (We go to disco to sing songs)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input sentences",
"sec_num": null
},
{
"text": "Within this paper, a framework which combines supervised machine learning method and web search technology to do Chinese word segmentation is proposed. Experimental result shows that the proposed framework obtains improved segmentation performance and is especially effective on processing OOV words. There are still some future works left: first, we can construct a local search engine instead of using commercial ones. Second, well defined rules should be concluded to help us to reconstruct the search-based word segmentation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "23rd Pacific Asia Conference on Language, Information and Computation, pages 454-463",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://chasen.org/taku/software/CRF++/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://sighan.cs.uchicago.edu/bakeoff2005/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Sogou is a Chinese search engine which is owned by Sohu, Inc., and is one of the fastest growing search engines in China.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Here, PKU05 dataset is not included the representation of personal names is different from others: family name and given name are segmented as two words",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "ICTCLAS 1.0: http://www.nlp.org.cn 6 MSRSeg.v1.:http://research.microsoft.com/-S-MSRSeg",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "A study on word-based and integral-bit Chinese text compression algorithms",
"authors": [
{
"first": "K",
"middle": [
"S"
],
"last": "Cheng",
"suffix": ""
},
{
"first": "G",
"middle": [
"H"
],
"last": "Young",
"suffix": ""
},
{
"first": "K",
"middle": [
"F"
],
"last": "Wong",
"suffix": ""
}
],
"year": 1999,
"venue": "Journal of the American Society for Info. Sci",
"volume": "50",
"issue": "3",
"pages": "218--228",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Cheng, K.S., G.H. Young and K.F. Wong. 1999. A study on word-based and integral-bit Chinese text compression algorithms. Journal of the American Society for Info. Sci., 50(3), 218-228.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "The second international Chinese word segmentation bakeoff",
"authors": [
{
"first": "T",
"middle": [],
"last": "Emerson",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 4th SIGHAN Workshop",
"volume": "",
"issue": "",
"pages": "123--133",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Emerson, T. 2005. The second international Chinese word segmentation bakeoff. Proceedings of the 4th SIGHAN Workshop, pp.123-133.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Chinese word segmentation and named entity recognition: A pragmatic approach",
"authors": [
{
"first": "J",
"middle": [
"F"
],
"last": "Gao",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Chang-Ning",
"middle": [],
"last": "Huang",
"suffix": ""
}
],
"year": 2005,
"venue": "Computational Linguistics",
"volume": "31",
"issue": "4",
"pages": "531--574",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gao, J.F., M. Li, A. Wu and Chang-Ning Huang. 2005. Chinese word segmentation and named entity recognition: A pragmatic approach. Computational Linguistics, 31(4), 531-574.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Chinese Word Segmentatin by Classification of Characters",
"authors": [
{
"first": "C",
"middle": [
"L"
],
"last": "Goh",
"suffix": ""
},
{
"first": "Masayuku",
"middle": [],
"last": "Asahara",
"suffix": ""
},
{
"first": "Yuji",
"middle": [],
"last": "Matsumoto",
"suffix": ""
}
],
"year": 2005,
"venue": "Computational Linguistics",
"volume": "10",
"issue": "3",
"pages": "381--396",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Goh, C.L., Masayuku Asahara and Yuji Matsumoto. 2005. Chinese Word Segmentatin by Classi- fication of Characters. Computational Linguistics, 10(3), 381-396.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Contextual Dependencies in Unsupervised Word Segmentation",
"authors": [
{
"first": "S",
"middle": [],
"last": "Goldwater",
"suffix": ""
},
{
"first": "T",
"middle": [
"L"
],
"last": "Griffiths",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Johnson",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of COLING-ACL",
"volume": "",
"issue": "",
"pages": "673--680",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Goldwater, S., T.L. Griffiths and M. Johnson. 2006. Contextual Dependencies in Unsupervised Word Segmentation. Proceedings of COLING-ACL 2006 , pp.673-680.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Chinese Word Segmentation: A Decade Review Journal of Chinese Information Processing",
"authors": [
{
"first": "C",
"middle": [
"N"
],
"last": "Huang",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Zhao",
"suffix": ""
}
],
"year": 2007,
"venue": "",
"volume": "21",
"issue": "",
"pages": "8--20",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Huang, C.N. and H. Zhao. 2007. Chinese Word Segmentation: A Decade Review Journal of Chinese Information Processing, 21(3), 8-20.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data",
"authors": [
{
"first": "J",
"middle": [],
"last": "Lafferty",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Mccallum",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Pereira",
"suffix": ""
}
],
"year": 2001,
"venue": "Proceedings of 18th International Conference on Machine Learning (ICML2001)",
"volume": "",
"issue": "",
"pages": "282--289",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lafferty, J., A.McCallum and F.Pereira. 2001. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. Proceedings of 18th International Conference on Machine Learning (ICML2001), pp.282-289.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "The third international Chinese word segmentation bakeoff",
"authors": [
{
"first": "G",
"middle": [],
"last": "Levow",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the 5th SIGHAN Workshop",
"volume": "",
"issue": "",
"pages": "108--117",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Levow, G. 2006. The third international Chinese word segmentation bakeoff. Proceedings of the 5th SIGHAN Workshop, pp.108-117.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "A maximum entropy approach to Chinese word segmentation",
"authors": [
{
"first": "J",
"middle": [
"K"
],
"last": "Low",
"suffix": ""
},
{
"first": "W",
"middle": [
"Y"
],
"last": "Hwee Tou Ng",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Guo",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 4th SIGHAN Workshop",
"volume": "",
"issue": "",
"pages": "161--164",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Low, J.K., Hwee Tou Ng and W.Y. Guo. 2005. A maximum entropy approach to Chinese word segmentation. Proceedings of the 4th SIGHAN Workshop, pp.161-164.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "A Trainable Rule-based Algorithm for Word Segmentation",
"authors": [
{
"first": "David",
"middle": [
"D"
],
"last": "Palmer",
"suffix": ""
}
],
"year": 1997,
"venue": "Proceedings of Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "321--328",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Palmer, David D. 1997. A Trainable Rule-based Algorithm for Word Segmentation. Proceedings of Annual Meeting of the Association for Computational Linguistics 1997, pp.321-328.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Chinese segmentation and new word detection using conditional random fields",
"authors": [
{
"first": "F",
"middle": [],
"last": "Peng",
"suffix": ""
},
{
"first": "Fangfang",
"middle": [],
"last": "Feng",
"suffix": ""
},
{
"first": "Andrew",
"middle": [],
"last": "Mccallum",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of COLING 2004",
"volume": "",
"issue": "",
"pages": "562--568",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Peng, F., Fangfang Feng and Andrew McCallum. 2004. Chinese segmentation and new word detection using conditional random fields. Proceedings of COLING 2004, pp.562-568.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "A stochastic finite-state wordsegmentation algorithm for Chinese",
"authors": [
{
"first": "R",
"middle": [],
"last": "Sproat",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Shih",
"suffix": ""
},
{
"first": "William",
"middle": [],
"last": "Gale",
"suffix": ""
},
{
"first": "Nancy",
"middle": [],
"last": "Chang",
"suffix": ""
}
],
"year": 1996,
"venue": "Computational Linguistics",
"volume": "22",
"issue": "3",
"pages": "377--404",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sproat, R., C.Shih, William Gale and Nancy Chang. 1996. A stochastic finite-state word- segmentation algorithm for Chinese. Computational Linguistics, 22(3), 377-404.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Word segmentation without using lexicon and handcrafted training data",
"authors": [
{
"first": "M",
"middle": [
"S"
],
"last": "Sun",
"suffix": ""
},
{
"first": "D",
"middle": [
"Y"
],
"last": "Shen",
"suffix": ""
},
{
"first": "B",
"middle": [
"K"
],
"last": "Tsou",
"suffix": ""
}
],
"year": 1998,
"venue": "Proceeding of COLING-ACL'98",
"volume": "",
"issue": "",
"pages": "1265--1271",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sun M.S., D.Y. Shen and B. K. Tsou. 1998. Word segmentation without using lexicon and hand- crafted training data. In Proceeding of COLING-ACL'98, pp 1265-1271.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "A search-based Chinese word segmentation method",
"authors": [
{
"first": "X",
"middle": [
"J"
],
"last": "Wang",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Qin",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceeding of 16th International Conf. of WWW",
"volume": "",
"issue": "",
"pages": "1129--1130",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wang, X.J., Y. Qin and W. Liu. 2007. A search-based Chinese word segmentation method. Proceeding of 16th International Conf. of WWW, pp.1129-1130.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Chinese word segmentation as character tagging",
"authors": [
{
"first": "N",
"middle": [
"W"
],
"last": "Xue",
"suffix": ""
}
],
"year": 2003,
"venue": "Journal of Computational Linguistics and Chinese Language Processing",
"volume": "8",
"issue": "1",
"pages": "29--48",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xue, N.W. 2003. Chinese word segmentation as character tagging. Journal of Computational Linguistics and Chinese Language Processing, 8(1), 29-48.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "An Empirical Comparison of Goodness Measures for Unsupervised Chinese Word Segmentation with a Unified Framework",
"authors": [
{
"first": "H",
"middle": [],
"last": "Zhao",
"suffix": ""
},
{
"first": "Chunyu",
"middle": [],
"last": "Kit",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of 3th International Joint Conf. on Natural Language Processing",
"volume": "",
"issue": "",
"pages": "9--16",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zhao, H. and Chunyu Kit. 2008. An Empirical Comparison of Goodness Measures for Unsuper- vised Chinese Word Segmentation with a Unified Framework. Proceedings of 3th International Joint Conf. on Natural Language Processing (IJCNLP-2008), pp.9-16.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"num": null,
"text": "Distribution of N best results.",
"type_str": "figure"
},
"FIGREF1": {
"uris": null,
"num": null,
"text": "The flow chart of new proposed framework.",
"type_str": "figure"
},
"FIGREF2": {
"uris": null,
"num": null,
"text": "The illustration of Lattice construction.",
"type_str": "figure"
},
"FIGREF3": {
"uris": null,
"num": null,
"text": "The search result of \" (Miss HongKong Biyi Jia accepts the invitation)\" using Sogou search engine.",
"type_str": "figure"
},
"FIGREF4": {
"uris": null,
"num": null,
"text": "Segments of query string: \" \"(Miss HongKong Biyi Jia accepts the invitation).",
"type_str": "figure"
},
"TABREF0": {
"html": null,
"content": "<table><tr><td>Error Type # Errors</td><td>A 1,581</td><td>B 897</td><td>C 357</td><td>D 73</td></tr><tr><td>Percentage</td><td>54.4%</td><td>30.8%</td><td>12.3%</td><td>2.5%</td></tr></table>",
"text": "Error distribution of CRF-based word segmenter tested on MSRA05.",
"num": null,
"type_str": "table"
},
"TABREF1": {
"html": null,
"content": "<table><tr><td>MSRA05</td><td>OOV Rate 2.6</td><td>R-OOV 1 best 75.6</td><td>R-OOV 10 best 89.1</td><td>Improvement on R-OOV 13.5</td><td>F1 1 best 96.6</td><td>F1 10 best 98.9</td><td>Improvement on F1 2.3</td></tr><tr><td>PKU05</td><td>5.8</td><td>76.8</td><td>84.1</td><td>7.3</td><td>94.4</td><td>96.4</td><td>2.0</td></tr><tr><td>CITYU05</td><td>7.4</td><td>78.5</td><td>91.1</td><td>12.6</td><td>95.4</td><td>98.4</td><td>3.0</td></tr><tr><td>AS05</td><td>4.3</td><td>71.3</td><td>80.2</td><td>8.9</td><td>95.0</td><td>97.1</td><td>2.1</td></tr></table>",
"text": "Improvement potential testing on SIGHAN-bakeoff 2005 dataset.",
"num": null,
"type_str": "table"
},
"TABREF4": {
"html": null,
"content": "<table><tr><td>Corpora</td><td>Encoding</td><td>Training(MB)</td><td>Test(KB)</td><td>OOV Rate(%)</td></tr><tr><td>MSRA05</td><td>GB</td><td>2.37</td><td>107</td><td>2.6</td></tr><tr><td>AS05</td><td>BIG5</td><td>5.45</td><td>122</td><td>4.3</td></tr><tr><td>CITYU05</td><td>BIG5</td><td>1.46</td><td>41</td><td>7.4</td></tr><tr><td>MSRA06</td><td>GB</td><td>1.26</td><td>100</td><td>3.4</td></tr><tr><td>CTB06</td><td>BIG5</td><td>0.5</td><td>154</td><td>8.8</td></tr></table>",
"text": "Statistics of five datasets ofSIGHAN-bakeoff 2005 and2006.",
"num": null,
"type_str": "table"
},
"TABREF5": {
"html": null,
"content": "<table><tr><td>Corpora MSRA05</td><td>R-OOV CRF 75.6</td><td>F1 CRF 96.6</td><td>R-OOV CRF+Search 79.6</td><td>F1 CRF+Search 97.0</td><td>R-OOV CRF+Search +similarity 80.9</td><td>F1-Measure CRF+Search +similarity 97.1</td></tr><tr><td>AS05</td><td>71.3</td><td>95.0</td><td>75.2</td><td>95.5</td><td>75.8</td><td>95.8</td></tr><tr><td>CITYU05</td><td>78.5</td><td>95.4</td><td>81.7</td><td>96.0</td><td>82.6</td><td>96.5</td></tr><tr><td>MSRA06</td><td>67.3</td><td>95.3</td><td>75.7</td><td>97.2</td><td>76.5</td><td>97.3</td></tr><tr><td>CTB06</td><td>71.2</td><td>93.0</td><td>78.3</td><td>94.6</td><td>79.5</td><td>94.7</td></tr></table>",
"text": "Performance comparison with baseline system (%).",
"num": null,
"type_str": "table"
},
"TABREF6": {
"html": null,
"content": "<table><tr><td>Corpora MSRA05 AS05 CITYU05</td><td>Participant Wei Jiang Hwee Tou Ng Hwee Tou Ng Yaoyong Li Hwee Tou Ng</td><td>R-OOV Open best 59.0 73.6 68.4 68.6 80.6</td><td>F1-Measure Open best 97.2 96.8 95.6 94.8 96.2</td><td>R-OOV CRF+Search +similarity 80.9 80.9 75.8 75.8 82.6</td><td>F1-Measure CRF+Search +similarity 97.1 97.1 95.8 95.8 96.5</td></tr><tr><td>MSRA06 CTB06</td><td>France Telecom France Telecom Univ. Texas Austin</td><td>83.9 84.0 76.8</td><td>97.9 97.7 94.4</td><td>76.5 76.5 79.5</td><td>97.3 97.3 94.7</td></tr></table>",
"text": "Compare with the best reported results (%).",
"num": null,
"type_str": "table"
},
"TABREF7": {
"html": null,
"content": "<table><tr><td>Corpora C Web</td><td>OOV rate(%) 21.5</td><td>R-OOV CRF (%) 74.8</td><td>F1 CRF (%) 92.6</td><td>R-OOV CRF+Search +similarity (%) 90.3</td><td>F1 CRF+Search +similarity (%) 97.2</td></tr></table>",
"text": "Performance comparison with baseline system.",
"num": null,
"type_str": "table"
}
}
}
}