| { |
| "paper_id": "Y08-1034", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:38:11.739088Z" |
| }, |
| "title": "Sentiment Sentence Extraction Using a Hierarchical Directed Acyclic Graph Structure and a Bootstrap Approach *", |
| "authors": [ |
| { |
| "first": "Kazutaka", |
| "middle": [], |
| "last": "Shimada", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Kyushu Institute of Technology", |
| "location": { |
| "addrLine": "680-4 Iizuka", |
| "postCode": "820-8502", |
| "settlement": "Fukuoka", |
| "country": "Japan" |
| } |
| }, |
| "email": "shimada@pluto.ai.kyutech.ac.jp" |
| }, |
| { |
| "first": "Daigo", |
| "middle": [], |
| "last": "Hashimoto", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Kyushu Institute of Technology", |
| "location": { |
| "addrLine": "680-4 Iizuka", |
| "postCode": "820-8502", |
| "settlement": "Fukuoka", |
| "country": "Japan" |
| } |
| }, |
| "email": "d_hashimoto@pluto.ai.kyutech.ac.jp" |
| }, |
| { |
| "first": "Tsutomu", |
| "middle": [], |
| "last": "Endo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Kyushu Institute of Technology", |
| "location": { |
| "addrLine": "680-4 Iizuka", |
| "postCode": "820-8502", |
| "settlement": "Fukuoka", |
| "country": "Japan" |
| } |
| }, |
| "email": "endo@pluto.ai.kyutech.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "As the World Wide Web rapidly grows, a huge number of online documents are easily accessible on the Web. We obtain a huge number of review documents that include user's opinions for products. To classify the opinions is one of the hottest topics in natural language processing. In general, we need a large amount of training data for the classification process. However, construction of training data by hand is costly. The goal of our study is to construct a sentiment tagging tool for particular domains. In this paper, we propose a method of sentiment sentence extraction for the 1st step of the system. For the task, we use a Hierarchical Directed Acyclic Graph (HDAG) structure. We obtained high accuracy with the graph based approach. Furthermore, we apply a bootstrap approach to the sentiment sentence extraction process. The experimental result shows the effectiveness of the method.", |
| "pdf_parse": { |
| "paper_id": "Y08-1034", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "As the World Wide Web rapidly grows, a huge number of online documents are easily accessible on the Web. We obtain a huge number of review documents that include user's opinions for products. To classify the opinions is one of the hottest topics in natural language processing. In general, we need a large amount of training data for the classification process. However, construction of training data by hand is costly. The goal of our study is to construct a sentiment tagging tool for particular domains. In this paper, we propose a method of sentiment sentence extraction for the 1st step of the system. For the task, we use a Hierarchical Directed Acyclic Graph (HDAG) structure. We obtained high accuracy with the graph based approach. Furthermore, we apply a bootstrap approach to the sentiment sentence extraction process. The experimental result shows the effectiveness of the method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "As the World Wide Web rapidly grows, a huge number of online documents are easily accessible on the Web. Finding information relevant to user needs has become increasingly important. The most important information on the Web is usually contained in the text. We obtain a huge number of review documents that include user's opinions for products. Buying products, users usually survey the product reviews. More precise and effective methods for evaluating the products are useful for users. To classify the opinions is one of the hottest topics in natural language processing. Many researchers have recently studied extraction and classification of opinions (Hatzivassiloglou and McKeown 1997 , Kobayashi et al. 2005 , Pang et al. 2002 , Wiebe and Riloff 2005 .", |
| "cite_spans": [ |
| { |
| "start": 657, |
| "end": 691, |
| "text": "(Hatzivassiloglou and McKeown 1997", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 692, |
| "end": 715, |
| "text": ", Kobayashi et al. 2005", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 716, |
| "end": 734, |
| "text": ", Pang et al. 2002", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 735, |
| "end": 758, |
| "text": ", Wiebe and Riloff 2005", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "There are many research areas for sentiment analysis; extraction of sentiment expressions, identification of sentiment polarity of sentences, classification of review documents and so on. The goal of our study is to easily construct a corpus for sentiment information for particular domains that users want. For the purpose we need to extract sentiment sentences from documents as the 1st step of the corpus construction. Extraction of sentiment expressions or sentiment sentences is one of the most important tasks in the sentiment analysis because classification tasks usually need a large amount of training data to generate a high accuracy classifier. There are several reports for classification of sentences (Kudo and Matsumoto 2004, Osajima et al .2005) . However, the purpose of these studies is to classify sentences into positive and negative opinions. Our purpose in this paper is to classify sentences into opinions and non-opinions. Touge et al. (2004) and Kawaguchi et al. (2006) have proposed methods for opinion extraction. However, these approaches essentially need a large amount of training data for the process. Construction of training data by hand is costly. Kaji and Kitsuregara (2006) have reported a method of acquisition of sentiment sentences in HTML documents. The method required only several rules by hand and obtained high accuracy. Also they have proposed a method for building lexicon for sentiment analysis (Kaji and Kitsuregawa 2007) . The knowledge extracted from the Web by using the proposed methods contains the huge quantities of words and sentences. Takamura et al. (2005) also have reported a method for extracting polarity of words. These dictionaries are versatile and valuable for users because they do not depend on a specific domain. Here, assume that we need to construct a system for a domain. In that case, we often desire domain-specific knowledge for the system. 
Therefore, we need to efficiently extract sentiment sentences, which depend on a particular domain or topic.", |
| "cite_spans": [ |
| { |
| "start": 714, |
| "end": 723, |
| "text": "(Kudo and", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 724, |
| "end": 760, |
| "text": "Matsumoto 2004, Osajima et al .2005)", |
| "ref_id": null |
| }, |
| { |
| "start": 946, |
| "end": 965, |
| "text": "Touge et al. (2004)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 970, |
| "end": 993, |
| "text": "Kawaguchi et al. (2006)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1181, |
| "end": 1208, |
| "text": "Kaji and Kitsuregara (2006)", |
| "ref_id": null |
| }, |
| { |
| "start": 1441, |
| "end": 1468, |
| "text": "(Kaji and Kitsuregawa 2007)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1591, |
| "end": 1613, |
| "text": "Takamura et al. (2005)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "In this paper, we propose a method of sentiment sentence extraction. It uses several sample sentences for the extraction process. In the process, we compute a similarity between the sample sentences and target sentences. Yu and Hatzivassiloglou (2003) have reported a similarity based method using words, phrases and WordNet synsets for sentiment sentence extraction. However, word-level features are not always suitable for the extraction process because of lack of relations between words. For the similarity calculation we, therefore, employ the graph-based approach, called Hierarchical Directed Acyclic Graph (HDAG), which has been proposed by Suzuki et al. (2006) . Furthermore, we apply a bootstrap approach into the sentiment sentence extraction process. The number of extracted sentiment sentences increases with the bootstrap approach.", |
| "cite_spans": [ |
| { |
| "start": 221, |
| "end": 251, |
| "text": "Yu and Hatzivassiloglou (2003)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 649, |
| "end": 669, |
| "text": "Suzuki et al. (2006)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "In this section, we explain a graph-based data structure to compute a similarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Graph-based Data Structure", |
| "sec_num": "2." |
| }, |
| { |
| "text": "In natural language processing, bag-of-words representation is the most general way to express features of a sentence for the similarity calculation. However, it is insufficient to represent the features of a sentence because of lack of relations between words. To solve the problems, Suzuki et al. (2006) have reported a new graph-based approach, called Hierarchical Directed Acyclic Graph kernels (HDAG). The method can handle many linguistic features in a sentence and includes characteristics of tree and sequence kernels. The HDAG is a hierarchized graph-in-graph structure. It represents semantic or grammatical information in a sentence. In this paper, we use the HDAG structure for the sentiment sentence extraction. We compute a similarity between HDAGs generated from sentences. See (Suzuki et al. 2006) for more information about the HDAG.", |
| "cite_spans": [ |
| { |
| "start": 285, |
| "end": 305, |
| "text": "Suzuki et al. (2006)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 793, |
| "end": 813, |
| "text": "(Suzuki et al. 2006)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hierarchical Directed Acyclic Graph", |
| "sec_num": "2.1." |
| }, |
| { |
| "text": "Layers in the HDAG denote grammatical information in a sentence. To compute similarity between sentences correctly, we add some layers to a naive HDAG structure. The HDAG in this paper consists of three layers as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Layer", |
| "sec_num": "2.2." |
| }, |
| { |
| "text": "\u2022 Combined POS tag layer", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Layer", |
| "sec_num": "2.2." |
| }, |
| { |
| "text": "This layer consists of part of speech tags of words. We unify the POS tags of words in a bunsetsu 1 into one node. This layer expresses a pack of POS tags in each bunsetsu.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Layer", |
| "sec_num": "2.2." |
| }, |
| { |
| "text": "This layer consists of the POS tags of each word or each compound noun.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u2022 POS tag layer", |
| "sec_num": null |
| }, |
| { |
| "text": "This layer contains the surface expression of each word. We can use the surface information for calculation of similarity by adding this layer. The layer is used for handling compound nouns in bunsetsus. This layer often resolves a problem of difference between surface expressions. We unify nouns belonging to a compound noun and then dispose it under the POS node of its compound nouns. For example, we flexibly treat the difference of the following expressions in similarity calculation by adding this layer: \"file downloading software\", \"downloading software\" and \"software\". Figure 1 shows an example of an HDAG expression in this paper. In the HDAG, the elements, such as \"Bunsetsu\" and \"Common noun\", in each rectangle are the attributes of each node. The directed links are a kind of the dependency relation between elements. The double-headed arrows denote the link between a node and a sub-graph enclosed with a dashed line.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 580, |
| "end": 588, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "\u2022 Word/Compound noun layer", |
| "sec_num": null |
| }, |
| { |
| "text": "Tensou Sokudo ga Osoi desu.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u2022 Word/Compound noun layer", |
| "sec_num": null |
| }, |
| { |
| "text": "(The transfer rate is low.)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u2022 Word/Compound noun layer", |
| "sec_num": null |
| }, |
| { |
| "text": "Bunsetsu Bunsetsu ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "\u2022 Word/Compound noun layer", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section, we explain a method of similarity calculation based on the HDAG structure. First, we describe a conversion method of sentences into the HDAGs. Next, we explain an extraction method of hierarchical attribute subsequences from HDAG structures for the similarity calculation. Finally, we introduce a method of similarity calculation and the extraction process using it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity Calculation", |
| "sec_num": "3." |
| }, |
| { |
| "text": "There are two processes as the preprocessing for similarity calculation; conversion and extraction of hierarchical attribute subsequences. First we explain the conversion process. To convert sentences into the HDAG structure, we need to analyze them, that is morphological analysis and dependency analysis. In this paper we use JUMAN 2 as the morphological analyzer and KNP 3 as the dependency analyzer. Next we need to extract hierarchical attribute subsequences for the similarity calculation. A hierarchical attribute subsequence is an attribute list with hierarchical structures. The similarity is computed from correspondence of hierarchical attribute subsequences extracted from sentences that we want to compare.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Here Suzuki et al. (2006) introduced two factors; \u03b2 and \u03b3 . The", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 25, |
| "text": "Suzuki et al. (2006)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "\u03b2 ( 1 0 \u2264 < \u03b2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": ") is the factor for the correspondence. The value of each hierarchical attribute sequence is multiplied by m \u03b2 where m represents the number of attributes in the hierarchical attribute sequence. The \u03bb is the decay factor ( 1 0 \u2264 \u2264 \u03bb ). The system allows not only exactly matching structures but also similar structures by using this factor. The actual decay value of a skipping node v i is 1 ) (", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "+ = \u039b n i v \u03bb", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "where n is the number of nodes in a graph if vertical link exists, or \u03bb = \u039b ) (v otherwise. Figure 2 shows an example of hierarchical attribute subsequences and the factors 4 . In the figure, a dependency relation and a hierarchical relation are expressed by using a comma and a nested structure, respectively. For the two HDAGs, the hierarchical attribute subsequence <Bunsetsu <Noun, Particle>, Bunsetsu <Adj, Aux>> appears in both sentences. Since the number of attributes in the hierarchical attribute subsequence is 6, the value of \u03b2 is 3 6 \u03b2 \u03b2 = .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 92, |
| "end": 100, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "The hierarchical attribute subsequence of the 2nd sentence in the figure is generated by skipping a node, Adverb. Therefore, the weight contains ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "Next, we compute a similarity between two HDAG structures. First, we search the common hierarchical attribute subsequences between HDAGs (See Figure 2) . Then we multiply the weight values of them. For example, the correspondence of them in Figure 2 is 6 2 \u03b2 \u03bb . Finally, we divide the sum total of correspondence values by the product of the numbers of bunsetsus of the two sentences. We handle this value as the similarity between them.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 142, |
| "end": 151, |
| "text": "Figure 2)", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 241, |
| "end": 249, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Similarity between Two Sentences", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "Bunsetsu Bunsetsu Here we consider the factor \u03b2 . Suzuki et al. (2006) . If the \u03b2 is more than 1, it computes the similarity focusing on surface expressions (SE). This is due to layers that we constructed. In our layers, the word layer and compound noun layer are lower layer than the structural layer, i.e., the POS tag layer. Therefore surface expressions are treated as important element in the case that 1 > \u03b2 because the elements in deeper layers possess high weight values. We apply these two types of the parameter \u03b2 to our method.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 70, |
| "text": "Suzuki et al. (2006)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity between Two Sentences", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "In this subsection, we explain the sentence extraction process based on the HDAG and the similarity calculation. The process is as follows: 1. prepare sample sentences as seeds for similarity calculation, 2. compute the similarity between each seed and target sentences, 3. extract n-best lists of each seed as sentiment sentence lists, 4. combine n-best lists obtained by two different parameters of \u03b2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Extraction", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "For the combination in the last step, we compare two strategies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Extraction", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "\u2022 CombAND We extract the intersection of each n-best list as the output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Extraction", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "\u2022 CombOR We extract the union of each n-best list as the output. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Extraction", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "The method proposed in the previous section contains a problem; the number of sentences extracted from target sentences. It depends on the number of seed sentences and the number of n of n-best. Preparing many seed sentences is usually high cost for users. If the number of n becomes large, the number of extracted sentences increases. However, it leads to decrease of the accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bootstrap Approach", |
| "sec_num": "4." |
| }, |
| { |
| "text": "To solve this problem, we apply a bootstrap approach to our method. In the bootstrap approach, we use both CombAND and CombOR as the situation demands. We use the CombAND to acquire new seed sentences because it usually generates the high accuracy. On the other hand, we use the CombOR in the final step of the bootstrap process because it usually extracts more sentences than the CombAND. In addition, we change the value of n in each step of the bootstrap approach. In other words, we decrease the value in the bootstrap process. In the bootstrap approach, the method often extracts incorrect sentences as sentiment sentences, and add them into the seed sentence list. The method tends to extract incorrect sentences in the later steps of the bootstrap process. Therefore we set a larger value to the n in the early steps and a smaller value to it in the later steps. The process is as follows: 1. extract sentences with CombAND by using current seed sentences, 2. add the extracted sentences into the seed sentence list as new seeds, 3. decrease the value of n, 4. iterate the process 1-3, m-times, 5. extract sentences with CombOR as the final output. Figure 4 shows the outline of the bootstrap process. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1156, |
| "end": 1164, |
| "text": "Figure 4", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Bootstrap Approach", |
| "sec_num": "4." |
| }, |
| { |
| "text": "In this section we evaluated the proposed method with a review document set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "5." |
| }, |
| { |
| "text": "We used review documents of a portable audio player 5 posted in the bulletin board system of kakaku.com 6 . We extracted 1052 Japanese sentences from the review documents. The dataset consists of 610 sentiment sentences and 442 non-sentiment sentences. For the experiment, we prepared 10 sample sentences as seeds for the sentence extraction process. All the seed sentences in this experiment were sentiment sentences. We generated the seed sentences on the basis of some evaluation criteria which were mentioned in the review documents; e.g., \"design of the product\", \"Sound quality\" and so on.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and Criteria", |
| "sec_num": "5.1." |
| }, |
| { |
| "text": "In this experiment, we set 5 . 0 = \u03bb . Also we set 5 . 0 = \u03b2 as the parameter for focusing on structural information and 5 . 1 = \u03b2 as the parameter for focusing on surface expressions. The number of sentences we extracted in this experiment is 5 for each seed sentence, that is 5-best list. In other words, we extracted the top 5 sentences that possessed high similarity as the sentiment sentences that were estimated from each sample sentences. We did not employ any thresholds for the similarity in the extraction process. For the bootstrap approach, we set the number of iterations as 3. The values of n in each step were n=7 in the 1st step, n=5 in the 2nd step and n=3 in the final step, respectively. We used the following three criteria for this evaluation. Table 1 shows the experimental result. In the table, the BOW denotes a similarity calculation method based on the cosine measure and bag-of-words features. This is a baseline in this experiment. The accuracy rates of each non-bootstrap approach in our method outperformed the baseline method based on BOW features. Our methods obtained high accuracies even without combinations, namely CombAND and CombOR. In addition, the method focusing on surface expressions (SE) outperformed the method focusing on structural information (SI) in terms of all criteria. For the combinations, the accuracy of the CombOR was the lowest of the methods although the number of sentiment sentences extracted correctly was the best of them. On the other hand, the accuracy of the CombAND produced the best performance. Although the number of extracted sentences with the CombAND drastically decreased, the output possessed high reliability. By using the bootstrap approach, the number of correct sentences nearly tripled although the accuracy decreased somewhat. This result shows the effectiveness of applying the bootstrap approach to our method. 
As an additional experiment, we evaluated our method with a fixed n; n=5, that is an experiment without decrease of n in each step. In the additional experiment, the accuracy rate became 0.78. This result shows the importance of changing n in each step of the bootstrap approach. Therefore we need to consider the determination of the appropriate n in each step. One reason of the decrease of the accuracy with the bootstrap approach is that our method occasionally added non-sentiment sentences as seed sentences in each step. One of the solutions to this problem is to apply feedback from users to the bootstrap process, such as relevance feedback. This is one future work for our method. The number of extracted sentences increased by using the bootstrap approach. However, the recall rate was insufficient. It was 0.256 even if we applied the bootstrap approach to the extraction process. One approach to improve the recall rate is use of the extracted sentences for the training data of the sentiment classification task. Wiebe and Riloff (2005) have proposed a method for creating subjective and objective classifiers from unannotated texts. They used some rules for constructing initial training data. Then they used the data for generating a classifier. We think that the outputs from our method also can be used for the training data of a classifier for this sentiment sentence classification task. ", |
| "cite_spans": [ |
| { |
| "start": 2921, |
| "end": 2944, |
| "text": "Wiebe and Riloff (2005)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 765, |
| "end": 772, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset and Criteria", |
| "sec_num": "5.1." |
| }, |
| { |
| "text": "In this paper, we proposed a method of sentiment sentence extraction based on a graph-based approach, called Hierarchical Directed Acyclic Graph (HDAG). Our method can extract sentiment sentences by using several sample sentences. We obtained high accuracy in the experiment. However, the number of extracted sentences was not enough, that is the recall rate was extremely low. To solve this problem, we applied a bootstrap approach to out method. As a result, we acquired more sentiment sentences by adding the extracted sentences as new seeds. By using the bootstrap approach based on the combination of CombAND and CombOR, we obtained large quantities of sentences as compared with the non-bootstrap approach. However, even the recall rate with the bootstrap approach was insufficient. Therefore we need to discuss solutions to the problem. One approach is to apply feedback from users to the method. We need to consider a semi-automatic approach based on human-aid to accomplish high recall and accuracy rates. Some researchers have reported approaches with structured features (Zhang et al. 2006 and Zhou et al. 2007) . We need to compare our method with them. In this experiment, we evaluated our method with fixed parameters. However, they are not always the best parameter values. In previous work, we compared several values of these parameters (Shimada et al. 2008) . By tuning the parameters, we obtained higher accuracy for the SI and SE. However, these values depended on the dataset in the experiment. Therefore we need to consider an automatic determination method of these parameters to achieve higher accuracy for the bootstrap approach. Our future work includes (1) evaluation of our method in a large-scale dataset and other datasets, (2) improvement of the accuracy by adding other layers to the HDAG structure, such as semantic features of words (Ikehara et al. 1997) , and (3) construction of a sentiment sentence maintenance tool based on this approach.", |
| "cite_spans": [ |
| { |
| "start": 1082, |
| "end": 1104, |
| "text": "(Zhang et al. 2006 and", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1105, |
| "end": 1122, |
| "text": "Zhou et al. 2007)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1354, |
| "end": 1375, |
| "text": "(Shimada et al. 2008)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1867, |
| "end": 1888, |
| "text": "(Ikehara et al. 1997)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6." |
| }, |
| { |
| "text": "22nd Pacific Asia Conference on Language, Information and Computation, pages 341-349", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "A bunsetsu is a linguistic unit in Japanese. It usually consists of one content word and its function words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://nlp.kuee.kyoto-u.ac.jp/nl-resource/juman.html 3 http://nlp.kuee.kyoto-u.ac.jp/nl-resource/knp.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In this simplified explanation, the graph structure for an example is expressed without the layers described in the previous section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "SONY Walkman NW-A808 6 http://www.kakaku.com/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Predicting the Semantic Orientation of Adjectives", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "R" |
| ], |
| "last": "Mckeown", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics (ACL) and the 8th Conference of the European Chapter of the Association for Computational Linguistics (EACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "174--181", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hatzivassiloglou, V. and K. R. McKeown. 1997. Predicting the Semantic Orientation of Adjectives. In Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics (ACL) and the 8th Conference of the European Chapter of the Association for Computational Linguistics (EACL), pp.174-181.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Automatic Construction of Polarity-tagged Corpus from HTML Documents", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Kaji", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kitsuregawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st International Conference on Computational Linguistics (COLING/ACL2006)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaji, N. and M. Kitsuregawa. 2006. Automatic Construction of Polarity-tagged Corpus from HTML Documents. In Proceedings of the 21st International Conference on Computational Linguistics (COLING/ACL2006), pages 452.459.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Building Lexicon for Sentiment Analysis from Massive HTML Documents", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Kaji", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kitsuregawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP-CoNLL2007)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaji, N. and M. Kitsuregawa. 2007. Building Lexicon for Sentiment Analysis from Massive HTML Documents. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP-CoNLL2007).", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Opinion Extraction from Weblog Using SVM and Newspaper Article", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kawaguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Matsui", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ohwada", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
|                 "venue": "The 20th Annual Conference of the Japanese Society for Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kawaguchi, T., T. Matsui and H. Ohwada. 2006. Opinion Extraction from Weblog Using SVM and Newspaper Article (in Japanese). In The 20th Annual Conference of the Japanese Society for Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Opinion Extraction Using a Learningbased Anaphora Resolution Technique", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Kobayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Iida", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Inui", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Matsumoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Second International Joint Conference on Natural Language Processing (IJCNLP-05)", |
| "volume": "", |
| "issue": "", |
| "pages": "175--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kobayashi, N., R. Iida, K. Inui and Y. Matsumoto. 2005. Opinion Extraction Using a Learning- based Anaphora Resolution Technique. In Proceedings of the Second International Joint Conference on Natural Language Processing (IJCNLP-05), pp. 175-180.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A Boosting Algorithm for Classification of Semi-structured Text", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Matsumoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kudo, T. and Y. Matsumoto. 2004. A Boosting Algorithm for Classification of Semi-structured Text. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Classification of Evaluative Sentences Using Sequential Patterns", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Osajima", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Shimada", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Endo", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
|                 "venue": "Proceedings of the 11th Annual Meeting of The Association for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Osajima, I., K. Shimada and T. Endo. 2005. Classification of Evaluative Sentences Using Sequential Patterns. In Proceedings of the 11nd Annual Meeting of The Association for Natural Language Processing (in Japanese).", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Thumbs up? Sentiment Classification Using Machine Learning Techniques", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Vaithyanathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pang, B., L. Lee, and S. Vaithyanathan. 2002. Thumbs up? Sentiment Classification Using Machine Learning Techniques. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 79-86.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A Graph-based Approach for Sentiment Sentence Extraction", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Shimada", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hashimoto", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Endo", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "First International Workshop on Algorithms for Large-Scale Information Processing in Knowledge Discovery (ALSIP 2008), Working Note", |
| "volume": "", |
| "issue": "", |
| "pages": "42--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shimada, K., D. Hashimoto and T. Endo. 2008. A Graph-based Approach for Sentiment Sentence Extraction. First International Workshop on Algorithms for Large-Scale Information Processing in Knowledge Discovery (ALSIP 2008), Working Note, pp. 42-51.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
|                 "title": "Hierarchical Directed Acyclic Graph Kernel", |
|                 "authors": [ |
|                     { |
|                         "first": "J", |
|                         "middle": [], |
|                         "last": "Suzuki", |
|                         "suffix": "" |
|                     }, |
|                     { |
|                         "first": "Yutaka", |
|                         "middle": [], |
|                         "last": "Sasaki", |
|                         "suffix": "" |
|                     }, |
|                     { |
|                         "first": "Eisaku", |
|                         "middle": [], |
|                         "last": "Maeda", |
|                         "suffix": "" |
|                     } |
|                 ], |
|                 "year": 2006, |
|                 "venue": "Systems and Computers in Japan", |
| "volume": "37", |
| "issue": "", |
| "pages": "58--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suzuki, J., Yutaka Sasaki and Eisaku Maeda. 2006. Hierarchical Directed Acyclic Graph Kernel. Systems and Computers in Japan, 37(10), pp. 58-68.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Extracting Semantic Orientations of Words Using Spin Model", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Takamura", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Inui", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Okumura", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL2005)", |
| "volume": "", |
| "issue": "", |
| "pages": "133--140", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takamura, H., T. Inui and M. Okumura.2005. Extracting Semantic Orientations of Words Using Spin Model. In Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL2005), pp. 133-140.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Extracting Opinion Sentence Adapted to Topic Using Iteration Learning", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Touge", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Ohashi", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Yamamoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "IPSJ SIG Notes", |
| "volume": "", |
| "issue": "", |
| "pages": "43--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Touge, Y., K. Ohashi and K. Yamamoto.2004. Extracting Opinion Sentence Adapted to Topic Using Iteration Learning (in Japanese). In IPSJ SIG Notes, pp. 43-50.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Thumbs up? or Thumbs down? Semantic Orientation Applied to Unsupervised Classification of Reviews", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "D" |
| ], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "417--424", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Turney, P. D. 2002. Thumbs up? or Thumbs down? Semantic Orientation Applied to Unsupervised Classification of Reviews. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 417-424.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Creating Subjective and Objective Sentence Classifiers from Unannotated Texts", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Riloff", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Sixth International Conference on Intelligent Text Processing and Computational Linguistics (CICLing-2005)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wiebe, J. and E. Riloff. 2005. Creating Subjective and Objective Sentence Classifiers from Unannotated Texts. In Sixth International Conference on Intelligent Text Processing and Computational Linguistics (CICLing-2005).", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Towards Answering Opinion Questions: Separating Facts from Opinions and Identifying the Polarity of Opinion Sentences", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "129--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu, H. and V. Hatzivassiloglou. 2003. Towards Answering Opinion Questions: Separating Facts from Opinions and Identifying the Polarity of Opinion Sentences. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pp.129-136.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A Composite Kernel to Extract Relations between Entities with Both Flat and Structured Features", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "825--832", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, M., J. Zhang, J. Su, G. Zhou. 2006. A Composite Kernel to Extract Relations between Entities with Both Flat and Structured Features. In Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics, pp. 825-832.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Tree Kernel-Based Relation Extraction with Context-Sensitive Structured Parse Tree Information", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "728--736", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhou, G., M. Zhang, D. Ji and Q. Zhu. 2007. Tree Kernel-Based Relation Extraction with Context-Sensitive Structured Parse Tree Information. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL 2007), pp. 728-736.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "An example of an HDAG expression.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "These weights are used in the similarity calculation process.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF2": { |
| "text": "An example of corresponding hierarchical attribute subsequences and the weight.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF4": { |
| "text": "The outline of the sentence extraction process.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF5": { |
| "text": "The outline of the bootstrap approach process.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF6": { |
| "text": "Sent real : This criterion is the number of sentiment sentences extracted correctly from target sentences. \u2022 Sent non : This criterion is the number of non-sentiment sentences extracted from target sentences. \u2022 Acc: This criterion is the accuracy computed from Sent real and Sent non . Recall: This criterion is the recall rate computed from Sent real and the number of sentiment sentences in the dataset.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td/><td>Sent real</td><td>Sent non</td><td>Acc</td><td>Recall</td></tr><tr><td>Structural information</td><td>32</td><td>4</td><td>0.889</td><td>0.052</td></tr><tr><td>Surface expression</td><td>43</td><td>4</td><td>0.915</td><td>0.070</td></tr><tr><td>CombAND</td><td>22</td><td>1</td><td>0.957</td><td>0.002</td></tr><tr><td>CombOR</td><td>53</td><td>7</td><td>0.883</td><td>0.087</td></tr><tr><td>Bootstrap</td><td>156</td><td>30</td><td>0.839</td><td>0.256</td></tr><tr><td>BOW (Baseline)</td><td>42</td><td>7</td><td>0.857</td><td>0.069</td></tr></table>", |
| "html": null, |
| "text": "The experimental result.", |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |