| { |
| "paper_id": "O10-3003", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:06:38.330741Z" |
| }, |
| "title": "Tourism-Related Opinion Detection and Tourist-Attraction Target Identification", |
| "authors": [ |
| { |
| "first": "Chuan-Jie", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan Ocean University", |
| "location": { |
| "addrLine": "No 2, Pei-Ning Road", |
| "postCode": "20224", |
| "settlement": "Keelung", |
| "country": "Taiwan" |
| } |
| }, |
| "email": "cjlin@ntou.edu.tw" |
| }, |
| { |
| "first": "Pin-Hsien", |
| "middle": [], |
| "last": "Chao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan Ocean University", |
| "location": { |
| "addrLine": "No 2, Pei-Ning Road", |
| "postCode": "20224", |
| "settlement": "Keelung", |
| "country": "Taiwan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hsien", |
| "middle": [], |
| "last": "Chao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Taiwan Ocean University", |
| "location": { |
| "addrLine": "No 2, Pei-Ning Road", |
| "postCode": "20224", |
| "settlement": "Keelung", |
| "country": "Taiwan" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper focuses on tourism-related opinion mining, including tourism-related opinion detection and tourist-attraction target identification. The experimental data are blog articles labeled as being in the domestic tourism category in a blogspace. Annotators were asked to annotate the opinion polarity and the opinion target for every sentence. Different strategies and features have been proposed to identify opinion targets, including tourist attraction keywords, coreferential expressions, tourism-related opinion words, and a 2-level classifier. We used machine learning methods to train classifiers for tourism-related opinion mining. A retraining mechanism is proposed to obtain the system decisions of preceding sentences. The precision and recall scores of tourism-related opinion detection were 55.98% and 59.30%, respectively, and the scores of tourist attraction target identification among known tourism-related opinionated sentences were 90.06% and 89.91%, respectively. The overall precision and recall scores were 51.30% and 54.21%, respectively.", |
| "pdf_parse": { |
| "paper_id": "O10-3003", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper focuses on tourism-related opinion mining, including tourism-related opinion detection and tourist-attraction target identification. The experimental data are blog articles labeled as being in the domestic tourism category in a blogspace. Annotators were asked to annotate the opinion polarity and the opinion target for every sentence. Different strategies and features have been proposed to identify opinion targets, including tourist attraction keywords, coreferential expressions, tourism-related opinion words, and a 2-level classifier. We used machine learning methods to train classifiers for tourism-related opinion mining. A retraining mechanism is proposed to obtain the system decisions of preceding sentences. The precision and recall scores of tourism-related opinion detection were 55.98% and 59.30%, respectively, and the scores of tourist attraction target identification among known tourism-related opinionated sentences were 90.06% and 89.91%, respectively. The overall precision and recall scores were 51.30% and 54.21%, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The blogspace is a large resource for opinion mining. Opinion extraction methods are valuable for a wide range of applications.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Our initial interest is to extract opinions related to tourist attractions from blog articles because it is helpful to see other people's opinions about tourist attractions when planning a tour. Nevertheless, two issues arise when trying to apply published methods to retrieve opinions of tourist attractions: conventional definition, an opinion target in a tourism-related opinion is not always the name of the tourist attraction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Therefore, we define tourism-related opinion mining as a new topic and propose several approaches to solve the problem, including rule-based approaches and machine learning approaches. Although the experimental data used in this paper are written in Chinese, many of the rules and features are not language-dependent or can be easily adopted if necessary resources are available. We also hope that the experience gained from these experiments can be applied to other domains where articles are often multi-topic, such as baseball game critics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "The structure of this paper is as follows. Section 2 presents the main ideas of tourism-related opinion identification and introduces the resources prepared for the work. Section 3 describes the design of a rule-based opinion identification system. Section 4 defines the features for training classifiers to build an opinion identification system. Section 5 discusses the experimental results, and Section 6 concludes this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Opinionated sentences related to tourist attractions are the main interest of this paper. We call such an opinionated sentence a tourism-related opinion (hereafter \"TR-opinion\") and its targeted tourist attraction a tourist attraction target (hereafter \"TA-target\").", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The main goal of this paper is to retrieve TR-opinions and determine their TA-targets. That is, given an opinionated sentence, determine whether it is tourism-related or not, and decide which tourist attraction is the focus of this opinion. Our experiments were performed based on two assumptions: (1) sentences have been correctly tagged as 'opinionated' or not;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(2) tourist attraction names appearing in a document have been correctly recognized. Hence, we have not integrated an opinion detection module and a tourist-attraction recognition module into our system yet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Opinion identification is not the main focus of this paper. There has been a lot of research on this topic. In the future, we would like to perform well-developed methods to do opinion detection in order to build a full system. In this paper, though, the input sentences are those sentences correctly labeled as opinions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Tourist attraction name recognition also is not a focus of this paper. It requires a named entity recognition system specifically designed for tourist attraction names, but we cannot find one. Although some of the tourist attractions are locations or organizations, such as parks or museums, there are various types of names, such as monuments or scenic spots that would need to be learned. In this paper, we simply prepare a list of tourist attraction names and manually check the correctness of the occurrences of the attraction names in the articles.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Tourist attraction name recognition will be studied in the future.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The main ideas in accomplishing the tasks are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(1) Some opinion words strongly hint that a sentence is tourism-related.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(2) The frequency of use of a tourist attraction and its distance to an opinionated sentence can be useful information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(3) A tourist attraction can be expressed in several ways in an article. This is the well-known coreference problem.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(4) A sentence may target a tourist attraction if its preceding sentence also focuses on a tourist attraction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Before designing rules or features according to these ideas, some resources were prepared beforehand, as described in the following subsections.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Definition", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The best known benchmarks for opinion mining are the NTCIR MOAT datasets (Seki et al., 2010) . There was one pilot task in NTCIR-6 and were two formal tasks in NTCIR-7 and NTCIR-8. There are a total of 70 topics in Traditional Chinese. Nevertheless, none of their information need is about tourism attraction opinions. Although some topics may bring in tourism-related documents, such as the terrorist bombing on Bali Island and the tsunami in Sumatra, the number of topics is too small, and we still have to find TR-opinions among the opinionated sentences. For these reasons, we decided to build a new experimental dataset in the tourism domain.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 92, |
| "text": "(Seki et al., 2010)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Dataset Preparation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "200 travel articles were collected from a blog site called Wretch 1 (\u7121\u540d\u5c0f\u7ad9). These articles were categorized as \"domestic travel\" on the blog site. We chose the most recommended articles by the readers in order to assure that the articles were truly about travel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Dataset Preparation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Three annotators were asked to annotate the data. Each sentence was labeled as opinionated or not, its opinion polarity was assigned, and its TA-target was found if the annotator considered it a TR-opinion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Dataset Preparation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The guidelines of TA-target decision for the annotators are as follows. Given a document, a list of tourist attractions mentioned in the document is shown to the annotators. A TA-target must be one of the tourist attractions on the list. If an opinion is made on a part of a tourist attraction (e.g. the souvenir shop in an amusement park), its TA-target is set to be the tourist attraction. If an opinionated sentence mentions a tourist attraction together with the city it belongs to, its TA-target is set to be the tourist attraction only. A city can be chosen as a TA-target only when the blogger directly expresses his or her feeling about the city. Note that, if a sentence only expresses the blogger's emotion (e.g. \"I am so happy today\"), it is not a TR-opinion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Dataset Preparation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The final annotations of the experimental dataset were determined by two-stage voting. The first stage determined a sentence being positive-, neutral-, negative-, or non-opinionated. The second stage determined the sentence being a TR-opinion or not by deciding its TA-target. In each stage, an option agreed upon by at least two annotators became the final annotation. If no agreement was found, the authors of this paper would choose one of the decisions made by the annotators. Those sentences voted as \"non-opinionated\" in the first stage were automatically labeled as \"not TR-opinion\" in the second stage.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Dataset Preparation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Opinion and Polarity TR-opinion TA-target Table 1 lists the agreement of TR-opinion and TA-target measured by Cohen's kappa coefficient. The first three rows show the agreement among the annotators. The last three rows give the agreement between the final experimental dataset and each annotator. We can see that the agreement level is not high enough. This means TR-opinion detection and TA-target identification are very challenging.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 42, |
| "end": 49, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison", |
| "sec_num": null |
| }, |
| { |
| "text": "Among the 200 articles, 37 of them did not contain a tourist attraction and 7 did not contain a TR-opinion. After removing these articles, there were a total of 10,904 sentences in the remaining 156 articles, with 3,542 opinionated sentences and 1,199 TR-opinions, which leads to a precision rate of 33.9% (1199/3542) if a baseline system guesses all of the opinions as TR-opinions. Table 2 lists the statistical data regarding the number of tourist attractions mentioned in the articles. As we can see, 28 articles contained only one tourist attraction, which means that almost 89% of the articles mentioned multiple tourist attractions, making TA-target detection an issue. There were on average 6.378 tourist attractions mentioned in each article. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 383, |
| "end": 390, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison", |
| "sec_num": null |
| }, |
| { |
| "text": "Some opinion words are more related to tourist attractions than others. Consider the following two examples:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Words", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "I am so excited that the vacation is coming. The lake is so large and clear.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Words", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "The adjective \"excited\" is often used when describing personal feelings. On the other hand, \"clear\" is often seen in sentences describing scenic spots. We can say that opinion words are often domain-dependent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Words", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Many papers have focused on finding domain-specific opinion words and deciding their polarities, as mentioned in Section 1. This, however, is slightly different from our need. \"Domain\" in their works often refers to \"a product type,\" such as digital cameras. Opinion words related to digital cameras are the adjectives used to express the features of digital cameras, such as \"long\" for battery life and \"heavy\" for weight.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Words", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Nevertheless, the question remains as to whether there are common features or attributes among tourist attractions. The feature water or clearness only relates to bodies of water, such as rivers and lakes, while the feature design only relates to buildings. Moreover, there are many adjectives expressing opinions directly without denoting any specific features, such as amazing and beautiful (e.g. \"this city is beautiful\"). Therefore, we want to collect a set of opinion words which are often used in tourism-related opinionated sentences without considering features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Words", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We define a simple function TRscore(ow), the tourism-relatedness score, to estimate the likelihood of an opinion word ow appearing in a TR-opinion by evaluating the ratio of the opinionated sentences where the word ow appears to be tourism-related:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Words", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": ") opinion in ( # ) opinion - TR in ( # ) ( ow ow ow TRscore =", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Tourism-Related Opinion Words", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Opinion words whose TR-scores are higher than a predetermined threshold are collected as the tourism-related opinion words (hereafter \"TR-opword\"). The determination of the value of the threshold of TR-scores is discussed in Section 5.1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Words", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Coreference is an important problem in natural language processing. When a tourist attraction is mentioned in an article, it is quite often expressed in several different ways. Consider the following three sentences selected and adapted from our experimental dataset: We were impressed by the fresh air when we arrived at the resort. Wufeng also thoughtfully provides parking service.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "All three underlined expressions refer to the same tourist attraction \"the Wufeng Resort,\" where \"resort\" is its category, \"Wufeng\" its name, and \"the Wufeng Resort\" its full name.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "It is quite common to refer a tourist attraction by the category keyword in its name. For this reason, we created a list of tourist attraction keywords (hereafter TA-keywords), which are tourist attraction categories. Note that there are several synonymous keywords in the same category. The method of collecting TA-keywords is as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "First, a tourism website called Travel King 2 (\uf983\u904a\u8cc7\u8a0a\u738b) was visited and 1,836 tourist attraction names located in Taiwan were collected. All of the names were written in Chinese without word segmentation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "For every pair of tourist attraction names, their longest common trailing substring was extracted. The substrings containing only one Chinese character were discarded. After having humans check their correctness, 158 TA-keywords were collected, such as \u570b \u5bb6 \u516c \u5712 (national park) and \u6eab\u6cc9 (hot spring).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "We do not resolve the coreference problem directly. Instead, we try to find potential coreferential expressions. The frequency or distance feature of a tourist attraction is measured by the occurrences of all kinds of coreferential expressions of this tourist attraction. The first type of coreference is expressed by the longest TA-keyword found in a tourist attraction's name.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "The list of the TA-keywords may not be complete enough. Some types of names are not in the list. In order to make the system more robust, we also take the trailing substring (the last two characters) of a full name as one of its possible coreferential expressions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Similarly, although we can extract the name part of a tourist attraction by deleting the keyword part from its full name, we simply take its leading substring (the first two characters) as one of its possible coreferential expressions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "The function ref all (a) is defined to denote all possible coreferential expressions of a tourist attraction a. For example, ref all (\u4e94\u5cf0\u6e21\u5047\u6751) = {\u4e94\u5cf0\u6e21\u5047\u6751, \u6e21\u5047\u6751, \u4e94\u5cf0, \u5047\u6751}, i.e. for the tourist attraction \u4e94\u5cf0\u6e21\u5047\u6751, its possible coreferential expressions include its full name \"\u4e94\u5cf0\u6e21\u5047\u6751\" (the Wufeng Resort), its TA-keyword \"\u6e21\u5047\u6751\" (Resort), its leading substring \"\u4e94\u5cf0\" (Wufeng), and its trailing substring \"\u5047\u6751\". An example of coreferential expression detection is given here: In this paragraph, a full name \"the Wufeng Resort\" (the bordered text) appears in the first and the last lines, while its TA-keyword \"resort\" (the first underlined text) is found in the second line and its leading substring \"Wufeng\" (the second underlined text) in the third line.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "The strategy for finding occurrences of tourist attractions in a sentence is longestexpression-first. In other words, given a set of tourist attractions {A 1 , A 2 , \u2026, A m }, we will find the attraction A i whose coreferential expression appearing in this sentence is the longest.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "This strategy has its limitations. If a tourist attraction does not reveal its category in its name, it would be difficult to know its category, such as the Louvre as a museum. Another limitation is to know the hierarchy of the tourist attractions. For example, some people will refer to the Wufeng Resort as a hotel or a park. How to detect a tourist attraction and identify its category will be our future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expressions", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "To describe our approaches more clearly, Table 3 lists the definitions of notations and functions used in this paper to define opinion-mining rules and features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 41, |
| "end": 48, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "The set of opinionated sentences S op and the set of tourist attractions TA appearing in a document D are given in advance. Our goal is to predict a set of TR-opinions S to as similar to the correct set S # to as possible, and determine each TR-opinion's TA-target. Note that we have n sentences and m tourist attractions in a document D, and S # to \u2286 S op \u2286 S. Our rule-based approaches for TR-opinion mining include the following decisions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "(1) Select a set of TR-opinion candidates S c . We can consider only a subset of the opinionated sentences S op as potential TR-opinions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "(2) Select a set of TA-target candidates TA c . We can take only a subset of tourist attractions TA as TA-target candidates. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Nop \u2212 (S i ) ) ( max , k op k S S i k \u2208 <", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": ", the ID of the nearest opinion which precedes S i ; -1 if no preceding opinionated sentences", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Nop + (S i ) ) ( min , k op k S S k i \u2208 <", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": ", the ID of the nearest opinion which follows S i ; \u221e if no following opinionated sentences We can consider only some types of expressions as coreferences to the tourist attraction a.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Sid \u2212 (a, S i ) ) 1 , 1 , ( max ) ( \u2212 \u2208 i x lst a ref x c ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "(4) Determine if a sentence s in S c is a TR-opinion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "(5) Determine which tourist attraction a in TA c is the TA-target of a TR-opinion s.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Two TR-opinion mining rules, Rnt1 and Rnt2, are proposed to guess a sentence S i in S c being a TR-opinion and its TA-target. Their definitions are explained here as illustrated in Table 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 181, |
| "end": 188, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Rule-Based Approaches", |
| "sec_num": "3." |
| }, |
| { |
| "text": "If there is a TA-target candidate appearing inside or before S i , it is predicted as a TR-opinion and its TA-target is the nearest tourist attraction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Nearest Preceding Tourist Attraction Rule (Rnt1):", |
| "sec_num": null |
| }, |
| { |
| "text": "Nearest in-Window Tourist Attraction Rule (Rnt2): Set the window size as b sentences. If there is a TA-target candidate appearing inside, before, or after Si in the same window, it is predicted as a TR-opinion and its TA-target is the nearest tourist attraction. Table 4 and described more clearly in the following paragraphs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 263, |
| "end": 270, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Nearest Preceding Tourist Attraction Rule (Rnt1):", |
| "sec_num": null |
| }, |
| { |
| "text": "(x, 1, i) \u2265 1 ) , 1 , ( max arg ) ( , i x lst a ref x TA a c c \u2208 \u2208", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Nearest Preceding Tourist Attraction Rule (Rnt1):", |
| "sec_num": null |
| }, |
| { |
| "text": "The baseline systems use the simplest way to make the first three decisions: In order to filter non-tourism-related sentences, such as bloggers' sentiments, an opinionated sentence is considered as a TR-opinion candidate only if it contains a TR-opword. The selection of S c is given in the second row of Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 305, |
| "end": 312, |
| "text": "Table 5", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": null |
| }, |
| { |
| "text": "The most frequent tourist attraction appearing in a document D may be the focus of D. Many TR-opinions will target this tourist attraction. So, we only choose the most frequent tourist attractions in an article as the TA-target candidates, i.e. TA c =A maxf .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Most Frequent Tourist Attraction Rule (Rmf)", |
| "sec_num": null |
| }, |
| { |
| "text": "All ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreferential Expression Rule (Rcr)", |
| "sec_num": null |
| }, |
| { |
| "text": "Approaches to build a TR-opinion analysis system by machine learning are described in this section. Such a system takes a whole article (including opinions and non-opinions) as its input and returns a set of TR-opinions together with their TA-targets. Features can be divided into two sets, which are defined in Section 4.1 and Section 4.2. The options of the system's architecture and training techniques are discussed in Section 4.3 and Section 4.4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine Learning Approach", |
| "sec_num": "4." |
| }, |
| { |
| "text": "The first set of features is used to detect TR-opinions, i.e. to determine whether an opinionated sentence S i is tourism-related. Therefore, these features are designed for an opinionated sentence S i . These features are quickly demonstrated in Table 6 and described more clearly in the following paragraphs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 247, |
| "end": 254, |
| "text": "Table 6", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features for TR-Opinion Detection", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The first sentence in an article often states the overall opinion of the author. It is interesting to see if the first sentence is tourism-related. The feature ffs finds the first sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "First Sentence Feature (ffs)", |
| "sec_num": null |
| }, |
| { |
| "text": "If S i contains a TR-opword, it is likely to be a TR-opinion. Based on this idea, two kinds of features are defined: fow all checks if S i contains a TR-opword and fow k checks if S i contains a specific TR-opword ow k . ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opword Features (fow all and fow k )", |
| "sec_num": null |
| }, |
| { |
| "text": "fta_{d-} / ftac_{d-}: 1 \u2212 (i \u2212 Nid\u2212(S_i))/n; fta_{d+} / ftac_{d+}: 1 \u2212 (Nid+(S_i) \u2212 i)/n; fop_{-1}: 1 if Nop\u2212(S_i) = i\u22121, 0 otherwise; fop_{+1}: 1 if Nop+(S_i) = i+1, 0 otherwise; fop_{d-}: 1 \u2212 (i \u2212 Nop\u2212(S_i))/n; fop_{d+}: 1 \u2212 (Nop+(S_i) \u2212 i)/n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opword Features (fow all and fow k )", |
| "sec_num": null |
| }, |
| { |
| "text": "If an opinionated sentence is close to a TR-opinion, it is likely to be tourist-related, as well. Two features are introduced here:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opinion Context Feature (fto)", |
| "sec_num": null |
| }, |
| { |
| "text": "fto -1 : the sentence preceding S i is a TR-opinion fto d-: the distance score of the nearest TR-opinion preceding S i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opinion Context Feature (fto)", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that we do not know the values of these two features for a new article (nor should we when testing on the test set). In such a case, both feature values of the first sentence are set to be 0 because there is no preceding sentence. The predicted result of a sentence will be used to determine the two feature values of its following sentence. More ideas about these features are discussed in Section 4.4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opinion Context Feature (fto)", |
| "sec_num": null |
| }, |
| { |
| "text": "The second set of features is used to identify TA-targets, i.e. to determine whether a tourist attraction A j is the TA-target of an opinionated sentence S i . Therefore, these features are designed for a pair of <S i , A j > given an opinionated sentence S i and a tourist attraction A j . These features are quickly demonstrated in Table 7 and described more clearly in the following paragraphs. The candidates of TA-targets are the set of tourist attractions appearing in the article.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 334, |
| "end": 341, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features for TR-Target Identification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Similar to the idea of the Most-Frequent-Tourist-Attraction Rule, the occurrence of a tourist attraction is taken into account. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Frequency Feature (ffq)", |
| "sec_num": null |
| }, |
| { |
| "text": "fna_{d-} / fnac_{d-}: 1 \u2212 (i \u2212 Sid\u2212(A_j, S_i))/n; fna_{d+} / fnac_{d+}: 1 \u2212 (Sid+(A_j, S_i) \u2212 i)/n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Table 7. Definition of TR-Opinion Detection Features", |
| "sec_num": null |
| }, |
| { |
| "text": "The TR-Opinion Context Feature (fto) is very useful but also dangerous. We conducted an oracle model where the values of the TR-Opinion Context Feature of the test data were set correctly (denoted as fto # ), and found that the performance was the best (as depicted later in Table 10 ). Nevertheless, if the feature values came from the predictions of the classifier, the errors would propagate and harm the performance greatly (also depicted in Table 10 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 275, |
| "end": 283, |
| "text": "Table 10", |
| "ref_id": "TABREF13" |
| }, |
| { |
| "start": 446, |
| "end": 454, |
| "text": "Table 10", |
| "ref_id": "TABREF13" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Retraining by Prediction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We propose a retraining method to use the TR-Opinion Context Feature. Training is performed in three steps. First, set the values of the TR-Opinion Context Feature of the training data correctly to train a preliminary classifier. Use this preliminary classifier to predict the TR-opinions in the training set. Then, use the predictions to assign the values of the TR-Opinion Context Feature of the training data to train a classifier. The second classifier is used to construct the real TA-target identification system. The values of the TR-Opinion Context Feature predicted by the second classifier are denoted as fto 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retraining by Prediction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Our TA-target identification system is constructed as follows: each sentence in an article is paired with each of the tourist attractions appearing in the article and labeled by a classifier. If none of the pairs is classified as positive, this sentence is not a TR-opinion. Otherwise, the sentence is predicted as a TR-opinion and all the tourist attractions in the pairs receiving positive predictions are its TR-targets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Layer and Dual-Layer Models", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The process of TA-target identification can be divided into two steps: detecting TR-opinions and assigning TR-targets to them. Hence, we can train two classifiers for the two steps separately, or train a single classifier to identify the TA-targets directly. Two different Tourism-Related Opinion Detection and Tourist-Attraction Target Identification 51 models are designed, given that the input is a pair of an opinionated sentence S i and a tourist attraction A j .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Layer and Dual-Layer Models", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The classifier directly determines whether the tourist attraction A j is the TR-target of the sentence S i . All of the features introduced in Section 4.1 and 4.2 are used for training even if a feature relates to the sentence S i only.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Single-Layer Model", |
| "sec_num": null |
| }, |
| { |
| "text": "The classification module consists of two classifiers. The first-layer classifier determines whether S i is a TR-opinion. Only features introduced in Section 4.1 are used to train the first-layer classifier. If S i is classified as a TR-opinion, the pair <S i , A j > is passed to the second-layer classifier. The second-layer classifier determines whether A j is the TR-target of S i . Only features introduced in Section 4.2 are used to train the second-layer classifier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dual-Layer Model", |
| "sec_num": null |
| }, |
| { |
| "text": "The experiments shown in this section were all conducted in a leave-one-out cross-validation fashion where each of the 156 articles in the experimental data set was kept out as the test data and the others as the training data in turn.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5." |
| }, |
| { |
| "text": "The number of the positive examples is relatively small compared to the negative examples. We did not evaluate the system by accuracy because the majority prefers guessing all sentences as \"not TR-opinion\". Additionally, in order to create a balanced training set, we randomly selected negative examples in the same amount as the positive examples in each training set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5." |
| }, |
| { |
| "text": "Both TR-opinion detection and TA-target identification are evaluated by the micro-average precision (P), recall (R), and F-measure (F), where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5." |
| }, |
| { |
| "text": "F = 2 \u00d7 P \u00d7 R / (P + R).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5." |
| }, |
| { |
| "text": "For TR-opinion detection, ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5." |
| }, |
| { |
| "text": "As introduced in Section 2.3, we want to find opinion words highly related to tourism. A preliminary experiment was conducted to determine the threshold of TR-scores to select TR-opwords. The candidates of TR-opwords were the opinion words collected in NTUSD, the National Taiwan University Sentiment Dictionary (Ku & Chen, 2007) .", |
| "cite_spans": [ |
| { |
| "start": 312, |
| "end": 329, |
| "text": "(Ku & Chen, 2007)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Word Selection", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The threshold of the TR-scores was determined by the baseline experiment of TR-opinion detection. Set the threshold values varying from 0 to 1 with a step of 0.01 and selected those opinion words whose TR-scores were higher than the threshold to predict TR-opinions by the TR-Opword Rule only. Table 8 shows the results of TR-opinion detection under different threshold settings. The threshold value achieving the best performance was 0.25 and 0.26, but not significantly the best if compared to a nearby setting. We chose 0.25 as the threshold in the following experiments. Note that the sets of TR-opwords were not the same in different iterations of cross-validation because the training sets were different. The second column of Table 8 depicts the average number of TR-opwords selected in each iteration. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 294, |
| "end": 301, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| }, |
| { |
| "start": 733, |
| "end": 740, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tourism-Related Opinion Word Selection", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We used the LIBSVM tool (Fan et al., 2005) to train the classifiers. We chose SVM because some features' domains were sets of real numbers, not strings.", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 42, |
| "text": "(Fan et al., 2005)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments of Machine Learning Approaches", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The dual-layer model first detects the TR-opinions then identifies the TA-targets. We evaluated the first-layer (for TR-opinion detection) and second-layer (for TA-target identification) classifiers separately. Table 10 presents the selected results of TR-opinion detection by different combinations of features where fxx - denotes all fxx features regarding objects preceding the sentence (i.e. fxx -1 and fxx d-), and fxx 0- denotes the feature combination of fxx - and fxx 0 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 211, |
| "end": 219, |
| "text": "Table 10", |
| "ref_id": "TABREF13" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments of Machine Learning Approaches", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The results in Table 10 are represented in groups. The experiments in the first group only used the Tourist Attraction Distance Features (fta). The feature combinations in the second group were suggested by a feature selection method, WLLR, which will be introduced later. The experiments in the third and the fourth groups tried more feature combinations but used the TR-opinion Context Features in different ways. The third group used the TR-opinion Context Feature after Retraining (fto 2 ). The fourth group used correct values for the TR-opinion Context Features (fto # , as oracle model) and prediction by the previously trained model without retraining (fto).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 15, |
| "end": 23, |
| "text": "Table 10", |
| "ref_id": "TABREF13" |
| } |
| ], |
| "eq_spans": [], |
| "section": "TR-Opinion Detection Experiments", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "The fifth one has the best performance achieved by the rule-based model and the final group lists the performances of human annotators which can be regarded as upper bounds.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opinion Detection Experiments", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "The second and the third groups of results show that the TR-opinion Context Feature after Retraining (fto 2 ) is useful, for the best performances were achieved by those feature combinations containing fto 2 . Compared with the fourth group, the oracle model (containing Tourism-Related Opinion Detection and Tourist-Attraction Target Identification 55 fto # ) outperforms other combinations, which concludes that fto # is a great feature but, unfortunately, is unattainable. On the other hand, using the prediction by the classifier without retraining (fto) harmed the performance. We can say that the retraining process did improve the performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opinion Detection Experiments", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "The first group also suggests that the Preceding Tourist Attraction Distance Features with or without Coreferential Expressions (fta - and ftac -) are useful.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opinion Detection Experiments", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "To see the usefulness of features, we used an adapted version of WLLR (Weighted Log Likelihood Ratio) (Nigam et al., 2000) to measure the usefulness of the features. The adapted equation of WLLR in our work is: where f(x) is a feature function which defines a numerical feature value for a given example x, avg(v) means the average over a numerical set v, P and N are the sets of positive examples and negative examples in the training set, respectively. The adaptation is made to make it applicable for both Boolean features (treated as 0 and 1) and numerical features. Table 11 lists the WLLR and averages (over positive and negative examples) of the features. As we can see, the best features according to WLLR are the TR-Opinion Context Features (fto), the Tourist Attraction Distance Features (fta and ftac, with or without coreferential expressions), and the All-TR-Opword Feature (fow all ). The experiments inspired by feature selection are listed in the second group. The results in Table 10 support the predictions by WLLR as the feature combination fow all +ftac 0-+fto 2 performs very well.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 122, |
| "text": "(Nigam et al., 2000)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 571, |
| "end": 579, |
| "text": "Table 11", |
| "ref_id": "TABREF14" |
| }, |
| { |
| "start": 992, |
| "end": 1000, |
| "text": "Table 10", |
| "ref_id": "TABREF13" |
| } |
| ], |
| "eq_spans": [], |
| "section": "TR-Opinion Detection Experiments", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "WLLR(f) = avg_{x \u2208 P}(f(x)) \u00d7 log( avg_{x \u2208 P}(f(x)) / avg_{x \u2208 N}(f(x)) ) (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TR-Opinion Detection Experiments", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "The best performance, however, where an F-measure score of 57.59% is achieved, is by the feature combination using all kinds of features. It outperforms the combination by feature selection significantly (p<0.001). Table 12 lists the experimental results of TA-target identification by different approaches. The second row gives the performance of the second-layer classifier where the first-layer was replaced by a perfect model, i.e. only known TR-opinions were assigned TA-targets. The precision and recall scores were 90.06% and 89.91%, respectively, and the F-measure score was around 90%. This means that the bottleneck of this work is TR-opinion detection. The third row shows the performance of the overall dual-layer system consisting of the best models of the two layers, whose F-measure is 52.72% and is the best among all TA-target identification models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 215, |
| "end": 223, |
| "text": "Table 12", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "TR-Opinion Detection Experiments", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "The models of the fourth and the fifth rows are single-layer classifiers. Even when the correct values of TR-Opinion Context Features (fto # ) are used, they still cannot compete with the dual-layer model. This shows that dual-layer classification is a better approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TA-Target Identification Experiments", |
| "sec_num": "5.3.2" |
| }, |
| { |
| "text": "The sixth row of Table 12 gives the performance of TA-target identification by rules. Although the best rule-based approach performs well in TR-opinion detection, its ability to identify TA-targets is weaker.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 17, |
| "end": 25, |
| "text": "Table 12", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "TA-Target Identification Experiments", |
| "sec_num": "5.3.2" |
| }, |
| { |
| "text": "The last three rows present the performance of the results of the three annotators. We can see that the best F-measure of a ML-based system is about 60% to 75% of human ability. So, there is still room to improve. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TA-Target Identification Experiments", |
| "sec_num": "5.3.2" |
| }, |
| { |
| "text": "This paper aims at detecting tourism-related opinionated sentences and identifying their tourist attraction targets. Several rules and features were invented and tested in different combinations. The performance is improved by building a dual-layer classification system where the classifiers of TR-opinion detection and TA-target identification are trained separately. Retraining by the prediction method is introduced to decide the values of the TR-Opinion Context Features. This feature, together with the tourism-related opinion words and distances to the tourist attractions were verified to be useful. The best overall performance of TA-target identification is 52.72%, which is about 60% to 75% of human ability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6." |
| }, |
| { |
| "text": "In the future, we would like to implement known methods to do opinion detection and tourist attraction recognition so we can build a real system and evaluate its performance. More features should be studied for TR-opinion detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6." |
| }, |
| { |
| "text": "By the location information of the tourist attractions, it is also interesting to make a summary for a city or a country by the opinions about the tourist attractions located in that area. This will be our future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6." |
| }, |
| { |
| "text": "http://www.wretch.cc/blog", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://travel.network.com.tw/tourguide/twnmap/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Emotions from text: machine learning for text-based emotion prediction", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "O" |
| ], |
| "last": "Alm", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Sproat", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceeding of HLT/EMNLP 2005", |
| "volume": "", |
| "issue": "", |
| "pages": "579--586", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alm, C. O., Roth, D., & Sproat, R. (2005). Emotions from text: machine learning for text-based emotion prediction. In Proceeding of HLT/EMNLP 2005, 579-586.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Identifying Expressions of Opinion in Context", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Breck", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceeding of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2683--2688", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Breck, E., Choi, Y., & Cardie, C. (2007). Identifying Expressions of Opinion in Context. In Proceeding of IJCAI 2007, 2683-2688.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Identifying Sources of Opinions with Conditional Random Fields and Extraction Patterns", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Riloff", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Patwardhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceeding of HLT/EMNLP 2005", |
| "volume": "", |
| "issue": "", |
| "pages": "355--362", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Choi, Y., Cardie, C., Riloff, E., & Patwardhan, S. (2005). Identifying Sources of Opinions with Conditional Random Fields and Extraction Patterns. In Proceeding of HLT/EMNLP 2005, 355-362.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Working Set Selection Using the Second Order Information for Training SVM", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "E" |
| ], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "J" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "In Journal of Machine Learning Research", |
| "volume": "6", |
| "issue": "", |
| "pages": "1889--1918", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fan, R.E., Chen, P.H., Lin, C.J., & Joachims, T. (2005). Working Set Selection Using the Second Order Information for Training SVM. In Journal of Machine Learning Research, 6, 1889-1918.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Text Mining for Product Attribute Extraction", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Ghani", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Probst", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Krema", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Fano", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "SIGKDD Explorations", |
| "volume": "1", |
| "issue": "", |
| "pages": "41--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ghani, R., Probst, K., Liu, Y., Krema, M., & Fano, A. (2006). Text Mining for Product Attribute Extraction. In SIGKDD Explorations, 1(8), 41-48.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Opinion Mining using Econometrics: A Case Study on Reputation", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ghose", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Ipeirotis", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Sundararajan", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceeding of ACL 2007", |
| "volume": "", |
| "issue": "", |
| "pages": "416--423", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ghose, A., Ipeirotis, P., & Sundararajan, A. (2007). Opinion Mining using Econometrics: A Case Study on Reputation. In Proceeding of ACL 2007, 416-423.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "LargeScale Sentiment Analysis for News and Blogs", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Godbole", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Srinivasaiah", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Skiena", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ICWSM 2007", |
| "volume": "", |
| "issue": "", |
| "pages": "219--222", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Godbole, N., Srinivasaiah, M., & Skiena, S. (2007). LargeScale Sentiment Analysis for News and Blogs. In Proceedings of ICWSM 2007, 219-222.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Mining Opinion Features in Customer Reviews", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceeding of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "755--760", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hu M. & Liu, B. (2004). Mining Opinion Features in Customer Reviews. In Proceeding of AAAI 2004, 755-760.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Identifying Opinion Holders for Question Answering in Opinion Texts", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of AAAI-05 Workshop on Question Answering in Restricted Domains", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim, S.M., & Hovy, E. (2005). Identifying Opinion Holders for Question Answering in Opinion Texts. In Proceedings of AAAI-05 Workshop on Question Answering in Restricted Domains, 1-8.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Mining Opinions from the Web: Beyond Relevance Retrieval", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "W" |
| ], |
| "last": "Ku", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "In Journal of American Society for Information Science and Technology, Special Issue on Mining Web Resources for Enhancing Information Retrieval", |
| "volume": "58", |
| "issue": "12", |
| "pages": "1838--1850", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ku, L.W. & Chen, H.H. (2007). Mining Opinions from the Web: Beyond Relevance Retrieval. In Journal of American Society for Information Science and Technology, Special Issue on Mining Web Resources for Enhancing Information Retrieval, 58(12), 1838-1850.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Major Topic Detection and Its Application to Opinion Summarization", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "W" |
| ], |
| "last": "Ku", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "H" |
| ], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of SIGIR 2005", |
| "volume": "", |
| "issue": "", |
| "pages": "627--628", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ku, L.W., Lee, L.Y., Wu, T.H., & Chen, H.H. (2005). Major Topic Detection and Its Application to Opinion Summarization. In Proceedings of SIGIR 2005, 627-628.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Text Classification from Labeled and Unlabeled Documents Using EM", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Nigam", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "K" |
| ], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Thrun", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Machine Learning", |
| "volume": "39", |
| "issue": "", |
| "pages": "103--134", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nigam, K., McCallum, A. K., Thrun, S., & Mitchell, T. (2000). Text Classification from Labeled and Unlabeled Documents Using EM. In Machine Learning, 39(2-3), 103-134.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Locally Contextualized Smoothing of Language Models for Sentiment Sentence Retrieval", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Okamoto", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Honda", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Eguchi", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceeding of the 1st international CIKM 2009 Workshop on Topic-Sentiment Analysis for Mass Opinion", |
| "volume": "", |
| "issue": "", |
| "pages": "73--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Okamoto, T., Honda, T., & Eguchi, K. (2009). Locally Contextualized Smoothing of Language Models for Sentiment Sentence Retrieval. In Proceeding of the 1st international CIKM 2009 Workshop on Topic-Sentiment Analysis for Mass Opinion, 73-80.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Thumbs up? Sentiment Classification Using Machine Learning Techniques", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Vaithyanathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceeding of EMNLP 2002", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pang, B., Lee, L., & Vaithyanathan, S. (2002). Thumbs up? Sentiment Classification Using Machine Learning Techniques. In Proceeding of EMNLP 2002, 79-86.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Overview of Multilingual Opinion Analysis Task at NTCIR-8: A Step Toward Cross Lingual Opinion Analysis", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Seki", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "W" |
| ], |
| "last": "Ku", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Kando", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of NTCIR-8", |
| "volume": "", |
| "issue": "", |
| "pages": "209--220", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seki, Y., Ku, L.W., Sun, L., Chen, H.H., & Kando, N. (2010). Overview of Multilingual Opinion Analysis Task at NTCIR-8: A Step Toward Cross Lingual Opinion Analysis. In Proceedings of NTCIR-8, 209-220.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Tourism-Related Opinion Detection and Tourist-Attraction Target Identification 59", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tourism-Related Opinion Detection and Tourist-Attraction Target Identification 59", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Identifying Collocations for Recognizing Opinions", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Bell", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceeding of ACL 2001 Workshop on Collocation", |
| "volume": "", |
| "issue": "", |
| "pages": "24--31", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wiebe, J., Wilson, T., & Bell, M. (2001). Identifying Collocations for Recognizing Opinions. In Proceeding of ACL 2001 Workshop on Collocation, 24-31.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Opinion Target Network and Bootstrapping Method for Chinese Opinion Target Extraction", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Hao", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "F" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Lecture Notes in Computer Science", |
| "volume": "5839", |
| "issue": "", |
| "pages": "339--350", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xia, Y., Hao, B., & Wong, K.F. (2009). Opinion Target Network and Bootstrapping Method for Chinese Opinion Target Extraction. In Lecture Notes in Computer Science, 5839, 339-350.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Improve the Effectiveness of the Opinion Retrieval and Opinion Polarity Classification", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "1415--1416", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, W., Jia, L., Yu, C., & Meng, W. (2008). Improve the Effectiveness of the Opinion Retrieval and Opinion Polarity Classification. In Proceedings of the CIKM 2008, 1415-1416.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "My family and I visited the Wufeng Resort last week.) \u4e00\u5230\u6e21\u5047\u6751 1 \u5c31\u5c0d\u90a3\u908a\u7684\u65b0\u9bae\u7a7a\u6c23\u5370\u8c61\u6df1\u523b (We were impressed by the fresh air when we arrived at the resort 1 .) \u4e94\u5cf0 2 \u4e5f\u8cbc\u5fc3\u5730\u63d0\u4f9b\uf9ba\u505c\uf902\u7684\u670d\u52d9 (Wufeng 2 also thoughtfully provides parking service.) \u5982\u679c\u53ea\u662f\u55ae\u7d14\u7684\u653e\u9b06\u81ea\u5df1\uf9fd\u9ebc\u90fd\uf967\u60f3 (If you simply want to relax and get away from it all,) \u4e94\u5cf0\u6e21\u5047\u6751\u662f\u500b\uf967\u932f\u7684\u9078\u64c7 (the Wufeng Resort will be a good choice.)" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Select a function of possible coreferential expressions ref c (a) of a tourist attraction a." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Rnt2 \u2203ax, a\u2208TA c and x\u2208ref c (a) and lst(x, i\u2212b, i) c and x\u2208ref c (a) and fst(x, i, i+b) \u2264 n The choice of S c , TA c , and ref c (a) in Rnt1 and Rnt2 defines different rules to detect TR-opinions and TA-targets. These settings are quickly demonstrated in" |
| }, |
| "FIGREF3": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "(1) S c = S op , i.e. all of the opinionated sentences are TR-opinion candidates; (2) TA c = TA, i.e. all of the tourist attractions in D are TA-target candidates; and (3) ref c (a) = {a}, i.e. only the full name of a tourist attraction is considered as a coreferential expression." |
| }, |
| "FIGREF4": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "kinds of coreferential expressions, as stated in Section 2.4, are considered when determining the occurrences of a tourist attraction a, i.e. ref c (a) = ref all (a)." |
| }, |
| "FIGREF5": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "fna n-/ fnac n-1 if Nta \u2212 (S i ) = A j ; 0 otherwise fna n+ / fnac n+ 1 if Nta + (S i ) = A j ; 0 otherwise" |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>#TA</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td colspan=\"2\">10 11~20</td><td>21~78 Average</td></tr><tr><td colspan=\"7\">#docs 28 19 23 12 13 14</td><td>9</td><td>5</td><td>6</td><td>3</td><td>17</td><td>7</td><td>6.378</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF3": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td/><td colspan=\"9\">Tourism-Related Opinion Detection and Tourist-Attraction Target Identification</td><td>45</td></tr><tr><td colspan=\"2\">S op</td><td colspan=\"8\">the set of known opinionated sentences in D</td></tr><tr><td>S #</td><td>to</td><td colspan=\"8\">the set of known TR-opinions in D</td></tr><tr><td colspan=\"2\">trg(s)</td><td colspan=\"8\">the TA-target of a TR-opinion s</td></tr><tr><td colspan=\"2\">freq(a)</td><td colspan=\"8\">the frequency of a tourist attraction a, normalized by the maximal tourist attraction's frequency in D</td></tr><tr><td colspan=\"2\">A maxf</td><td>arg</td><td>max</td><td>TA a\u2208</td><td>freq</td><td>(</td><td>a</td><td>)</td><td>, the set of the most frequent tourist attractions in D</td></tr><tr><td colspan=\"2\">ref all (a)</td><td colspan=\"8\">the set of all possible coreferential expressions of a tourist attraction a</td></tr><tr><td colspan=\"2\">in(x, j, k)</td><td colspan=\"3\">1 if a string x</td><td/><td/><td/><td/></tr></table>", |
| "text": "{S 1 , S 2 , ..., S n }, the set of sentences in a document D TA {A 1 , A 2 , ..., A m }, the set of tourist attractions appearing in D OW {ow 1 , ow 2 , ..., ow p }, the set of known TR-opwords appears in one of the sentences S j , S j+1 ..., S k ; 0 otherwise fst(x, j, k) the index of the first sentence in S j , S j+1 ..., S k which contains a string x; \u221e if none of the sentences contains x lst(x, j, k) the index of the last sentence in S j , S j+1 ..., S k which contains a string x; 0 if none of the sentences contains x" |
| }, |
| "TABREF4": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>46</td><td/><td>Chuan-Jie Lin and Pin-Hsien Chao</td></tr><tr><td>Sid + (a, S i )</td><td>( ) x ref a fst x i ( , 1, ) c n \u2208 + follows S i and contains a min</td><td>, the ID of the nearest opinionated sentence which</td></tr><tr><td>Nid \u2212 (S i )</td><td colspan=\"2\">) attraction and precedes the sentence S i , ( max i TA a S a Sid c \u2212 \u2208 , the ID of the nearest sentence that contains a tourist</td></tr><tr><td>Nid + (S i )</td><td colspan=\"2\">) attraction and follows the sentence S i , ( min i TA a S a Sid c + \u2208 , the ID of the nearest sentence that contains a tourist</td></tr></table>", |
| "text": "the ID of the nearest opinionated sentence which precedes S i and contains a" |
| }, |
| "TABREF5": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "" |
| }, |
| "TABREF6": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "" |
| }, |
| "TABREF7": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Feature</td><td>Definition of feature(S i )</td></tr><tr><td>ffs</td><td>1 for S 1 ; 0 for other sentences in D</td></tr><tr><td>fow all</td><td>1 if \u2203x, x\u2208OW and in(x, i, i) = 1; 0 otherwise</td></tr><tr><td>fow k</td><td>1 if in(ow k , i, i) = 1; 0 otherwise</td></tr><tr><td colspan=\"2\">fta -1 / ftac -1 1 if \u2203ax, [a\u2208TA and x\u2208ref c (a) and in(x, i\u22121, i\u22121) = 1]; 0 otherwise</td></tr><tr><td>fta 0 / ftac 0</td><td>1 if \u2203ax, [a\u2208TA and x\u2208ref c (a) and in(x, i, i) = 1]; 0 otherwise</td></tr><tr><td colspan=\"2\">fta +1 / ftac +1 1 if \u2203ax, [a\u2208TA and x\u2208ref c (a) and in(x, i+1, i+1) = 1]; 0 otherwise</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF8": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"2\">Tourism-Related Opinion Detection and Tourist-Attraction Target Identification</td><td>49</td></tr><tr><td colspan=\"2\">Opinion Context Feature (fop)</td></tr><tr><td colspan=\"2\">Four features come from the surrounding opinionated sentences.</td></tr><tr><td colspan=\"2\">fop -1 : check if the sentence preceding S i is an opinion</td></tr><tr><td colspan=\"2\">fop +1 : check if the sentence following S i is an opinion</td></tr><tr><td colspan=\"2\">fop d-: the distance score of the nearest opinion preceding S i</td></tr><tr><td>fop</td><td/></tr><tr><td/><td>n</td></tr><tr><td>fto -1</td><td>1 if the sentence preceding S i is a TR-opinion; 0 otherwise</td></tr><tr><td>fto d-</td><td>the distance score of the nearest TR-opinion preceding S i</td></tr><tr><td>fto #</td><td>the 2 fto features whose values are assigned correctly</td></tr><tr><td>fto 2</td><td>the 2 fto features whose values are predicted by a retrained classifier</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF9": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>50</td><td>Chuan-Jie Lin and Pin-Hsien Chao</td></tr><tr><td>Distance Feature (fna and fnac)</td><td/></tr><tr><td/><td>n</td></tr></table>", |
| "text": "It is intuitive that a TR-opinion is often close to its targeting tourist attraction. Eight features are derived from the distance of an opinionated sentence S i and a tourist attraction A j . The first four fna features only consider full-name coreference, i.e. ref c (a) = {a}: fna n-: check if A j is the nearest tourist attraction preceding S i fna n+ : check if A j is the nearest tourist attraction following S i fna d-: the distance score of A j and S i when A j precedes S i fna d+ : the distance score of A j and S i when A j follows S" |
| }, |
| "TABREF11": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"2\">Threshold #TR-ow</td><td>P</td><td>R</td><td>F</td></tr><tr><td>0</td><td>482.1</td><td colspan=\"3\">37.71 46.46 41.63</td></tr><tr><td>0.1</td><td>475.2</td><td colspan=\"3\">38.71 46.04 42.06</td></tr><tr><td>0.2</td><td>443.5</td><td colspan=\"3\">41.42 43.29 42.33</td></tr><tr><td>0.25</td><td>418.6</td><td colspan=\"3\">43.17 41.62 42.38</td></tr><tr><td>0.26</td><td>418.6</td><td colspan=\"3\">43.17 41.62 42.38</td></tr><tr><td>0.3</td><td>408.8</td><td colspan=\"3\">42.82 39.78 41.25</td></tr><tr><td>0.4</td><td>359.7</td><td colspan=\"3\">46.58 31.78 37.78</td></tr><tr><td>0.5</td><td>266.2</td><td colspan=\"3\">49.28 22.77 31.15</td></tr><tr><td>0.6</td><td>251.3</td><td colspan=\"3\">50.23 18.18 26.70</td></tr><tr><td>0.7</td><td>218.4</td><td colspan=\"3\">49.06 10.93 17.87</td></tr><tr><td>0.8</td><td>202.5</td><td>50.50</td><td colspan=\"2\">8.42 14.44</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF12": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>presents the results of the rule-based TA-target identification systems under different</td></tr><tr><td>rule combinations. The Nearest-TA-in-Window Rule (Rnt2) slightly outperformed the</td></tr><tr><td>Nearest-Preceding-TA Rule (Rnt1) in any combination. The rule combination achieving the</td></tr><tr><td>best performance was the Nearest-TA-in-Window Rule (Rnt2) combined with the</td></tr><tr><td>Coreferential Expression Rule (Rcr), which was significantly different from all the others.</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF13": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Feature Combination</td><td>P</td><td>R</td><td>F</td></tr><tr><td>fta</td><td>42.15</td><td>60.88</td><td>49.81</td></tr><tr><td>fta -</td><td>40.92</td><td>80.23</td><td>54.20</td></tr><tr><td>fta 0-</td><td>61.18</td><td>36.28</td><td>45.55</td></tr><tr><td>ftac</td><td>56.90</td><td>47.79</td><td>51.95</td></tr><tr><td>ftac -</td><td>41.95</td><td>84.07</td><td>55.97</td></tr><tr><td>ftac 0-</td><td>62.28</td><td>44.20</td><td>51.71</td></tr><tr><td>fow all +ftac+fto 2</td><td>55.67</td><td>58.97</td><td>57.27</td></tr><tr><td>fow all +ftac 0-+fto 2</td><td>54.91</td><td>60.13</td><td>57.40</td></tr><tr><td>fow all +ffs+fop -+ftac -+fto 2</td><td>48.48</td><td>61.38</td><td>54.18</td></tr><tr><td>fow all +ffs+fop -+ftac 0-+fto 2</td><td>54.34</td><td>58.97</td><td>56.56</td></tr><tr><td>fow all +ffs+fop+ftac+fto 2</td><td>55.98</td><td>59.30</td><td>57.59</td></tr><tr><td>fow all +ffs+fop+fta+fto 2</td><td>50.68</td><td>53.13</td><td>51.87</td></tr><tr><td>fow all +ffs+fop -+fto #</td><td>58.77</td><td>79.40</td><td>67.54</td></tr><tr><td>fow all +ffs+fop -+ftac -+fto #</td><td>65.37</td><td>64.22</td><td>64.79</td></tr><tr><td>fow all +ffs+fop -+ftac -+fto</td><td>57.60</td><td>40.12</td><td>47.30</td></tr><tr><td>Rnt2+Rcr</td><td>43.14</td><td>81.82</td><td>56.49</td></tr><tr><td>Annotator 1</td><td>85.62</td><td>88.91</td><td>87.23</td></tr><tr><td>Annotator 2</td><td>89.17</td><td>82.40</td><td>85.65</td></tr><tr><td>Annotator 3</td><td>96.52</td><td>57.80</td><td>72.30</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF14": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>fto # -1</td><td>0.371</td><td>8.204</td><td>0.781</td></tr><tr><td>ftac 0</td><td>0.272</td><td>5.588</td><td>0.468</td></tr><tr><td>fto # d-</td><td>0.853</td><td>1.599</td><td>0.401</td></tr><tr><td>fta 0</td><td>0.220</td><td>5.930</td><td>0.392</td></tr><tr><td>ftac -1</td><td>0.258</td><td>2.614</td><td>0.248</td></tr><tr><td>ftac d+</td><td>0.832</td><td>1.280</td><td>0.205</td></tr><tr><td>fta -1</td><td>0.210</td><td>2.438</td><td>0.187</td></tr><tr><td>fta d+</td><td>0.788</td><td>1.259</td><td>0.181</td></tr><tr><td>fow all</td><td>0.416</td><td>1.484</td><td>0.164</td></tr><tr><td>ftac d-</td><td>0.903</td><td>1.198</td><td>0.163</td></tr><tr><td>fta d-</td><td>0.875</td><td>1.185</td><td>0.148</td></tr><tr><td>ftac +1</td><td>0.192</td><td>1.677</td><td>0.099</td></tr><tr><td>fta +1</td><td>0.160</td><td>1.638</td><td>0.079</td></tr><tr><td>fop d+</td><td>0.938</td><td>1.028</td><td>0.026</td></tr><tr><td>fop d-</td><td>0.931</td><td>1.017</td><td>0.015</td></tr><tr><td>fop -1</td><td>0.463</td><td>1.033</td><td>0.015</td></tr><tr><td>fop +1</td><td>0.460</td><td>1.022</td><td>0.010</td></tr><tr><td>ffs</td><td>0.038</td><td>0.817</td><td>-0.008</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF15": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Feature Combination</td><td>P</td><td>R</td><td>F</td></tr><tr><td colspan=\"2\">The second layer only (TA-Target Identification)</td><td/><td/></tr><tr><td>ffq+fnac</td><td>90.06</td><td>89.91</td><td>89.98</td></tr><tr><td>Dual-Layer Model</td><td/><td/><td/></tr><tr><td>1 st layer: fow all +ffs+fop+ftac+fto 2</td><td>51.30</td><td>54.21</td><td>52.72</td></tr><tr><td>2 nd layer: ffq+fnac</td><td/><td/><td/></tr><tr><td>Single-Layer Model</td><td/><td/><td/></tr><tr><td>fow all +ffs+fop -+fto # +ffq+fnac</td><td>32.83</td><td>88.91</td><td>47.95</td></tr><tr><td>fow all +ffs+fop -+ftac+fto # +ffq+fnac</td><td>32.75</td><td>88.74</td><td>47.84</td></tr><tr><td>Rnt2+Rcr</td><td>32.10</td><td>60.88</td><td>42.04</td></tr><tr><td>Annotator 1</td><td>84.10</td><td>87.32</td><td>85.68</td></tr><tr><td>Annotator 2</td><td>87.27</td><td>80.65</td><td>83.83</td></tr><tr><td>Annotator 3</td><td>94.71</td><td>56.71</td><td>70.94</td></tr></table>", |
| "text": "" |
| } |
| } |
| } |
| } |