| { |
| "paper_id": "Y13-1037", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:32:35.256159Z" |
| }, |
| "title": "KOSAC: A Full-fledged Korean Sentiment Analysis Corpus", |
| "authors": [ |
| { |
| "first": "Hayeon", |
| "middle": [], |
| "last": "Jang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Seoul National University Gwanak-no Gwanak-gu", |
| "location": { |
| "postCode": "151-741", |
| "settlement": "Seoul" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Munhyong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Seoul National University Gwanak-no Gwanak-gu", |
| "location": { |
| "postCode": "151-741", |
| "settlement": "Seoul" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hyopil", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Seoul National University Gwanak-no Gwanak-gu", |
| "location": { |
| "postCode": "151-741", |
| "settlement": "Seoul" |
| } |
| }, |
| "email": "hpshin@snu.ac.kr" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper aims to introduce the Korean Sentiment Analysis Corpus named KOSAC. KOSAC is a corpus consisting of 332 news articles taken from the Sejong Syntactic Parsed Corpus. These sentences have been manually-tagged for sentimental features. The corpus includes 7,713 sentence subjectivity tags and 17,615 opinionated expression tags based on the annotation scheme called KSML which reflects the characteristics of the Korean language. The results of sentence subjectivity and polarity classification experiments using the corpus show the wide possibilities of applying the KSML scheme and the tagged information of the KOSAC comprehensively to other corpora. What is innovative about our work is that it pulls together both the concept of private states and nested-sources into one linguistic annotation scheme. We believe that this corpus could be used by researchers as a gold standard for various NLP tasks related to sentiment analysis.", |
| "pdf_parse": { |
| "paper_id": "Y13-1037", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper aims to introduce the Korean Sentiment Analysis Corpus named KOSAC. KOSAC is a corpus consisting of 332 news articles taken from the Sejong Syntactic Parsed Corpus. These sentences have been manually-tagged for sentimental features. The corpus includes 7,713 sentence subjectivity tags and 17,615 opinionated expression tags based on the annotation scheme called KSML which reflects the characteristics of the Korean language. The results of sentence subjectivity and polarity classification experiments using the corpus show the wide possibilities of applying the KSML scheme and the tagged information of the KOSAC comprehensively to other corpora. What is innovative about our work is that it pulls together both the concept of private states and nested-sources into one linguistic annotation scheme. We believe that this corpus could be used by researchers as a gold standard for various NLP tasks related to sentiment analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "There has been much research on the automatic identification and extraction of opinions and sentiments in text. Researchers from many subareas of Artificial Intelligence and Natural Language Processing (NLP) have been working on the automatic identification of opinions and related tasks. To date, most such work has focused on opinion, sentiment or subjectivity classification at the document or sentence level. A common sentiment analysis task is to classify documents or sentences by whether they are subjective or objective, and, if the target text is subjective, to classify it as positive or negative (Pang et al., 2002; Wiebe et al., 2005) .", |
| "cite_spans": [ |
| { |
| "start": 607, |
| "end": 626, |
| "text": "(Pang et al., 2002;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 627, |
| "end": 646, |
| "text": "Wiebe et al., 2005)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Along with these lines of research, a need for corpora annotated with rich information about opinions and emotions has emerged. In particular, statistical and machine learning approaches have become the method of choice for constructing a wide variety of practical NLP applications. These methods, however, typically require training and test corpora that have been manually annotated with respect to each language-processing task to be acquired. As such a resource, the Multiperspective Question Answering (MPQA) Opinion Corpus plays an important role in sentiment analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The goal of this paper is to introduce the Korean Sentiment Analysis Corpus, KOSAC 1 . We received two years of support (May, 2011 -April, 2013 in this corpus construction project from the Korean Research Foundation (KRF). In the first year of the project, we focused on a finegrained annotation scheme called KSML (Shin et al., 2012) that identifies key components and properties of sentiments based on solid theoretical background. The annotation scheme has been employed in the manual annotation of a 7,713-sentence corpus of 332 news articles from the Sejong syntactic parsed corpus. This manually-tagged corpus includes 17,615 opinionated expression tags.", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 130, |
| "text": "(May, 2011", |
| "ref_id": null |
| }, |
| { |
| "start": 131, |
| "end": 143, |
| "text": "-April, 2013", |
| "ref_id": null |
| }, |
| { |
| "start": 315, |
| "end": 334, |
| "text": "(Shin et al., 2012)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The remainder of this paper is organized as follows. Section 2 gives an overview of KSML focused on differences with the annotation scheme of the MPQA. Section 3 describes observations about KOSAC. Section 4 presents the results of subjectivity and polarity classification experiments using the corpus. Section 5 presents conclusions and discusses future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The MPQA Opinion Corpus began with the conceptual structure for private states in Wiebe (2002) and developed manual annotation instructions (Wiebe et al., 2005; Wilson, 2008) . Documents contained in the MPQA version 2.0 corpus are mostly news articles. It contains 461 documents spanning 80,706 sentences, 216,080 tokens, and 10,315 subjective expressions annotated with links. These subjective expressions are annotated with \"attitude types\" indicating what type of subjectivity they invoked. 5,127 of these subjective expressions convey sentiment. Since this corpus provides rich annotated expressions based on a fine-grained annotation scheme, it is widely used as a source for training data in machine learning approaches and serves as the gold standard in many sentiment analysis tasks. Hence, we took advantage of the MPQA as a fundamental resource for sentiment corpus construction in Korean.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 94, |
| "text": "Wiebe (2002)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 140, |
| "end": 160, |
| "text": "(Wiebe et al., 2005;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 161, |
| "end": 174, |
| "text": "Wilson, 2008)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Markup Language: KSML", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the first year of the project constructing the Korean Sentiment Analysis Corpus, we focused on the theoretical background for the annotation scheme named the Korean Subjectivity Markup Language (KSML). Shin et al. (2012) provides a solid theoretical background for the corpus and describes the results of an inter-annotator agreement test with a view to improving the annotation scheme. Our work essentially follows the idea of the annotation scheme of the MPQA, but we have modified the existing framework and attributes in order to address the characteristics of Korean. In this section, we give an overview of KSML focused on differences with the annotation scheme of the MPQA.", |
| "cite_spans": [ |
| { |
| "start": 205, |
| "end": 223, |
| "text": "Shin et al. (2012)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Markup Language: KSML", |
| "sec_num": "2" |
| }, |
| { |
| "text": "First of all, the annotation frame of the MPQA is classified as six types by functions and meanings of the expressions regardless of the tagging unit: type-agent, expressive-subjectivity, directsubjective, objective-speech-event, attitude, and target. Each unit could connect by various links such as target-links or attitude-links.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Framework", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The KSML, however, divides tagging units as the whole sentences and smaller expressions included in the sentences. The subjectivity and objectivity present the subjectivity of the whole sentence by reflecting whether an annotator feel the sentence is objectively true or not in terms of the speech event. In a SEED tag, each individual unit which is smaller than a sentence expresses a private state. The KSML describes information related to subjectivity such as source, target, and subjectivity-type by using attributes of a SEED tag without any links. Table 1 shows the attributes.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 555, |
| "end": 562, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotation Framework", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Type attributes specify either speech events (acts) that express private states or non-speech events. These fit into five subtypes: directexplicit, direct-speech, direct-action, indirect, and writing-device. The expressive-subjectivity of the MPQA corpus matches the indirect type in the KSML. The attitude of the MPQA is expressed by subjectivity-type in the KSML. The directsubjective of the MPQA corpus classifies direct-explicit, action, or speech types in the KSML depending on the exact nature of the subjectivity. These tags group direct expressions together by the way of express opinions or emotions. Such classification could show different shades of expressed sentiments. The MPQA does not have a specific tag for direct subjective speech events. The objectivespeech-event of the MPQA is direct-speech type expressions of a sentence having an objectivity tag in the KSML frame.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Change of Attributes", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The writing-device is a newly added attribute to KSML in order to show writers' own subjectivity through non-predicate expressions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Change of Attributes", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Modal expressions, speaker-oriented adverbials, conjunctive endings, and special functional particles get writing-device tags as kinds of devices reflecting sentiments in texts. As a basic annotation unit, we chose a morpheme rather than a word because Korean is an agglutinative language having many meaning-bearing particles and sentence endings which can carry private states. We need to be able to pinpoint precise segments as a basic unit, especially when finding writing-device expressions. Since some endings and particles show the subjectivity of a sentence having no direct opinionated expressions, writing-device expressions usually have high intensity of subjectivity. Various expressive techniques like contrast, inferred, repetition, and sarcastic of the MPQA could be classified as writing-device in the KSML.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Change of Attributes", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The framework of the MPQA is similar to that of Appraisal Theory by Martin (2002) and White (2002) . The Appraisal framework is composed of concepts including Affect, Judgment, Appreciation, Engagement, and Amplification. Affect, Judgment, and Appreciation represent different types of positive and negative attitudes. Nonetheless, the MPQA corpus does not distinguish different types of private states like Affect and Judgment, which can provide useful information in sentiment analysis. On the other hand, the MPQA corpus distinguished different ways that private states may be expressed, such as directly or indirectly. The KSML, however, not only covers many types of attitudes as in Appraisal theory but also several expressive types as in the MPQA corpus. For example, we added a Judgment attribute to the subjectivity-type in KSML.", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 81, |
| "text": "Martin (2002)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 86, |
| "end": 98, |
| "text": "White (2002)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Change of Attributes", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Each attribute of subjectivity-type except others has directional cues like positive, negative, complex, and neutral. Unlike the MPQA, the KSML adds neutral and complex directional cues. In addition, the speculation attribute also has directional cues. Directional cues express semantic orientations of subjectivity-type tags. Such detailed classification provides benefits in the process of sentiment analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Change of Attributes", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "So far we describe the KSML as an annotation scheme for the Korean Sentiment Analysis Corpus with a focus on the differences with the MPQA annotation scheme. As an end of this section, the sentence tagging examples in Table 2 show the different tagging aspects according to the annotation schemes. The sample sentence and the example tags of the MPQA are brought from the existing MPQA corpus, and the tagging example of the KSML is made by an annotator who participated in the project constructing the Korean Sentiment Analysis Corpus. Compared to the MPQA scheme, the frame of the KSML is simpler and easier to understand in terms of subjectivity included in the sentence because the KSML grabs opinionated expressions in detail.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 218, |
| "end": 225, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence Tagging Examples", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Unlike English, Korean is a morphologically rich language, so, rather than words, morphemes should be the units of annotations. However, it is too time-consuming to build a flawless morphologically parsed corpus due to the inaccuracy of part of speech (POS) taggers. For this reason, the Sejong syntactic parsed corpus, which is semi-automatically built, was used as the basis for the sentiment annotation corpus. Syntactic information of sentences is also available, enabling further logical inference on agents or targets of sentimental expressions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Selection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "A subset containing a total of 332 articles made up of 7,713 sentences was selected from the Sejong corpus newspaper articles. These articles were taken from the society and life subsections of Hankyoreh and Chosun, the editorial section of Hankook.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Selection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The size of corpus largely depends on the speed of annotation work. Without an appropriate annotation tool, it is almost impossible to build a large annotated corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Though the MPQA opinion corpus was built with GATE annotation tool, we developed a morpheme based annotation tool for Korean text (Cattle et al., 2013) for three reasons. First, none of the current annotation tools, such as GATE or brat, supported switching between word and morpheme views. Second, there are noncontinuous sentiment expressions that cannot be annotated by current tools. Third, targets and nested-sources of sentiment expressions need to be annotated in advance of sentiment expressions within those tools, which is not intuitive and in turn makes the process of annotation slow. Moreover, to ensure the quality of annotations, three welltrained linguistics students annotated separately, and then double cross-checked the annotations until all annotators agree on the same annotations. ", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 151, |
| "text": "(Cattle et al., 2013)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Process", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The accuracy of an annotated corpus is difficult to measure. For KOSAC, twenty frequently occurring sentiment expressions were chosen from six subjectivity types to see how consistently people annotated those expressions. For measurement, the ratio of annotated times to the number of occurring times for each of those expressions is shown in Table 3 Of the 17,615 SEED annotations, the frequencies of type and subjectivity-type are given in Table 4 . As seen above, the judgment subjectivity type is the most predominant type since judgment subjectivity type expressions include not just short sentiment words or phrases, but also clauses that show speakers' judgments. Among subtypes of type, indirect expressions include all sentiment expressions except all main predicates and writing-device expressions; accordingly indirect type is also the most frequent type of all. A large portion of writing-device expressions are categorized others subjectivity type because they do not usually belong to any other subjectivity types. To help understand which expressions belong to such types above and how they are annotated, Table 5 shows some examples of some types. For an objective or a subjective sentence, how many types and subjectivity types it has on average is shown in Table 6 . A subjective sentence tends to have more direct-explicit, indirect, writing-device types than an objective sentence. The frequency of the direct-speech type is higher for objective sentences due to the reporting predicates. For subjectivity type, a subjective sentence has particularly higher frequency of judgment, speculation, emotion, and others than an objective sentence. Also the number of SEED Agree.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 343, |
| "end": 350, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 442, |
| "end": 449, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1121, |
| "end": 1128, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 1275, |
| "end": 1282, |
| "text": "Table 6", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotated Expressions", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Argu. Emotion Intention Judgment Speculation Others Dir-Action 1 9 71 8 38 0 1 Dir-Explicit 156 277 341 276 2740 157 40 Dir-Speech 8 1149 22 28 86 13 7 Indirect 255 321 720 409 6086 63 22 Writing-Device 5 98 9 306 764 172 2957 tags for a subjective sentence is the double of that for an objective.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 52, |
| "end": 269, |
| "text": "Dir-Action 1 9 71 8 38 0 1 Dir-Explicit 156 277 341 276 2740 157 40 Dir-Speech 8 1149 22 28 86 13 7 Indirect 255 321 720 409 6086 63 22 Writing-Device 5 98 9 306 764 172 2957", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotated Expressions", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Firstly, a subjectivity classification test was done by using frequency features from sentence tag attributes. To guarantee the experiment result, a 10-fold cross validation was used; 1/10 is used as a test set and 9/10 as a training set. As a classification model, SVMlight (Joachims, 2002) was chosen using a linear kernel and default options.", |
| "cite_spans": [ |
| { |
| "start": 275, |
| "end": 291, |
| "text": "(Joachims, 2002)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subjectivity Classification", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Since there could be too many frequency features from attributes, a pair of features was tested to classify sentence subjectivity, and then features were added one by one until the accuracy of SVM began to drop to find the most effective feature set. In detail, we identified the effectiveness of the attributes of SEED tags in terms of classifying polarity of a sentence by adding each attribute feature to the most efficient pairs as per the previous experiment. If an added attribute showed a better result, the combination would be the base pair for the next experiment. Figure 2 shows experimental results of subjectivity classification. The best pair of features was the number of SEED tags and the direct-speech frequency, so another feature was added to the pair until the accuracy dropped. In the end, it was found that the best result was a feature set of the number of SEEDs, directspeech, nested-source, agreement, out (nested-source), and negative value of polarity. The best performance of the SVM classifier was accuracy 65.72%, precision 59.76%, recall 96.41%, F-measure 73.78%.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 575, |
| "end": 583, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Subjectivity Classification", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "However, the best classification result by SVM is not satisfactory, even though this test was done within a gold standard data. The reason was that sentence subjectivity surprisingly does not depend on the frequency of attributes. Rather, it is decided how a sentence ends. It is intuitively noticeable that a subjective sentence has features that make it subjective, and an objective sentence does not. We found almost all subjective sentences end with expressions that have a direct-explicit tag or include a writingdevice seed. Among subjective sentences, 84.9% included a direct-explicit or writing-device seed. Table 7 shows how much sentence subjectivity depends on directexplicit and writing-device expressions. Furthermore, the position of writing-device expression is important for the subjectivity of a sentence; a subjective sentence tends to have it within a main clause or close to main predicate.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 616, |
| "end": 623, |
| "text": "Table 7", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Subjectivity Classification", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Subjective Sent. 1Objective Sent", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Type", |
| "sec_num": null |
| }, |
| { |
| "text": "( ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Type", |
| "sec_num": null |
| }, |
| { |
| "text": "Secondly, sentence polarity classification experiments were conducted. The experimental method was the same as the sentence subjectivity classification experiments. The following Figure 3 shows the best results and the experimental result of using all attributes. Attributes leading the best results (Accuracy 82.52%, Precision 77.64%, Recall 93.93%, Fmeasure 84.96%) in the sentence polarity classification experiments were the number of nested-source, positive (polarity), negative (polarity), direct-speech (type), and complex (directional cue of subjectivity-type).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 179, |
| "end": 188, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Polarity Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Among the contributory features in the experiment, the directional cue complex, which combines only with emotion, judgment, and argument subtype of subjectivity-type, is worthy of notice. These subtypes express private states in a relatively direct way and so the intensity of expressions is usually higher than other subtypes. In this respect, the polarity of expressions classified as these subtypes would be easier to determine.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Polarity Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We suppose that the characteristics of news articles are the reason why nested-source and direct-speech (type) are the main features in the experimental results. In general, writers of news articles try to maintain objective distance. When citing other people's comments or statements, however, they have to convey the exact words of the speaker. Therefore, cited sentences could include more direct opinionated expressions showing obvious polarity. A number of nested-source and direct-speech (type) are important factors to distinguish whether an expression is a writer's own thinking or a citation of another's utterance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Polarity Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In another manner, we can classify polarity of a sentence simply by checking for the inclusion of specific attributes. Checking attributes can be different according to the corpus. In the experiment using KOSAC corpus, we only used three attributes of SEED tags: type (only direct subtypes), polarity, and intensity. Table 8 describes the algorithm to classify polarity of a sentence by checking these attributes.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 317, |
| "end": 324, |
| "text": "Table 8", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Polarity Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Through this checking algorithm, we obtained an 82.15% accuracy on sentence polarity classification. This result is slightly lower than the best experimental result using the SVMlight. However, considering that many sentences could slip through the net of checking at any phase of the algorithm since the algorithm is too simple, such accuracy can be rated high. In addition, this method does not need any other classifier, and we can get good results by using attributes which are understood intuitively as important factors in classification of polarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Polarity Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For all sentences in the KOSAC corpus, 1. if a sentence has SEED tags of direct subtypes, for only corresponding SEED tags, A. if the number of positive polarity tags and the number of negative polarity tags are different, classify the sentence as the bigger polarity. B. else, i.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Polarity Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "if intensity values of the polarity tags are different, classify the sentence as the polarity having the highest intensity value. ii.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Polarity Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "else, classify the sentence as the polarity having dir-explicit type value. 2. else, for every SEED tag, do the same process as in phase 1. Therefore, we confirm that the theoretical background forming the KSML annotation scheme is highly effective at describing subjectivity and polarity of opinionated expressions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Polarity Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "This paper described a fine-grained annotation scheme KSML and the manually-annotated Korean Sentiment Analysis Corpus, KOSAC. This scheme pulls together into one linguistic annotation scheme both the concept of private states and nested source based on the MPQA. However, the frame and some attributes were modified in order to reflect the characteristics of Korean language. The scheme was applied comprehensively to a large 7,713-sentence corpus. Several examples illustrating the scheme and basic observations of the corpus were described in section 3. The results of sentence subjectivity and polarity classification experiments using the corpus were also presented in section 4. Such experimental results show wide possibilities of application of the KSML annotation scheme and the KOSAC corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The main goal behind the KSML and KOSAC is to support the development and evaluation of NLP systems that exploit opinions and sentiments in applications. Our hope is that including rich information of opinionated expressions in our corpus annotations will contribute to a new understanding of how sentiments are expressed linguistically in Korean language. We hope this work will be useful to others working in corpus-based explorations of subjective language and that it will encourage NLP researchers to experiment with subjective language in their applications.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by the National Research Foundation of Korea Grant funded by the Korean Government (NRF-2011-327-A00322).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Morpheme-based Annotation Tool for Korean Text", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Cattle", |
| "suffix": "" |
| }, |
| { |
| "first": "Munhyong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Hyopil", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the American Association for Corpus Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cattle, Andrew, Munhyong Kim, and Hyopil Shin. 2013. Morpheme-based Annotation Tool for Korean Text. In Proceedings of the American Association for Corpus Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning to Classify Text Using Support Vector Machines", |
| "authors": [ |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joachims, Thorsten. 2002. Learning to Classify Text Using Support Vector Machines. Ph.D Dissertation, Cornell University.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Appraisal: An overview", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Martin", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin, J.R. 2002. Appraisal: An overview. http://www.grammatics.com/appraisal/AppraisalG uide/UnFramed/Appraisal-Overview.htm", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Thumbs up? Sentiment Classification Using Machine Learning Techniques", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Shivakumar", |
| "middle": [], |
| "last": "Vaithyanathan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pang, Bo, Lillian Lee, and Shivakumar Vaithyanathan. 2002. Thumbs up? Sentiment Classification Using Machine Learning Techniques. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 79-86.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Annotation Scheme for Constructing Sentiment Corpus in Korean", |
| "authors": [ |
| { |
| "first": "Hyopil", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Munhyong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu-Mi", |
| "middle": [], |
| "last": "Jo", |
| "suffix": "" |
| }, |
| { |
| "first": "Hayeon", |
| "middle": [], |
| "last": "Jang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Cattle", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "proceedings of the 26 th Pacific Asia Conference on Language, Information and Compuation", |
| "volume": "", |
| "issue": "", |
| "pages": "181--190", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shin, Hyopil, Munhyong Kim, Yu-Mi Jo, Hayeon Jang, and Andrew Cattle. 2012. Annotation Scheme for Constructing Sentiment Corpus in Korean. In proceedings of the 26 th Pacific Asia Conference on Language, Information and Compuation, pages 181-190.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Appraisal-the language of evaluation and stance", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "R" |
| ], |
| "last": "White", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Handbook of Pragmatics", |
| "volume": "", |
| "issue": "", |
| "pages": "1--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "White, P.R. 2002. Appraisal-the language of evaluation and stance. In Jef Verschueren, Jan-Ola Ostman, Jan Blommaert, and Chris Bulcaen, editors, Handbook of Pragmatics, pages 1-27.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Instructions for Annotating Opinions in Newspaper Articles", |
| "authors": [ |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wiebe, Janyce. 2002. Instructions for Annotating Opinions in Newspaper Articles. Departiment of Computer Science Technical Report TR-02-101, University of Pittsburgh.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Annotating Expressions of Opinions and Emotions in Language. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "39", |
| "issue": "", |
| "pages": "164--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wiebe, Janyce, Theresa Wilson, and Claire Cardie. 2005. Annotating Expressions of Opinions and Emotions in Language. Language Resources and Evaluation, 39(2/3):164-210.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Fine-grained Subjectivity and Sentiment Analysis: Recognizing the Intensity, Polarity, and Attitudes of Private States. Ph.D Dissertation", |
| "authors": [ |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ann", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wilson, Theresa Ann. 2008. Fine-grained Subjectivity and Sentiment Analysis: Recognizing the Intensity, Polarity, and Attitudes of Private States. Ph.D Dissertation, Brandeis University.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Morpheme Based Annotation Tool", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "The result of polarity classification tests", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "The result of polarity classification tests", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "num": null, |
| "text": "The list of SEED tag attributes", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td>The MPQA annotation scheme</td></tr><tr><td>GATE_objective-speech-event</td></tr><tr><td>nested-source=w implicit=true</td></tr><tr><td>GATE_direct-subjective: clashed violently</td></tr><tr><td>nested-souce=w,warlords polarity=negative</td></tr><tr><td>expression-intensity=high intensity=high</td></tr><tr><td>GATE_agent: two warlords</td></tr><tr><td>id=warlords nested-source=w,warlords</td></tr><tr><td>The KSML annotation scheme</td></tr><tr><td>Objectivity tag</td></tr><tr><td>SEED: clashed over</td></tr><tr><td>nested-souce=w,warlords type=dir-explicit</td></tr><tr><td>subjectivity-type=agreement-negative</td></tr><tr><td>polarity=negative intensity=high</td></tr><tr><td>target=who should be governor in eastern</td></tr><tr><td>Paktia province</td></tr><tr><td>SEED: violently</td></tr><tr><td>nested-souce=w type=indirect</td></tr><tr><td>subjectivity-type=judgment-negative</td></tr><tr><td>polarity=negative intensity=high</td></tr><tr><td>target=clashed over</td></tr></table>", |
| "num": null, |
| "text": "On Saturday he met representatives of two warlords who clashed violently last week over who should be governor in eastern Paktia province.", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Tagging examples of the MPQA and KSML", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>PACLIC-27</td></tr></table>", |
| "num": null, |
| "text": "Frequency Cross Table of Expressive and Subjectivity Type", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF6": { |
| "content": "<table><tr><td>Among the 7,713 sentences, 2,658 are</td></tr><tr><td>annotated as subjective and 5,055 sentences as</td></tr><tr><td>objective. There are 17,615 SEED tags, indicating</td></tr><tr><td>on average 2.3 expressions tagged as SEED per</td></tr><tr><td>sentence.</td></tr></table>", |
| "num": null, |
| "text": "Frequency Cross Table of Expressive and Subjectivity Type", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td>\ub73b\uc744 \ubaa8\uc73c-</td><td>ttusul mou-</td><td>'agree'</td></tr><tr><td>\uacb0\uc758\ud558-</td><td>kyeluyha-</td><td>'resolve'</td></tr><tr><td colspan=\"3\">\ubc18\ubc1c\uc774 \uac15\ud558-panpali kangha-'strongly oppose'</td></tr><tr><td/><td colspan=\"2\">Direct-action & Emotion</td></tr><tr><td colspan=\"3\">\ub208\ubb3c\uc774 \ud750\ub974-nwunmwuli hulu-'tear drops'</td></tr><tr><td>\uc5bc\uc2f8\uc548-</td><td>elssaan-</td><td>'hug'</td></tr><tr><td>\ud0a5\ud0a5\uac70\ub9ac-</td><td>khikkhikkeli-</td><td>'giggle'</td></tr><tr><td colspan=\"3\">Writing-device & Judgment</td></tr><tr><td colspan=\"3\">\ud558\uc9c0\ubabb\ud558\uba74 haci moshamyen 'if do not do (it)'</td></tr><tr><td>\uc81c\uc544\ubb34\ub9ac</td><td>ceyamwuli</td><td>'even if'</td></tr><tr><td>\uc624\ud788\ub824</td><td>ohilye</td><td>'rather'</td></tr></table>", |
| "num": null, |
| "text": "Direct-explicit & Agreement", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF8": { |
| "content": "<table><tr><td colspan=\"3\">From the examples above, it can be seen that</td></tr><tr><td colspan=\"3\">annotated expressions are not restricted to</td></tr><tr><td colspan=\"3\">specific syntactic segments, but rather capture</td></tr><tr><td colspan=\"3\">segments which reveal one's subjectivity. Also,</td></tr><tr><td colspan=\"3\">it is noticeable that intensifiers are not separated</td></tr><tr><td colspan=\"2\">from sentiment expressions.</td><td/></tr><tr><td colspan=\"3\">From the fine-grained annotated corpus,</td></tr><tr><td colspan=\"3\">characteristics of a subjective or an objective</td></tr><tr><td colspan=\"3\">sentence could be described by frequencies of</td></tr><tr><td colspan=\"2\">type and subjectivity types.</td><td/></tr><tr><td>Type</td><td colspan=\"2\">Objective Subjective</td></tr><tr><td>direct-action</td><td colspan=\"2\">0.015772 0.017097</td></tr><tr><td>direct-explicit</td><td colspan=\"2\">0.374925 0.794073</td></tr><tr><td>direct-speech</td><td colspan=\"2\">0.225594 0.067629</td></tr><tr><td>indirect</td><td colspan=\"2\">0.678179 1.679711</td></tr><tr><td>writing-device</td><td colspan=\"2\">0.354761 0.946809</td></tr><tr><td>Subjectivity Type</td><td colspan=\"2\">Objective Subjective</td></tr><tr><td>Agreement</td><td colspan=\"2\">0.041925 0.079787</td></tr><tr><td>Argument</td><td colspan=\"2\">0.270313 0.18845</td></tr><tr><td>Emotion</td><td colspan=\"2\">0.116191 0.216565</td></tr><tr><td>Intention</td><td colspan=\"2\">0.118387 0.162234</td></tr><tr><td>Judgment</td><td colspan=\"2\">0.830904 2.087006</td></tr><tr><td>Speculation</td><td colspan=\"2\">0.030146 0.094225</td></tr><tr><td>Others</td><td colspan=\"2\">0.241366 0.677052</td></tr><tr><td>Number of SEEDs</td><td>1.649231</td><td>3.505319</td></tr></table>", |
| "num": null, |
| "text": "Examples of Annotated Expressions", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF9": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Average Frequencies of Types for Objective and Subjective Sentences.", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF11": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Ratio of direct-explicit and writing-device for Sentence Subjectivity", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF12": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Checking algorithm for polarity classification", |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |