| { |
| "paper_id": "Y17-1046", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:33:18.169664Z" |
| }, |
| "title": "Automatic Categorization of Tagalog Documents Using Support Vector Machines", |
| "authors": [ |
| { |
| "first": "Joan", |
| "middle": [ |
| "O" |
| ], |
| "last": "Vicente", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "aovicente@up.edu.ph" |
| }, |
| { |
| "first": "Erlyn", |
| "middle": [], |
| "last": "Manguilimotan", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "erlynqm@gmail.com" |
| }, |
| { |
| "first": "April", |
| "middle": [], |
| "last": "Dae", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Bation", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "acbation@up.edu.ph" |
| }, |
| { |
| "first": "Erlyn", |
| "middle": [ |
| "Q Manguilimotan" |
| ], |
| "last": "Aileen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Automatic document classification is now a growing research topic in Natural Language Processing. Several techniques were incorporated to build a classifier that can categorize documents written in specific languages into their designated categories. This study builds an automatic document classifier using machine learning which is suited for Tagalog documents. The documents used were news articles scraped from Tagalog news portals. These documents were manually annotated into different categories and later on, underwent preprocessing techniques such as stemming and removal of stopwords. Different document representations were also used to explore which representation performed best with the classifiers. The SVM classifier using the stemmed dataset which was represented using TF-IDF values yielded an F-score of 91.99% and an overall accuracy of 92%. It outperformed all other combinations of document representations and classifiers.", |
| "pdf_parse": { |
| "paper_id": "Y17-1046", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Automatic document classification is now a growing research topic in Natural Language Processing. Several techniques were incorporated to build a classifier that can categorize documents written in specific languages into their designated categories. This study builds an automatic document classifier using machine learning which is suited for Tagalog documents. The documents used were news articles scraped from Tagalog news portals. These documents were manually annotated into different categories and later on, underwent preprocessing techniques such as stemming and removal of stopwords. Different document representations were also used to explore which representation performed best with the classifiers. The SVM classifier using the stemmed dataset which was represented using TF-IDF values yielded an F-score of 91.99% and an overall accuracy of 92%. It outperformed all other combinations of document representations and classifiers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Due to the explosive growth of documents in digital form, automatic text categorization has become an important area of research. It is the task of assigning documents, based solely on its contents, to predefined classes or categories.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Through time, approaches to this field of study evolved from knowledge engineering to machine learning. In the machine learning approach, the defining characteristics of each document are learned by the model from a set of annotated documents used as \"training\" data. Such includes Na\u00efve Bayes and Support Vector Machine classifiers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Different standard machine learning techniques treat text categorization as a standard classification problem, and thereby reducing the learning process into two steps -feature selection and classification learning over the feature space (Peng et. al., 2003) . Of these two steps, feature selection is more critical since identifying the right features will guarantee any reasonable machine learning technique or classifier to perform well (Scott & Matwin, 1999) . However, feature selection is language-dependent.", |
| "cite_spans": [ |
| { |
| "start": 238, |
| "end": 258, |
| "text": "(Peng et. al., 2003)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 440, |
| "end": 462, |
| "text": "(Scott & Matwin, 1999)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Several preprocessing methods such as stopword removal, lemmatization and root-word extraction require domain knowledge of the language used (Peng et. al., 2003) .", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 161, |
| "text": "(Peng et. al., 2003)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Methodologies used in researches concerning automatic document categorization are unique from language to language, depending on the structure and morphological rules of the specific language. Although automatic text categorization is becoming a great area of research in most languages aside from English such as Chinese and Arabic, researchers have paid little to no attention in categorizing Tagalog documents. Tagalog exhibits morphological phenomena that makes it a little different than the English language. Thus, this study aims to investigate the factors and explore on different methods that will affect the process of building a Tagalog document classifier. Specifically, this study intends to:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Collect Tagalog news articles and label them according to their category \u2022 Represent and extract features from documents using NLP techniques \u2022 Build an SVM Classifier \u2022 Evaluate classification performance and present results", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Different researchers have already explored on automatic document categorization to help manage documents efficiently. Over the years, many approaches have already been adopted to such research problem -from data mining techniques to machine learning models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Categorization and Machine Learning", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Although many approaches have been proposed, text categorization is still a major area of interest since these classifiers have been devoted and focused on English documents and can still be improved.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Categorization and Machine Learning", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Several studies used different machine learning models in document categorization. McCallum and Niggam (1998) compared two different types of na\u00efve bayes which assumes that all attributes of the examples are independent of each other. Eyheramendy et. al (2003) used multinomial na\u00efve bayes but found out that it is often outperformed by support vector machines. The use of decision trees for multi-class categorization was explored by Weiss et. al (1999) . K-Nearest Neighbors algorithm is also applied in text categorization such as that in a study by Soucy and Mineau (2001) where the model performed better with only few features. Zhang and Zhou (2006) experimented on the use of neural networks for multilabel categorization.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 109, |
| "text": "McCallum and Niggam (1998)", |
| "ref_id": null |
| }, |
| { |
| "start": 235, |
| "end": 260, |
| "text": "Eyheramendy et. al (2003)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 435, |
| "end": 454, |
| "text": "Weiss et. al (1999)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 553, |
| "end": 576, |
| "text": "Soucy and Mineau (2001)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 634, |
| "end": 655, |
| "text": "Zhang and Zhou (2006)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Categorization and Machine Learning", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Although there were several researches on document categorization, none had replaced Support Vector Machines as the state-of-the-art method in this research area. A study by Joachims (1998) showed that Support Vector Machines are suited for text categorization, and has consistently showed good performance in all experiments. Yang and Liu (1999) conducted a controlled study and re-examined five of machine learning text categorization methods where SVM outperformed all other methods.", |
| "cite_spans": [ |
| { |
| "start": 174, |
| "end": 189, |
| "text": "Joachims (1998)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 327, |
| "end": 346, |
| "text": "Yang and Liu (1999)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Categorization and Machine Learning", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "This type of classifier, proposed by Vladimir Vapnik and Alexey Chervonenkis, began to establish as the state-of-the-art method for text categorization in 1992. Figure 1 shows the framework for SVM on text categorization. Joachims (1998) concluded that SVM will work well for text categorization since (1) it uses overfitting protection which gives it the potential to handle large feature spaces, especially that learning text classifiers deal with more than 10000 features, (2) document vectors are sparse which means that only few entries in it have non-zero values.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 161, |
| "end": 169, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Support Vector Machines", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Since feature extraction is language-dependent and requires language-specific knowledge, building a classifier for documents in different languages will introduce different challenges.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Existing Classifiers in Other Languages", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "In an automatic Arabic document categorizer by Kourdi et. al. (2004) , the word morphology was considered. A root extraction technique suited for the non-concatenative nature of Arabic and the challenge of their plural and hollow verbs was used.", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 68, |
| "text": "Kourdi et. al. (2004)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Existing Classifiers in Other Languages", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "In Chinese document categorization, word segmentation became a challenging issue since the language does not have a natural delimiter between words, unlike English and other Indo-European languages. He et. al. (2003) adopted a word-class bigram model to segment each training document into a feature vector.", |
| "cite_spans": [ |
| { |
| "start": 199, |
| "end": 216, |
| "text": "He et. al. (2003)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Existing Classifiers in Other Languages", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "With regards to Indian languages, Nidhi (2012) stated that using only statistical approaches to classify Punjabi documents won't provide good classification results since the language has a very rich inflectional morphology compared to the English language. This means that there is a need of linguistic approaches and a good understanding of the language's morphology for the selection of the features that will increase efficiency. Nidhi (2012) used a rule-based approach to extract language-dependent features.", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 46, |
| "text": "Nidhi (2012)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Existing Classifiers in Other Languages", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Concerning Tagalog, no work has been done to classify Tagalog documents. Although recently, there are morphological analysis tools for the Tagalog language such as the Two-level Engine for Tagalog Morphology (Nelson, 2004) , the Tagalog Stemming Algorithm (TagSA) (Bonus, 2012), different proposed POS taggers including the works of Cheng (n.d.) and Reyes et al., (2014), none of which are being applied in the automatic categorization of Tagalog documents.", |
| "cite_spans": [ |
| { |
| "start": 208, |
| "end": 222, |
| "text": "(Nelson, 2004)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Existing Classifiers in Other Languages", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "This study follows the basic framework for document categorization which is divided into three, namely: data preparation and preprocessing, feature extraction and selection, and the building of classifier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In the preprocessing of data, the first step was removing the whitespaces and punctuations. The documents were also transformed into lowercase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing of Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In the next step, stopwords were removed. This includes words such as ang, mga, si, dahil, etc. These are frequent occurring words in Tagalog language which do not offer information about the category of the document.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing of Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Lastly, stemming was done. This is used to reduce the words in the documents into its canonical form. Words with the same canonical form is counted as one. For example, maaga, pinakamaaga, and umaga, will be counted as one since they all have the same canonical form, aga.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing of Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In Tagalog, there are four types of affixation: (1) prefixation, (2) infixation, (3) suffixation, and (4) circumfixation. Prefixation is when the bound morpheme is attached before the root word, infixation is when it is attached within the root word, and suffixation is when it is attached at the end. Circumfixation is when the bound morpheme can occur as prefix, infix, or suffix. Reduplication of these affixes is also common in the language. The stemmer created by the researcher was meant to remove the affixes, including the reduplicated parts, and retrieve the root word only", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing of Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The stemmer retrieves the canonical form by removing all affixes that can occur as prefix, infix, and suffix. Affixes in Tagalog include um, ma, and in. Words with these affixes include k(um)ain, (ma)bilis, s(in)abi. After stemming these words, kain, bilis and sabi will be retrieved respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing of Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The stemmer also removes reduplicated parts. In the word pupunta, the morpheme pu-was reduplicated; hence it will be removed. After stemming, its canonical form, punta, will be retrieved.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing of Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "On the other hand, Non-Tagalog words were considered foreign words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing of Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "After the preprocessing method, a Bag-of-Words model, containing all words in the documents, was created. This is used as the basis for extracting features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Representation and Feature Extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Typically, the feature space consists of an m\u00d7n matrix where m is equal to the number of documents and n is equal to the number of tokens in the Bag-of-Words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Vectorization", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this study, three schemes in numerical representation were used, namely: Binary Representation, Word Counts, and the TF-IDF.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Vectorization", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "After vectorizing the documents into different numerical representations, they were then shuffled and divided into two: the training set and testing set. 80% of the dataset went to the training set while the remaining 20% went to the testing set. Sklearn's train_test_split was used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification of Documents", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In this study, two classifiers were experimented, namely: Na\u00efve Bayes and Support Vector Machines. Both were implemented using Python's sklearn.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification of Documents", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In this study, a linear kernel and a one-vs-all strategy were used where a single classifier per class is trained, with the samples of that class as positive samples and all other samples as negatives The OneVsRestClassifier, together with the LinearSVC of sklearn were utilized.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Support Vector Machines", |
| "sec_num": null |
| }, |
| { |
| "text": "For the second classifier in this study, a Multinomial Naive-Bayes, which estimates probabilities of a given document to belong to a specific category, was used. The MultinomialNB of sklearn was used in this study.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multinomial Na\u00efve Bayes", |
| "sec_num": null |
| }, |
| { |
| "text": "Several experiment setups with the different document representations and machine learning classifiers were conducted. Out of the 2,121 news articles, 1,696 news articles (80%) went to the training set. The remaining 425 news articles (20%) went to the testing set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4." |
| }, |
| { |
| "text": "The dataset is comprised of Tagalog news articles retrieved from Philippine news websites from August 2016 to January 2017 using scrapy (https://scrapy.org/). The collected data comprised of 2,121 manually annotated news articles. Table 1 summarizes the distribution of data for each category.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 231, |
| "end": 238, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In this study, three document representations were used for the experiments that were conducted -Binary Feature Representation, Word Count Representation, TF-IDF Representation. From the training set, 22,824 total terms/words were retrieved and stored in the Bag-of-Words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Representation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Some words included in the Bag-of-Words are not part of the Tagalog vocabulary. These includes frequently occurring foreign words and proper nouns such as are city and duterte. Some proper nouns were also stemmed such as philippe which is originally philippine but -in-was removed because the stemmer thought it is an infix.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Representation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For the core experiment, an SVM classifier is used together with the TF-IDF representation for all documents. The overall accuracy of this classifier is 92%. Table 2 summarizes the performance metrics of the classifier.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 165, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Core Experiment", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Based on Table 2 , the classifier was able to yield relatively high F-Scores, except that of Terrorism which yielded an F-Score of only 78.78%. This was expected since the amount of news articles that belong to this category was relatively low compared to that of other categories. On another note, it can be seen in the table that the Entertainment category got a recall of 100%, Health and Sports categories both got a precision of 100%. Also, Economic and Political categories both got an F-score below 90%. This could stem from the nature of the two categories -both talk about the government or the status of the country, which makes it hard for the classifier to distinguish the difference between the two.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 16, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Core Experiment", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Based on the core experiment, the performance measure of the classifier is already acceptable. To ascertain the contribution of Tagalog language processing in the classification of Tagalog document, the following experiments were conducted:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Validation and Evaluation", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "To show the contribution of stemming to the whole process of building the classifier, an unstemmed dataset was fed to the SVM classifier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Stemmer", |
| "sec_num": null |
| }, |
| { |
| "text": "As seen in Figure 2 , the classifier with the stemmed data performed better than that with unstemmed data. Although the stemmer wasn't perfect, the process of reducing words to their word stems has helped significantly in improving the performance of classifier.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 11, |
| "end": 19, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Stemmer", |
| "sec_num": null |
| }, |
| { |
| "text": "A Multinomial Na\u00efve Bayes (MultiNB) classifier was tested to see if stemming data still achieves high performance, like in the SVM classifier. Both datasets were fed to the MultiNB classifier. Using TF-IDF, the classifier with the stemmed data yielded an F-Score of 83.55% while the other yielded only 81.41%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Stemmer", |
| "sec_num": null |
| }, |
| { |
| "text": "Based on the previous experiments, it can be seen that TF-IDF representation yielded impressive performance measures for the SVM classifier. For comparison purposes, two other document representation were used -Binary Representation and Word Count. Table 3 summarizes the performance measures of the SVM classifier for the three different document representations where TF-IDF resulted to the highest F-Score of 91.99%.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 249, |
| "end": 256, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Document Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "For the sake of comparison, all three document representation were fed to the MultiNB classifier. Table 4 shows the performance measures for the MultiNB classifier. It can be seen that, unlike in SVM, TF-IDF yielded the lowest F-Score of 83.55% while Word Count yielded 91.41%. The Multinomial Naive Bayes implements the Naive Bayes algorithm for multinomially distributed data, which means that it models the data based on probability counts. Since multinomial distribution normally requires integer feature counts, TF-IDF representation is likely to produce poor results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 98, |
| "end": 105, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Document Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "Furthermore, TF-IDF with SVM yields a higher Fscore compared to that of Word Count with Na\u00efve Bayes, and it in fact outperformed all other combinations of document representation with the classifiers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Document Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "A 10-fold cross validation scheme was used to validate the performance of the multinomial SVM classifier. Training and testing were repeated 10 times on stratified folds for the whole dataset. Table 5 summarizes the result of the performance of all categories averaged at each fold.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 193, |
| "end": 200, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Cross-Validation", |
| "sec_num": null |
| }, |
| { |
| "text": "The ten-fold cross validated classifier yielded an average accuracy of 90.8%. The test shows that although randomness was introduced to the experiment by means of the folds, the performance is generally the same.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Validation", |
| "sec_num": null |
| }, |
| { |
| "text": "Tagalog document categorization, like in other languages, is affected by many factors. Such includes the size of the corpus, the classifier type, the feature selection and feature reduction method, and the weighting scheme. In this study, stemming each document, representing it with TF-IDF values and using it to train an SVM classifier yielded the highest F-Score of 91.99% among all other combination of methods and experiment setups.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Although the stemming process wasn't perfect, it still served the purpose of conflating and integrating different word forms into their common canonical form; therefore, reducing the number of terms in the whole corpus. This method in computational linguistic can result to either poor or good performance, depending on some cases. In this study, it was shown that stemming, which performs iterative affix removal, is effective in Tagalog documents and that it has contributed to the high performance of the machine learning classifier which automatically classifies documents into categories.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In this study, it was also proven that an SVM classifier performs well in categorizing text data. More than 10000 features were used in this study and each document vector was sparse; however, the SVM classifier was able to handle the large feature space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Although high performance measures were achieved in building a machine learning classifier that can automatically categorize text documents, it would be better to use a larger dataset with a more even distribution for each class. Future researches could also experiment on more complicated feature representations such as the use of POS tags or Ngrams to explore more on their performance on Tagalog documents. Also, researches could try on the use of lemmatization instead of just stemming the Tagalog words. In this research, Tagalog words that weren't stemmed properly by the stemmer, such as nam and sabg, were included. While stemming only chops off morphemes in words to remove the derivational affixes, lemmatization refers to the use of a vocabulary and morphological analysis of words to be able to do return the correct base or dictionary form of a word. More categories can also be incorporated; for example, Sports can be divided into more specific categories such as Basketball, Volleyball, etc. Lastly, Future researches should also be able to build a classifier that can label the Tagalog documents with more than one category (multi-labeled instead of just multiclass).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recommendations for Future Work", |
| "sec_num": "5.2" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Inductive learning algorithms and representations for text categorization", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Dumais", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Platt", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Heckerman", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Sahami", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of the seventh international conference on Information and knowledge management -CIKM '98", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/288627.288651" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dumais, S., Platt, J., Heckerman, D., & Sahami, M. (1998). Inductive learning algorithms and representations for text categorization. In Proceedings of the seventh international conference on Information and knowledge management - CIKM '98. doi:10.1145/288627.288651", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "On the naive bayes model for text categorization", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Eyheramendy", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "D" |
| ], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Madigan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eyheramendy, S., Lewis, D. D., & Madigan, D. (2003). On the naive bayes model for text categorization.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "On Machine Learning Methods for Chinese Document Categorization", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Applied Intelligence", |
| "volume": "18", |
| "issue": "", |
| "pages": "311--322", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "He, J., Tan, A., & Tan, C. (2003). On Machine Learning Methods for Chinese Document Categorization. Applied Intelligence, 18, 311-322. Retrieved from https://www.comp.nus.edu.sg/~tancl/publications/j2003/he03apin.pdf", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Text categorization with support vector machines: Learning with many relevant features", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "European conference on machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "137--142", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joachims, T. (1998, April). Text categorization with support vector machines: Learning with many relevant features. In European conference on machine learning (pp. 137-142). Springer Berlin Heidelberg.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Automatic Arabic document categorization based on the Na\u00efve Bayes algorithm", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "E" |
| ], |
| "last": "Kourdi", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bensaid", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Rachidi", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Workshop on Computational Approaches to Arabic Script-based Languages - Semitic '04", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1621804.1621819" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kourdi, M. E., Bensaid, A., & Rachidi, T. (2004). Automatic Arabic document categorization based on the Na\u00efve Bayes algorithm. Proceedings of the Workshop on Computational Approaches to Arabic Script-based Languages - Semitic '04. doi:10.3115/1621804.1621819", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Feature selection and feature extraction for text categorization", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Proceedings of a Workshop on Speech and Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "212--217", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lewis, D. (1992). Feature selection and feature extraction for text categorization. In Proceedings of a Workshop on Speech and Natural Language Processing, (pp. 212-217). San Mateo, CA: Morgan Kaufmann.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A comparison of event models for naive bayes text classification", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Nigam", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "AAAI-98 workshop on learning for text categorization", |
| "volume": "752", |
| "issue": "", |
| "pages": "41--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "McCallum, A., & Nigam, K. (1998, July). A comparison of event models for naive bayes text classification. In AAAI-98 workshop on learning for text categorization (Vol. 752, pp. 41-48).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A Two-level Engine for Tagalog Morphology and a Structured XML Output for PC-Kimmo", |
| "authors": [ |
| { |
| "first": "Hans", |
| "middle": [ |
| "J" |
| ], |
| "last": "Nelson", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nelson, Hans J., \"A Two-level Engine for Tagalog Morphology and a Structured XML Output for PC-Kimmo\" (2004). All Theses and Dissertations. Paper 133. Retrieved from http://scholarsarchive.byu.edu/cgi/viewcontent.cgi?article=1132&context=etd", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Domain Based Classification of Punjabi Text Documents using Ontology and Hybrid Based Approach", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [ |
| "G" |
| ], |
| "last": "Nidhi", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 3rd Workshop on South and Southeast Asian Natural Language Processing (SANLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "109--122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nidhi, V. G. (2012). Domain Based Classification of Punjabi Text Documents using Ontology and Hybrid Based Approach. Proceedings of the 3rd Workshop on South and Southeast Asian Natural Language Processing (SANLP) (pp. 109-122). Retrieved from http://www.aclweb.org/anthology/W12-5009", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Language and task independent text categorization with simple language models", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Schuurmans", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 2003 Conference of the North American Chapter of the Association for Computational Linguistics on Human Language Technology - NAACL '03", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073445.1073470" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng, F., Schuurmans, D., & Wang, S. (2003). Language and task independent text categorization with simple language models. Proceedings of the 2003 Conference of the North American Chapter of the Association for Computational Linguistics on Human Language Technology - NAACL '03. doi:10.3115/1073445.1073470", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Machine Translation from English to Filipino: A Prototype", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Roxas", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "International Symposium of Multilingual Information Technology (MLIT '97)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roxas, R. (1997). Machine Translation from English to Filipino: A Prototype. International Symposium of Multilingual Information Technology (MLIT '97), Singapore.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Feature Engineering for Text Classification", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Scott", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Matwin", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the Sixteenth International Conference on Machine Learning (ICML '99)", |
| "volume": "", |
| "issue": "", |
| "pages": "379--388", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott, S., & Matwin, S. (1999). Feature Engineering for Text Classification. In Proceedings of the Sixteenth International Conference on Machine Learning (ICML '99), pp. 379-388.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A simple KNN algorithm for text categorization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Soucy", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "W" |
| ], |
| "last": "Mineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings IEEE International Conference on", |
| "volume": "", |
| "issue": "", |
| "pages": "647--648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soucy, P., & Mineau, G. W. (2001). A simple KNN algorithm for text categorization. In Data Mining, 2001. ICDM 2001, Proceedings IEEE International Conference on (pp. 647-648). IEEE.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Maximizing Text-Mining Performance", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Apte", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Damerau", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "E" |
| ], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "J" |
| ], |
| "last": "Oles", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Goetz", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hampp", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "IEEE Intelligent Systems", |
| "volume": "14", |
| "issue": "", |
| "pages": "63--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weiss, S. M., Apte, S., Damerau, F., Johnson, D. E., Oles, F. J., Goetz, T., and Hampp, T. (1999). Maximizing Text-Mining Performance. IEEE Intelligent Systems, 14, 63-69.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A Re-examination of Text Categorization Methods", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang, Y., & Liu, X. (1999). A Re-examination of Text Categorization Methods. Carnegie Mellon University. Retrieved from http://www2.hawaii.edu/~chin/702/sigir99.pdf", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Multilabel Neural Networks with Applications to Functional Genomics and Text Categorization", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "IEEE Transactions on Knowledge and Data Engineering", |
| "volume": "18", |
| "issue": "10", |
| "pages": "1338--1351", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/tkde.2006.162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, M., & Zhou, Z. (2006). Multilabel Neural Networks with Applications to Functional Genomics and Text Categorization. IEEE Transactions on Knowledge and Data Engineering, 18(10), 1338-1351. doi:10.1109/tkde.2006.162", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Classification Infrastructure of SVM on Text Categorization (Mertsalov and McCreary, 2009)" |
| } |
| } |
| } |
| } |