| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:02:13.995542Z" |
| }, |
| "title": "Imbalanced Chinese Multi-label Text Classification Based on Alternating Attention", |
| "authors": [ |
| { |
| "first": "Hongliang", |
| "middle": [], |
| "last": "Bi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing Language and Culture University", |
| "location": { |
| "postCode": "100083", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Han", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing Language and Culture University", |
| "location": { |
| "postCode": "100083", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Pengyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Beijing Language and Culture University", |
| "location": { |
| "postCode": "100083", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "liupengyuan@blcu.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this work, we construct an imbalanced Chinese multi-label text classification dataset, IMCM. The imbalance is mainly reflected in: (1) The degree of discrimination among labels is different. (2) The distribution of labels is moderately imbalanced. Then, we adopt several methods for multi-label classification and conduct thorough evaluation of them, which show that even the most competitive models struggle on this dataset. Therefore, to tackle these imbalanced problems, we proposed an alternating attention model, AltXML. Two attention heads which alternately reading sequence enable the model capture different parts of the document rather than one point. Experimental results show that our proposed model significantly outperforms the state-of-the-art baselines in our IMCM dataset, and also achieves quite good results in several public datasets. * Corresponding Author. per instance inevitably makes the MLC task much more difficult to solve. Therefore, the key challenge of this task lies in the overwhelming and uncontrollable size of output space.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this work, we construct an imbalanced Chinese multi-label text classification dataset, IMCM. The imbalance is mainly reflected in: (1) The degree of discrimination among labels is different. (2) The distribution of labels is moderately imbalanced. Then, we adopt several methods for multi-label classification and conduct thorough evaluation of them, which show that even the most competitive models struggle on this dataset. Therefore, to tackle these imbalanced problems, we proposed an alternating attention model, AltXML. Two attention heads which alternately reading sequence enable the model capture different parts of the document rather than one point. Experimental results show that our proposed model significantly outperforms the state-of-the-art baselines in our IMCM dataset, and also achieves quite good results in several public datasets. * Corresponding Author. per instance inevitably makes the MLC task much more difficult to solve. Therefore, the key challenge of this task lies in the overwhelming and uncontrollable size of output space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Multi-label classification (MLC) is an important task in natural language processing (NLP) due to the increasing number of fields where it can be applied, such as text classification, tag suggestion, information retrieval, and so on. Compared to singlelabel classification task, multi-label classification task aims to assign a set of labels to a single instance simultaneously. However, the number of label sets grows exponentially as the number of class labels increases and the uncertainty in the number of labels Large amount of efforts have been done towards MLC task, including Binary Relevance (BR) (Boutell et al., 2004) , Classifier Chains (CC) (Read et al., 2011) , Label Powerset (LP) (Tsoumakas and Vlahavas, 2007) , PD-Spare (Yen et al., 2016) , SLEEC (Bhatia et al., 2015) , AnnexML (Tagami, 2017) , PfastreXML (Jain et al., 2016) , Parabel (Prabhu et al., 2018) .. In addition to the above methods, neural networks provide some new approaches: CNN (Kim, 2014) , CNN-RNN (Chen et al., 2017) , SGM , etc. These methods have made great progress in capturing label correlations to cope with the exponential-sized output space, but still face the problem of high computational complexity and poor scalability.", |
| "cite_spans": [ |
| { |
| "start": 606, |
| "end": 628, |
| "text": "(Boutell et al., 2004)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 654, |
| "end": 673, |
| "text": "(Read et al., 2011)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 696, |
| "end": 726, |
| "text": "(Tsoumakas and Vlahavas, 2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 729, |
| "end": 756, |
| "text": "PD-Spare (Yen et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 765, |
| "end": 786, |
| "text": "(Bhatia et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 797, |
| "end": 811, |
| "text": "(Tagami, 2017)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 825, |
| "end": 844, |
| "text": "(Jain et al., 2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 855, |
| "end": 876, |
| "text": "(Prabhu et al., 2018)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 963, |
| "end": 974, |
| "text": "(Kim, 2014)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 985, |
| "end": 1004, |
| "text": "(Chen et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While utilizing correlations among labels is essential for MLC task, in real-world scenarios, there are no obvious semantic boundaries among some labels and some seemingly distinct labels may appear together, especially for text. Moreover, the distribution of labels may be imbalanced. On the one hand, the number of instance belonging to a certain label may outnumber other labels. On the other hand, there may be a relatively high number of examples associated with the most common labels or infrequent labels (Gibaja and Ventura, 2015) . These may affect the performance of models utilizing correlations of labels. Therefore, it is important to explore the balance between using correlation to reduce output space and improving the ability to refine labels.", |
| "cite_spans": [ |
| { |
| "start": 512, |
| "end": 538, |
| "text": "(Gibaja and Ventura, 2015)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We inspect the commonly used multi-label text classification datasets consist of Rcv1v2 (Lewis et al., 2004) , AAPD , etc. Some of them have been used as benchmarks, but still can not meet the actual demand. The numbers of class labels or labels per instance is small, and the semantic boundaries among the labels are obvious to some extent. Therefore, to further explore this field, we propose an imbalanced Chinese multi-label text classification dataset, IMCM 1 .", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 108, |
| "text": "(Lewis et al., 2004)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Furthermore, we conduct a detailed evaluation for diverse MLC models on our dataset and two public datasets. Experimental results show that several models that perform well on other datasets struggle on our dataset. Our point of view is that, different from single label classification models which need to focus on the most important part of the document, multi-label classification models need to be aware of different parts. That means that models can't be bound by a certainly associated label.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Therefore, inspired by the idea of dilated convolution which has become popular in semantic segmentation (Yu and Koltun, 2016) , we propose our alternating attention model, AltXML. Two attention heads which alternate reading sequence enable the model capture different parts of the document rather than one point. We evaluate our model on different datasets. Comparison with other models indicates that the trade-off between using correlation to reduce output space and improving the ability to refine labels needs further research. In summary, our contribution is three-fold:", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 126, |
| "text": "(Yu and Koltun, 2016)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We construct an imbalanced Chinese multilabel text classification dataset, IMCM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We implement diverse MLC models and propose our alternating attention model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We conduct a detailed evaluation for these models on three datasets with different imbalance ratios, by comparing on them, our model achieves promising performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Multi-label classification studies the problem where each example is represented by a single instance 1 https://github.com/NLPBLCU/imcm-dataset while associated with a set of labels simultaneously. There are two main types of methods for MLC task: problem transformation methods and algorithm adaptation methods. Binary Relevance (BR) transforms the task of multi-label classification into the task of binary classification, which is simple and reversible but ignores potential correlations among labels and may lead to the issue of sample imbalance. Label powerset (LP) generates a new class for each possible combination of labels and then solves the problem as a singlelabel multi-class one. Classifier Chains (CC) treats this task as a sequence labeling problem and overcomes the label independence assumption of BR due to classifiers are built upon the previous predictions. In addition to traditional machine learning methods, neural networks provide some new approaches to MLC task. These methods have made great progress in multi-label classification task, but still face the problem of high computational complexity and poor scalability to meet high-order label correlations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "CNN uses multiple convolution kernels to extract text feature, which is then input to the linear transformation layer followed by a sigmoid function to output the probability distribution over the label space. CNN-RNN incorporated CNN and RNN so as to capture both global and local semantic information and model high-order label correlations. Nam et al. (2017) also treat the multi-label classification task as a sequence labeling problem but replace classifier chains with RNN. It allows to focus on the prediction of the positive labels only, a much smaller set than the full set of possible labels. propose to view the MLC task as a sequence generation problem to take the correlations between labels into account.", |
| "cite_spans": [ |
| { |
| "start": 344, |
| "end": 361, |
| "text": "Nam et al. (2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Typically, there are two main available multi-label text classification datasets, which all stem from English reading materials. Rcv1v2 (Lewis et al., 2004) is widely used in multi-Label classification task. It consists more than 80,000 manually classified English newswire stories, which divided by Lin et al. (2018) . The total number of topic labels is 103.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 156, |
| "text": "(Lewis et al., 2004)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 300, |
| "end": 317, |
| "text": "Lin et al. (2018)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "AAPD ) is a large English multilabel text classification dataset. It contains abstract and corresponding topics of 55,840 papers in the computer science field on the Arxiv. The total number of subjects is 54. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For the purpose of constructing highly reliable multi-label text classification dataset, we have collected nearly 60,000 books' information from Douban 2 , which consists of content summary and author introduction. Labels of each book are manually marked by members of Douban. Unlike the above described datasets, the difference among some labels in the IMCM is very subtle, such as Humanistic and Human nature. And distribution of labels is very imbalanced, which can be seen in figure 1. These characteristics make it not feasible for labels to be classified in an extensive way. Therefore, we limited the number of words per instance no less than 50 to provide adequate information. Finally, we got 52,286 documents. In order to evaluate the data effectively, we carry on the same distribution sampling to the data. In the end, we got 41,829 training data, 5,228 validation data and 5,229 test data. The total number of labels is 158, the average number of labels per instance is 3.7 (can be seen in figure 2), the average length of the instance is 348.91 and the imbalanced ratio of labels is 10.35. Comparison of IMCM dataset with existing MLC datasets can be seen in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1173, |
| "end": 1180, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "IMCM Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We can see that our dataset is longer than the other two. Besides, neither like the extreme imbalance of the labels of the Rcv1v2 dataset nor like the smallscale imbalance of the labels of the AAPD dataset, our dataset makes a trade-off. This avoids the overwhelming interference caused by the extreme imbalance of data, and allows us to make some explorations on this basis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "IMCM Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We introduce our proposed model in detail in this section. First, we give an overview of the model in Figure 3 . It consists of four layers: Word Representation Layer, Bidirectional LSTM Layer, Alternating Attention Layer, and Classification Layer. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 102, |
| "end": 110, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Alternating Attention Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The input of AltXML is raw tokenized text, each word is represented by word embedding. Let T and d respectively represent the length of the input text and the dimension of word representation. The output of word representation as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Representation Layer", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "X = (x 1 , x 2 , ..., x T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Representation Layer", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where x t is a dense vector for each word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Representation Layer", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We use a Bidirectional LSTM (Hochreiter and Schmidhuber, 1997) to capture both the left-sides and right-sides context at each time step, the output of BiLSTM can be obtained as follows:", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 62, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bidirectional LSTM Layer", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2212 \u2192 h t = LST M (x t , \u2212 \u2192 h t , C t\u22121 ) \u2190 \u2212 h t = LST M (x t , \u2190 \u2212 h t , C t\u22121 ) h t = [ \u2212 \u2192 h t ; \u2190 \u2212 h t ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bidirectional LSTM Layer", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where h t is obtained by concatenating forward \u2212 \u2192 h t and backward \u2190 \u2212 h t .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bidirectional LSTM Layer", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We alternately send the output of the BiLSTM to the two attention layers, reduce the coupling between information, so that it is able to remove the negative effects such as information loss caused by general attention mechanism, such as focus on one key point. The output of alternating attention can be obtained as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternating Attention Layer", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "m 2i = e h 2i w T m T t=1 e h 2i w T m ; m 2i+1 = 0 n 2i+1 = e h 2i+1 w T n T t=1 e h 2i+1 w T n ; n 2i = 0 a = T i=1 Relu(m + n) * h i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternating Attention Layer", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where m i and n i is the normalized coefficient of h i . Besides, it is able to expand the attention at the polynomial level without increasing the number of parameters. Thus, it becomes possible for alternating attention to capture longer-term dependency and avoid gridding effects caused by dilation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternating Attention Layer", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "AltXML has one fully connected layers as output layer. Then, predicted probability\u0177 for the label can be obtained as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Layer", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "y = f (aw T + b)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Layer", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "where, function f is sigmoid activation function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Layer", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We uses the binary cross-entropy loss function, which was used in XML-CNN (Liu et al., 2017) as the loss function. The loss function is given as follow:", |
| "cite_spans": [ |
| { |
| "start": 74, |
| "end": 92, |
| "text": "(Liu et al., 2017)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Loss Function", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "L(\u03b8) = \u2212 1 N L N i=1 L j=1 y ij log(\u0177 ij )+(1\u2212y ij ) log(1\u2212\u0177 ij )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Loss Function", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "where N is the number of samples, L is the number of labels,\u0177 ij \u2208 [0, 1] and y ij \u2208 {0, 1} are the predicted probability and true values, respectively, for the i-th sample and the j-th label.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Loss Function", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Training details of neural network models are illustrated as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 Vocabulary: For training efficiency and generalization, in all datasets, we truncate the full vocabulary and set a shortlist of 60,000. Note that, for Chinese, we use Jieba 3 to cut words and not use domain dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 Embedding layer: We set word embedding dimension to 256 and use randomly initialized embedding matrix with the normal distribution N (0, 1). Note that, no pre-trained word embeddings are used in our experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 BiLSTM layer: We use single-layered bidirectional LSTM that output dimension in each direction is 100, and randomly initialized it with uniform distribution U(\u2212 \u221a k, \u221a k), where k = 1 hidden size . As LSTM still suffers from the gradient exploding problem, we set gradient clipping threshold to 10 in our experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 Dropout: We used Dropout after embedding layer and set dropout ratio to 0.5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 Optimization: We used the AdamW optimizer (Loshchilov and Hutter, 2018) with an initial lr = 0.001 and wd=0.01. The batch size is set to 64.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 73, |
| "text": "(Loshchilov and Hutter, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 Training: We trained model for 20 epochs and choose the best model according to the performance of validation set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "3 https://github.com/fxsjy/jieba Note that, the hyperparameters are consistent across all datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We used the micro-F1 score as our main evaluation metrics. micro-F1 (Mi-F1) can be interpreted as a weighted average of the precision and recall. It is calculated globally by counting the total true positives, false positives, and false negatives.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "P recision = T P T P + F P Recall = T P T P + F N micro-F 1 = 2 * P recision * Recall P recision + Recall", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "\u2022 Binary Relevance (BR) (Boutell et al., 2004) transforms the task of multi-label classification into the task of binary classification, which is simple and reversible but ignores potential correlations among labels and may lead to the issue of sample imbalance.", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 46, |
| "text": "(Boutell et al., 2004)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 Label powerset (LP) (Tsoumakas and Vlahavas, 2007) generates a new class for each possible combination of labels and then solves the problem as a single-label multi-class one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 Classifier Chains (CC) (Read et al., 2011) treats this task as a sequence labeling problem and overcomes the label independence assumption due to classifiers are built upon the previous predictions.", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 44, |
| "text": "(Read et al., 2011)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 CNN (Kim, 2014) uses multiple convolution kernels to extract text feature, which is then input to the linear transformation layer followed by a sigmoid function to output the probability distribution over the label space.", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 17, |
| "text": "(Kim, 2014)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 CNN-RNN (Chen et al., 2017) incorporated CNN and RNN so as to capture both global and local semantic information and model highorder label correlations.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 29, |
| "text": "(Chen et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 SGM ) (state-of-the-art) views the multi-label classification task as a sequence generation problem, and apply a sequence generation model with a novel decoder structure to solve it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 RNN+att is our implementation of the RNNbased model with the normal attention mechanism.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The results of AltXML and baseline models on our IMCM dataset are presented in Table 2 . From the results of the conventional baselines, it can be found that the machine-learning-based methods for multilabel text classification still own competitiveness compared with the deep-learning-based methods. For the generating model, the SGM+GE achieve significant improvements on the IMCM dataset, compared with the machine-learning-based models. However, there is still a certain gap compared with the classification model. By contrast, our proposed model can capture more key features at the same time and achieve the best performance in the evaluation of micro-F1 score, which improves 6.1% of micro-F1 score compared with the SGM+GE.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 79, |
| "end": 86, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Mi We also implement our experiments on public datasets. On the AAPD dataset, similar to the models' performance on the IMCM dataset, our AltXML model achieved good performance, with a 0.8% increase in micro-F1 scores compared to the best, as shown in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 252, |
| "end": 259, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "On the Rcv1v2 dataset, our AltXML model still achieves similar performance on micro-F1 on this dataset comparded with Seq2Seq model (SGM+GE), which illustrates the robustness of our model. Because we have not adjusted the hyperparameters, there is still a lot of space for improvement. The results can be seen in An interesting finding is that, by comparing on three datasets, although the Seq2Seq models achieves the state-of-the-art performance on the Rcv1v2 English dataset, the generalization on our IMCM dataset is insufficient. We think there are two reasons: (1) Compared to the other two datasets, the number of labels for each instance in our dataset is more and there are no obvious semantic boundaries among some labels. (2) Due to the attention mechanism cannot improve the performance of the Seq2Seq model in this task (Lin et al., 2018) , Seq2Seq model cannot capture some useful information.", |
| "cite_spans": [ |
| { |
| "start": 832, |
| "end": 850, |
| "text": "(Lin et al., 2018)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "By comparing on the three datasets, our model achieves promising performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper, we introduce the first Chinese multilabel text classification dataset, IMCM. This dataset focuses on imbalanced multi-label classification. Among many datasets, our model could also give significant improvements over various state-of-theart baselines. Furthermore, we propose an alternating attention model to handle the imbalanced problems, and further analysis of experimental results demonstrates that our proposed model not only capture the correlations between labels, but also capture the more features when predicting different labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "https://book.douban.com", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by Beijing Natural Science Foundation(4192057). We thank anonymous reviewers for their helpful feedback and suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Sparse local embeddings for extreme multi-label classification", |
| "authors": [ |
| { |
| "first": "Kush", |
| "middle": [], |
| "last": "Bhatia", |
| "suffix": "" |
| }, |
| { |
| "first": "Himanshu", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Purushottam", |
| "middle": [], |
| "last": "Kar", |
| "suffix": "" |
| }, |
| { |
| "first": "Manik", |
| "middle": [], |
| "last": "Varma", |
| "suffix": "" |
| }, |
| { |
| "first": "Prateek", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "28", |
| "issue": "", |
| "pages": "730--738", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kush Bhatia, Himanshu Jain, Purushottam Kar, Manik Varma, and Prateek Jain. 2015. Sparse local em- beddings for extreme multi-label classification. In C. Cortes, N. D. Lawrence, D. D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Informa- tion Processing Systems 28, pages 730-738. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning multi-label scene classification", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "R" |
| ], |
| "last": "Boutell", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiebo", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "M" |
| ], |
| "last": "Brown", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Pattern Recognition", |
| "volume": "37", |
| "issue": "9", |
| "pages": "1757--1771", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew R. Boutell, Jiebo Luo, Xipeng Shen, and Christopher M. Brown. 2004. Learning multi-label scene classification. Pattern Recognition, 37(9):1757 -1771.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Ensemble application of convolutional and recurrent neural networks for multi-label text categorization", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Ye", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Xing", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Cambria", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 International Joint Conference on Neural Networks (IJCNN)", |
| "volume": "", |
| "issue": "", |
| "pages": "2377--2383", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G. Chen, D. Ye, Z. Xing, J. Chen, and E. Cambria. 2017. Ensemble application of convolutional and recurrent neural networks for multi-label text categorization. In 2017 International Joint Conference on Neural Net- works (IJCNN), pages 2377-2383, May.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A tutorial on multilabel learning", |
| "authors": [ |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Gibaja", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebasti\u00e1n", |
| "middle": [], |
| "last": "Ventura", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "ACM Comput. Surv", |
| "volume": "47", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eva Gibaja and Sebasti\u00e1n Ventura. 2015. A tutorial on multilabel learning. ACM Comput. Surv., 47(3):52:1- 52:38, April.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "Jurgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and Jurgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735- 1780.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Extreme multi-label loss functions for recommendation, tagging, ranking & other missing label applications", |
| "authors": [ |
| { |
| "first": "Himanshu", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Yashoteja", |
| "middle": [], |
| "last": "Prabhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Manik", |
| "middle": [], |
| "last": "Varma", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "KDD", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Himanshu Jain, Yashoteja Prabhu, and Manik Varma. 2016. Extreme multi-label loss functions for recom- mendation, tagging, ranking & other missing label ap- plications. In KDD.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Convolutional neural networks for sentence classification. empirical methods in natural language processing", |
| "authors": [ |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1746--1751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoon Kim. 2014. Convolutional neural networks for sen- tence classification. empirical methods in natural lan- guage processing, pages 1746-1751.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Rcv1: A new benchmark collection for text categorization research", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tony", |
| "middle": [ |
| "G" |
| ], |
| "last": "Rose", |
| "suffix": "" |
| }, |
| { |
| "first": "Fan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "5", |
| "issue": "", |
| "pages": "361--397", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Lewis, Yiming Yang, Tony G Rose, and Fan Li. 2004. Rcv1: A new benchmark collection for text categorization research. Journal of Machine Learning Research, 5:361-397.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Semantic-unit-based dilated convolution for multi-label text classification. empirical methods in natural language processing", |
| "authors": [ |
| { |
| "first": "Junyang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengcheng", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuming", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "4554--4564", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junyang Lin, Xu Sun, Pengcheng Yang, Shuming Ma, and Qi Su. 2018. Semantic-unit-based dilated con- volution for multi-label text classification. empirical methods in natural language processing, pages 4554- 4564.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Deep learning for extreme multilabel text classification", |
| "authors": [ |
| { |
| "first": "Jingzhou", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Cheng", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuexin", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "115--124", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jingzhou Liu, Wei-Cheng Chang, Yuexin Wu, and Yim- ing Yang. 2017. Deep learning for extreme multi- label text classification. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 115- 124. ACM.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Fixing weight decay regularization in adam", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Loshchilov", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Hutter", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Loshchilov and Frank Hutter. 2018. Fixing weight decay regularization in adam. arXiv: Learning.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Maximizing subset accuracy with recurrent neural networks in multi-label classification", |
| "authors": [ |
| { |
| "first": "Jinseok", |
| "middle": [], |
| "last": "Nam", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneldo", |
| "middle": [], |
| "last": "Loza Menc\u00eda", |
| "suffix": "" |
| }, |
| { |
| "first": "Hyunwoo", |
| "middle": [ |
| "J" |
| ], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "F\u00fcrnkranz", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "5413--5423", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinseok Nam, Eneldo Loza Menc\u00eda, Hyunwoo J Kim, and Johannes F\u00fcrnkranz. 2017. Maximizing subset ac- curacy with recurrent neural networks in multi-label classification. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Process- ing Systems 30, pages 5413-5423. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Parabel: Partitioned label trees for extreme classification with application to dynamic search advertising", |
| "authors": [ |
| { |
| "first": "Yashoteja", |
| "middle": [], |
| "last": "Prabhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Anil", |
| "middle": [], |
| "last": "Kag", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrutendra", |
| "middle": [], |
| "last": "Harsola", |
| "suffix": "" |
| }, |
| { |
| "first": "Rahul", |
| "middle": [], |
| "last": "Agrawal", |
| "suffix": "" |
| }, |
| { |
| "first": "Manik", |
| "middle": [], |
| "last": "Varma", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 World Wide Web Conference, WWW '18", |
| "volume": "", |
| "issue": "", |
| "pages": "993--1002", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yashoteja Prabhu, Anil Kag, Shrutendra Harsola, Rahul Agrawal, and Manik Varma. 2018. Parabel: Parti- tioned label trees for extreme classification with ap- plication to dynamic search advertising. In Proceed- ings of the 2018 World Wide Web Conference, WWW '18, pages 993-1002, Republic and Canton of Geneva, Switzerland. International World Wide Web Confer- ences Steering Committee.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Classifier chains for multi-label classification", |
| "authors": [ |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Read", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernhard", |
| "middle": [], |
| "last": "Pfahringer", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoff", |
| "middle": [], |
| "last": "Holmes", |
| "suffix": "" |
| }, |
| { |
| "first": "Eibe", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Machine Learning", |
| "volume": "85", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jesse Read, Bernhard Pfahringer, Geoff Holmes, and Eibe Frank. 2011. Classifier chains for multi-label classification. Machine Learning, 85(3):333, Jun.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Annexml: Approximate nearest neighbor search for extreme multi-label classification", |
| "authors": [ |
| { |
| "first": "Yukihiro", |
| "middle": [], |
| "last": "Tagami", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "the 23rd ACM SIGKDD International Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "455--464", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yukihiro Tagami. 2017. Annexml: Approximate nearest neighbor search for extreme multi-label classification. pages 455-464. the 23rd ACM SIGKDD International Conference, 08.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Random k-labelsets: An ensemble method for multilabel classification", |
| "authors": [ |
| { |
| "first": "Grigorios", |
| "middle": [], |
| "last": "Tsoumakas", |
| "suffix": "" |
| }, |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Vlahavas", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Machine Learning: ECML 2007", |
| "volume": "", |
| "issue": "", |
| "pages": "406--417", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Grigorios Tsoumakas and Ioannis Vlahavas. 2007. Ran- dom k-labelsets: An ensemble method for multilabel classification. In Joost N. Kok, Jacek Koronacki, Raomon Lopez de Mantaras, Stan Matwin, Dunja Mladeni\u010d, and Andrzej Skowron, editors, Machine Learning: ECML 2007, pages 406-417, Berlin, Hei- delberg. Springer Berlin Heidelberg.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Sgm: Sequence generation model for multi-label classification", |
| "authors": [ |
| { |
| "first": "Pengcheng", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuming", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Houfeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3915--3926", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengcheng Yang, Xu Sun, Wei Li, Shuming Ma, Wei Wu, and Houfeng Wang. 2018. Sgm: Sequence generation model for multi-label classification. In Proceedings of the 27th International Conference on Computational Linguistics, pages 3915-3926. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Pd-sparse : A primal and dual sparse approach to extreme multiclass and multilabel classification", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "En-Hsu Yen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangru", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Ravikumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Inderjit", |
| "middle": [], |
| "last": "Dhillon", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of Machine Learning Research", |
| "volume": "48", |
| "issue": "", |
| "pages": "3069--3077", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian En-Hsu Yen, Xiangru Huang, Pradeep Ravikumar, Kai Zhong, and Inderjit Dhillon. 2016. Pd-sparse : A primal and dual sparse approach to extreme multiclass and multilabel classification. In Maria Florina Balcan and Kilian Q. Weinberger, editors, ICML, volume 48 of Proceedings of Machine Learning Research, pages 3069-3077, New York, New York, USA, 20-22 Jun. PMLR.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Multi-scale context aggregation by dilated convolutions", |
| "authors": [ |
| { |
| "first": "Fisher", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Vladlen", |
| "middle": [], |
| "last": "Koltun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fisher Yu and Vladlen Koltun. 2016. Multi-scale context aggregation by dilated convolutions. In ICLR.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Distribution and Imbalanced Ratio of labels on IMCM dataset. Imbalanced Ratio is the ratio of the frequency of the label to the highest frequency." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Distribution of the number of labels per instance." |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Overview of the AltXML model" |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Comparison of IMCM dataset with existing MLC datasets. Inst and Lab denote the total number of instances and labels, respectively. Card means the average number of labels per instance. DENS normalizes Card by the Lab. Len refers to the average length of the instance. IR indicates how imbalanced the top 50 percentage of labels are.", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Results on IMCM Dataset.", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td>Model</td><td colspan=\"2\">Mi-P Mi-R Mi-F1</td></tr><tr><td>BR</td><td>64.4 64.8</td><td>64.6</td></tr><tr><td>CC</td><td>65.7 65.1</td><td>65.4</td></tr><tr><td>LP</td><td>66.2 60.8</td><td>63.4</td></tr><tr><td colspan=\"2\">SGM+GE 74.6 67.5</td><td>71.0</td></tr><tr><td colspan=\"2\">RNN+Att 72.0 69.7</td><td>70.8</td></tr><tr><td>AltXML</td><td>71.8 71.9</td><td>71.8</td></tr></table>", |
| "num": null, |
| "text": "", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>Model</td><td colspan=\"2\">Mi-P Mi-R Mi-F1</td></tr><tr><td>BR</td><td>90.4 81.6</td><td>85.8</td></tr><tr><td>CC</td><td>88.7 82.8</td><td>85.7</td></tr><tr><td>LP</td><td>89.6 82.4</td><td>85.8</td></tr><tr><td>CNN</td><td>92.2 79.8</td><td>85.5</td></tr><tr><td colspan=\"2\">CNN-RNN 88.9 82.5</td><td>85.6</td></tr><tr><td colspan=\"2\">SGM+GE 89.7 86.0</td><td>87.8</td></tr><tr><td>RNN+Att</td><td>89.1 85.2</td><td>87.1</td></tr><tr><td>AltXML</td><td>90.1 84.6</td><td>87.2</td></tr></table>", |
| "num": null, |
| "text": "Results on AAPD Dataset.", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF6": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Results on Rcv1v2 Dataset.", |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |