| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T02:10:24.205823Z" |
| }, |
| "title": "Evaluating Hierarchical Document Categorisation", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne \u2662 Fujitsu Limited", |
| "location": {} |
| }, |
| "email": "qiasun@student.unimelb.edu.au" |
| }, |
| { |
| "first": "Aili", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne \u2662 Fujitsu Limited", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hiyori", |
| "middle": [], |
| "last": "Yoshikawa", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne \u2662 Fujitsu Limited", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "\u2662", |
| "middle": [], |
| "last": "Chunpeng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne \u2662 Fujitsu Limited", |
| "location": {} |
| }, |
| "email": "ma.chunpeng@fujitsu.com" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Beck", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne \u2662 Fujitsu Limited", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Tomoya", |
| "middle": [], |
| "last": "Iwakura", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne \u2662 Fujitsu Limited", |
| "location": {} |
| }, |
| "email": "iwakura.tomoya@fujitsu.com" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne \u2662 Fujitsu Limited", |
| "location": {} |
| }, |
| "email": "tbaldwin@unimelb.edu.au" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Hierarchical document categorisation is a special case of multi-label document categorisation, where there is a taxonomic hierarchy among the labels. While various approaches have been proposed for hierarchical document categorisation, there is no standard benchmark dataset, resulting in different methods being evaluated independently and there being no empirical consensus on what methods perform best. In this work, we examine different combinations of neural text encoders and hierarchical methods in an end-to-end framework, and evaluate over three datasets. We find that the performance of hierarchical document categorisation is determined not only by how the hierarchical information is modelled, but also the structure of the label hierarchy and class distribution.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Hierarchical document categorisation is a special case of multi-label document categorisation, where there is a taxonomic hierarchy among the labels. While various approaches have been proposed for hierarchical document categorisation, there is no standard benchmark dataset, resulting in different methods being evaluated independently and there being no empirical consensus on what methods perform best. In this work, we examine different combinations of neural text encoders and hierarchical methods in an end-to-end framework, and evaluate over three datasets. We find that the performance of hierarchical document categorisation is determined not only by how the hierarchical information is modelled, but also the structure of the label hierarchy and class distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Document categorisation is a core task in information retrieval and natural language processing, whereby documents are categorised relative to a pre-defined set of labels. While the majority of research on document categorisation assumes a flat label structure, in practice in large-scale document categorisation tasks, there is often hierarchical label structure, in the form of either a tree or directed acyclic graph (Zhou et al., 2020; Azarbonyad et al., 2021) , where \"child\" labels inherit the properties of their parents. The goal of hierarchical document categorisation is to classify documents into a set of labels, where there is a hierarchical relationship among the labels.", |
| "cite_spans": [ |
| { |
| "start": 420, |
| "end": 439, |
| "text": "(Zhou et al., 2020;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 440, |
| "end": 464, |
| "text": "Azarbonyad et al., 2021)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Hierarchical document categorisation methods explicitly capture the label structure during training. There has been a resurgence of interest in document categorisation in recent years, in part driven by breakthroughs in representation learning and pre-trained language models (Mikolov et al., 2013; Pennington et al., 2014; Peters et al., 2018; Kim, 2014; Wang et al., 2017; Devlin et al., 2019) , which generate more expressive, general-purpose representations, thereby leading to performance gains across a range of NLP tasks. Despite this, there has been relatively little recent work specifically on hierarchical document categorisation. What recent work does has varied wildly in the choice of text encoder and dataset, with no systematic, controlled cross-dataset evaluation to be able to make solid conclusions as to whether the reported performance gains are attributable to the proposed hierarchical document categorisation method or just the text encoders used. Our work focuses on examining the capacity of existing methods dealing with labels with a hierarchical structure, which is different from the work of Yang et al. (2016) , which focuses on modelling documents in a hierarchical way to perform classic document classification task.", |
| "cite_spans": [ |
| { |
| "start": 276, |
| "end": 298, |
| "text": "(Mikolov et al., 2013;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 299, |
| "end": 323, |
| "text": "Pennington et al., 2014;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 324, |
| "end": 344, |
| "text": "Peters et al., 2018;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 345, |
| "end": 355, |
| "text": "Kim, 2014;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 356, |
| "end": 374, |
| "text": "Wang et al., 2017;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 375, |
| "end": 395, |
| "text": "Devlin et al., 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1122, |
| "end": 1140, |
| "text": "Yang et al. (2016)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we carry out systematic evaluation of a range of contemporary hierarchical document categorisation approaches, using a range of neural text encoders, based on three document collections with hierarchical label sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Hierarchical document categorisation methods can be grouped into: flat approaches, local approaches, global approaches, and hybrid methods, based on how they utilise the label hierarchy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Flat approaches (Eisner et al., 2005; Freitas and Carvalho, 2007) simply ignore the label hierarchy, and assume all classes are independent. As such, they are unable to capture the label structure and are poor at handling mutual exclusivity, especially among sibling nodes in multi-label categorisation tasks.", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 37, |
| "text": "(Eisner et al., 2005;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 38, |
| "end": 65, |
| "text": "Freitas and Carvalho, 2007)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Flat Approaches", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Local approaches generally make predictions topdown recursively, along paths in the label hierarchy. They can be divided into three groups (Silla and Freitas, 2011): a local classifier per node (LCN), a local classifier per parent node (LCPN), or a local classifier per level (LCL). In LCN, there is a binary classifier for each node, which determines whether a document belongs to that node or not (Eisner et al., 2005; Freitas and Carvalho, 2007) . In contrast, LCPN (Davies et al., 2007; Secker et al., 2010; Shimura et al., 2018; Banerjee et al., 2019) employs a multi-class classifier at each parent node, predicting which child node the document should be assigned to. Compared with LCN, LCPN significantly reduces the number of local classifiers, and can be applied in either single-label or multilabel settings. In contrast, LCL (Kowsari et al., 2017 ) employs a multi-class classifier at each layer in the hierarchy. This method usually fails to capture parent-child information between layers. For all three approaches, a top-down approach is often used to avoid label inconsistency, making them prone to error propagation.", |
| "cite_spans": [ |
| { |
| "start": 399, |
| "end": 420, |
| "text": "(Eisner et al., 2005;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 421, |
| "end": 448, |
| "text": "Freitas and Carvalho, 2007)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 464, |
| "end": 490, |
| "text": "LCPN (Davies et al., 2007;", |
| "ref_id": null |
| }, |
| { |
| "start": 491, |
| "end": 511, |
| "text": "Secker et al., 2010;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 512, |
| "end": 533, |
| "text": "Shimura et al., 2018;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 534, |
| "end": 556, |
| "text": "Banerjee et al., 2019)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 837, |
| "end": 858, |
| "text": "(Kowsari et al., 2017", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Local Approaches", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Global approaches (Mao et al., 2019; Zhou et al., 2020) optimise across all labels simultaneously, taking the label hierarchy into account. The simplest global approach converts the hierarchical categorisation task into a multi-label categorisation task, where each original label is replaced with its ancestors and itself. Similar to local approaches, this potentially results in label inconsistency during inference. A more popular global approach is to include a loss term which captures the hierarchy in some way (Gopal and Yang, 2013; Peng et al., 2018) , such as an entropy term (Clare and King, 2003) or distance metric (Vens et al., 2008) . For example, Zhou et al. 2020proposed a hierarchyaware structure encoder to model the label hierarchy as a directed graph. It can capture global hierarchical information as it models both top-down and bottom-up label dependencies. Moreover, all nodes are linked with each other, meaning that pairwise co-occurrence can be modelled in addition to parent-child relationships.", |
| "cite_spans": [ |
| { |
| "start": 18, |
| "end": 36, |
| "text": "(Mao et al., 2019;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 37, |
| "end": 55, |
| "text": "Zhou et al., 2020)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 517, |
| "end": 539, |
| "text": "(Gopal and Yang, 2013;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 540, |
| "end": 558, |
| "text": "Peng et al., 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 585, |
| "end": 607, |
| "text": "(Clare and King, 2003)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 627, |
| "end": 646, |
| "text": "(Vens et al., 2008)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Global Approaches", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "There are also hybrid methods which combine the methods mentioned above (Wehrmann et al., 2018; Huang et al., 2019) . For example, Gopal and Yang (2013) used simple recursive regularisation to encourage parameter smoothness between linked nodes, with positive results independently reported by Peng et al. (2018) and Zhou et al. (2020) .", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 95, |
| "text": "(Wehrmann et al., 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 96, |
| "end": 115, |
| "text": "Huang et al., 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 131, |
| "end": 152, |
| "text": "Gopal and Yang (2013)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 294, |
| "end": 312, |
| "text": "Peng et al. (2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 317, |
| "end": 335, |
| "text": "Zhou et al. (2020)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hybrid Methods", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "In our work, each model consists of a text encoder and a hierarchical method, where the text encoder is used to obtain text representations, and the hierarchical method makes predictions with the assistance of hierarchical label information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "TextCNN (Kim, 2014) : A CNN made up of convolutional and max-pooling layers. In this work, we apply convolution kernels with width 2, 3, and 4 (3 for each width size) to word embeddings, and use a max-pooling layer.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 19, |
| "text": "(Kim, 2014)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text Encoders", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "TextRNN: A single-layer Bi-LSTM (Wang et al., 2017 ) with a cell size of 64 where the concatenated hidden state at the last timestep makes up the document representation.", |
| "cite_spans": [ |
| { |
| "start": 32, |
| "end": 50, |
| "text": "(Wang et al., 2017", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text Encoders", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "TextRCNN: A combination of TextCNN and TextRNN, where we first employ a single-layer Bi-LSTM with a cell size of 64 and obtain outputs across all timesteps by concatenating outputs from both directions, then apply convolution kernels with width 2, 3, and 4 (3 for each width size), followed by a max-pooling layer. This method has achieved state-of-the-art on RCV1 for both flat and hierarchical categorisation (Zhou et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 411, |
| "end": 430, |
| "text": "(Zhou et al., 2020)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text Encoders", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "BERT (Devlin et al., 2019) : The hidden state of \"CLS\" from BERT is used as the document representation, using the base-uncased version.", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 26, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text Encoders", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Flat: Baseline method where all nodes are treated as candidate classes, ignoring hierarchical information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hierarchical Methods", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Recursive Regularization (RR: Gopal and Yang (2013) the model corresponds to a level in the label hierarchy. The global model consists of multiple linear layers with ReLU as the activation function. The input to each layer includes the original sequence and the output from its immediate last layer, where the hidden size for each layer is 384 as in Wehrmann et al. (2018) . Passing information from the first layer to the last layer, we obtain the global output. In addition, the output from each layer is also fed into a local layer, where the hidden size is the number of nodes/classes in the corresponding hierarchical level. Then the sum of the global output and concatenated local outputs is fed into a sigmoid function to predict the classes. 1", |
| "cite_spans": [ |
| { |
| "start": 30, |
| "end": 51, |
| "text": "Gopal and Yang (2013)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 350, |
| "end": 372, |
| "text": "Wehrmann et al. (2018)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hierarchical Methods", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Hi-GCN (Zhou et al., 2020) : An end-to-end hierarchy-aware global model that extracts the label hierarchy information to achieve label-wise text features. A graph convolutional network is used as the structure/hierarchy encoder, where each edge represents the correlation between a pair of nodes. There are three types of edges in the graph: topdown, bottom-up, and self-loop edges, where the weights for bottom-up and self-loop edges are 1, and the weights for top-down edges are determined by the predefined hierarchy and dataset distributions. To obtain label-wise text features, hierarchical text feature propagation is used. Specifically, the text representation from a text encoder is reshaped to act as the node input, which is updated through the hierarchy-aware structure encoder. The output of a node is based on its neighbourhood: itself, its child nodes, and its parent nodes. The output hidden state is then fed into the final classifier.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 26, |
| "text": "(Zhou et al., 2020)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hierarchical Methods", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "We evaluate each text encoder+hierarchical method combination in an end-to-end framework over three datasets: RCV1 (Lewis et al., 2004), SHINRA (Sekine et al., 2020) , and WoS (Kowsari et al., 2017) . Here, RCV1 is a collection of news articles published by the Reuters News between 1996 and 1997. SHINRA contains English Wikipedia articles from the SHINRA2020-ML shared-task (Sekine et al., 2020) , where each Wikipedia article is labelled according to a fine-grained named entity label set known as Extended Named Entity (ENE). 2 WoS is a collection of abstracts from academic papers across different research domains and areas. The statistics of each dataset is given in Table 1 . Looking at the document distributions in terms of label hierarchy levels, we find that the relationship between the number of documents and label classes conforms to a power-law function for RCV1 and SHINRA, especially at lower (2+) levels. For WoS, the number of documents per class at level 1 and 2 is relatively balanced.", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 165, |
| "text": "(Sekine et al., 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 176, |
| "end": 198, |
| "text": "(Kowsari et al., 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 376, |
| "end": 397, |
| "text": "(Sekine et al., 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 530, |
| "end": 531, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 674, |
| "end": 681, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We evaluate model performance in terms of Micro-F 1 and Macro-F 1 , two standard evaluation metrics for document categorisation. Micro-F 1 is instancelevel F-score, and thereby gives more weight to frequent labels. Macro-F 1 is class-level F-score, and gives equal weight to all labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Each document is truncated/padded to a fixed length of 256 tokens, where stopwords are removed for all models except BERT. For all models except BERT, we use 100-dimensional pre-trained word embeddings from GloVe (Pennington et al., 2014) to initialise the word embeddings. The vocabulary contains at most 100,000 words ranked by frequency. For OOV words, the word embeddings are randomly initialised. We train all models with Table 2 : Experimental results for different combinations of encoders and hierarchical document categorisation methods. The best result for each text encoder on each dataset is indicated in bold. Micro and Macro indicate micro and macro F 1 score, resp.. a batch size of 32 using Adam (Kingma and Ba, 2014), and an initial learning rate of 1e-3 (1e-5 for BERT) for at most 20 epochs.", |
| "cite_spans": [ |
| { |
| "start": 213, |
| "end": 238, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 427, |
| "end": 434, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "For hierarchical categorisation methods, the penalty coefficient of recursive regularisation is set to 1e-6, while the output dimension of internal linear layers in HMCN is set to 384. For the hyperparameters of Hi-GCN, we follow the recommendations of the authors in the original paper (Zhou et al., 2020) . Note that in some cases, both HMCN and Hi-GCN suffer from the vanishing/exploding gradient problem, to counter which we apply batch normalisation to the outputs of the linear layers in HMCN and Hi-GCN where necessary. Table 2 presents the experimental results of different combinations of text encoders and hierarchical categorisation methods across the three datasets. Model performance is heavily influenced by the choice of text encoder, with BERT outperforming other encoders by a large margin on RCV1 and SHINRA in terms of both Micro-F 1 and Macro-F 1 , but underperforming on WoS, irrespective of which hierarchical method it is combined with. We hypothesis that the performance drop for BERT on WoS is mainly due to domain shift, in that it has been pre-trained on Wikipedia articles and the Google Books corpus, which differ substantially from academic writing. 3 Among TextCNN, Text-RNN, and TextRCNN, TextCNN underperforms TextRNN and TextRCNN on all three datasets, especially on RCV1 and SHINRA. The reason is that TextCNN can only capture local features, but the fine-grained hierarchical distinctions captured in the different label sets often require longer-distance semantic dependencies.", |
| "cite_spans": [ |
| { |
| "start": 287, |
| "end": 306, |
| "text": "(Zhou et al., 2020)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 527, |
| "end": 534, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "With regards to the hierarchical categorisation methods, compared with Flat on RCV1 and SHINRA, RR improves Macro-F 1 in most cases at the cost of Micro-F 1 , indicating that RR can improve the performance of classes with fewer training samples. In contrast, HMCN improves Micro-F 1 at the cost of Macro-F 1 , indicating that HMCN is biased towards classes that are better represented in the dataset. However, on WoS, RR achieves better performance in terms of both Micro-F 1 and Macro-F 1 -with the one exception of Micro-F 1 with TextRNN-while HMCN achieves worse performance in terms of both Micro-F 1 and Macro-F 1 . All these results can be attributed to the fact that RR and HMCN leverage hierarchical information differently: RR utilises parent-child relationships, while HMCN adopts layer-wise hierarchical information. As a result of error propagation due to the greedy top-down approach, HMCN performs relatively worse the deeper the label hierarchy. For example, Flat with TextCNN achieves a Micro-F 1 of 88.53 at level-1 (7 classes) and a Micro-F 1 of 83.41 at level-2 (134 classes) on WoS, where both Micro-F 1 scores at these two levels are higher than 80.24 achieved by HMCN, indicating that the categorisation errors of HMCN at level-1 propagate to level-2 and lead to worse results on WoS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Looking to Hi-GCN, we find that Hi-GCN with any text encoder consistently outperforms other methods on all three datasets in terms of both Micro-F 1 and Macro-F 1 , by aggregating hierarchical information in a more flexible way. In addition to passing information from parent to child nodes, it also passes information from child to parent nodes, thereby improving categorisation performance at level-1 and categorisation at subsequent levels. Both RCV1 and SHINRA datasets have extremely imbalanced data distributions while WoS is relatively more balanced, which is also revealed by the greater differences between Micro-F 1 and Macro-F 1 on RCV1 and SHINRA, than on WoS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "These experiments indicate that the performance of hierarchical document categorisation not only depends on the text encoder and particular hierarchical methods, but also the intrinsic hierarchy label structure and the label distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "We examine various combinations of text encoders and hierarchical categorisation methods in an endto-end fashion over three datasets. We find that the choice of text encoder is a strong determinant of performance than the choice of hierarchical method, and indeed that local hierarchical methods don't consistently outperform baseline flat classification methods. With regards to hierarchical methods, RR improves Macro-F 1 at the cost of Micro-F 1 on RCV1 and SHINRA, while HMCN improves Micro-F 1 at the cost of Macro-F 1 on RCV1 and SHINRA. An opposite trend is observed on WoS, namely an improvement for RR and deterioration for HMCN. These different behaviours are determined by how the hierarchical label information is modelled during training. The global model Hi-GCN achieves superior performance in terms of both Micro-F 1 and Macro-F 1 on all three datasets, indicating the necessity of capturing the hierarchy label structure holistically.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In the original work ofWehrmann et al. (2018), the authors first apply the sigmoid function to the global output and local outputs, respectively, resulting into extremely bad performance in some settings, indicating that applying sigmoid separately to the global and local outputs is not as effective as applying it to the combined global and local information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://ene-project.info/ene8/?lang=en", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "It would be interesting to experiment with SciBERT (Beltagy et al., 2019), which has been pre-trained on papers from the scientific domain, which we leave to future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Learning to rank for multilabel text classification: Combining different sources of information", |
| "authors": [ |
| { |
| "first": "Hosein", |
| "middle": [], |
| "last": "Azarbonyad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mostafa", |
| "middle": [], |
| "last": "Dehghani", |
| "suffix": "" |
| }, |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Marx", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaap", |
| "middle": [], |
| "last": "Kamps", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Natural Language Engineering", |
| "volume": "27", |
| "issue": "1", |
| "pages": "89--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hosein Azarbonyad, Mostafa Dehghani, Maarten Marx, and Jaap Kamps. 2021. Learning to rank for multi- label text classification: Combining different sources of information. Natural Language Engineering, 27(1):89-111.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Hierarchical transfer learning for multi-label text classification", |
| "authors": [ |
| { |
| "first": "Siddhartha", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Cem", |
| "middle": [], |
| "last": "Akkaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Perez-Sorrosal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kostas", |
| "middle": [], |
| "last": "Tsioutsiouliklis", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6295--6300", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siddhartha Banerjee, Cem Akkaya, Francisco Perez- Sorrosal, and Kostas Tsioutsiouliklis. 2019. Hierar- chical transfer learning for multi-label text classifica- tion. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6295-6300.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "SciBERT: A pretrained language model for scientific text", |
| "authors": [ |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Arman", |
| "middle": [], |
| "last": "Cohan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3615--3620", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. SciBERT: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, pages 3615-3620.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Predicting gene function in saccharomyces cerevisiae", |
| "authors": [ |
| { |
| "first": "Amanda", |
| "middle": [], |
| "last": "Clare", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Ross", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "King", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Bioinformatics", |
| "volume": "19", |
| "issue": "suppl_2", |
| "pages": "42--49", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amanda Clare and Ross D King. 2003. Predicting gene function in saccharomyces cerevisiae. Bioinformat- ics, 19(suppl_2):ii42-ii49.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "On the hierarchical classification of G proteincoupled receptors", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Davies", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "A" |
| ], |
| "last": "Secker", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Freitas", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Mendao", |
| "suffix": "" |
| }, |
| { |
| "first": "Darren", |
| "middle": [ |
| "R" |
| ], |
| "last": "Timmis", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Flower", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Bioinformatics", |
| "volume": "23", |
| "issue": "23", |
| "pages": "3113--3118", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew N Davies, Andrew Secker, Alex A Freitas, Miguel Mendao, Jon Timmis, and Darren R Flower. 2007. On the hierarchical classification of G protein- coupled receptors. Bioinformatics, 23(23):3113- 3118.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Improving protein function prediction using the hierarchical structure of the gene ontology", |
| "authors": [ |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| }, |
| { |
| "first": "Brett", |
| "middle": [], |
| "last": "Poulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Duane", |
| "middle": [], |
| "last": "Szafron", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Russell", |
| "middle": [], |
| "last": "Greiner", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 2005 IEEE Symposium on Computational Intelligence in Bioinformatics and Computational Biology", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roman Eisner, Brett Poulin, Duane Szafron, Paul Lu, and Russell Greiner. 2005. Improving protein func- tion prediction using the hierarchical structure of the gene ontology. In Proceedings of the 2005 IEEE Symposium on Computational Intelligence in Bioin- formatics and Computational Biology, pages 1-10.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A tutorial on hierarchical classification with applications in bioinformatics", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Freitas", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [], |
| "last": "Carvalho", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Research and Trends in Data Mining Technologies and Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "175--208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Freitas and Andr\u00e9 Carvalho. 2007. A tutorial on hierarchical classification with applications in bioin- formatics. Research and Trends in Data Mining Tech- nologies and Applications, pages 175-208.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Recursive regularization for large-scale classification with hierarchical and graphical dependencies", |
| "authors": [ |
| { |
| "first": "Siddharth", |
| "middle": [], |
| "last": "Gopal", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 19th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "257--265", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siddharth Gopal and Yiming Yang. 2013. Recursive regularization for large-scale classification with hier- archical and graphical dependencies. In Proceedings of the 19th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 257-265.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Hierarchical multi-label text classification: An attention-based recurrent network approach", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Enhong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuying", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zai", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhou", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 28th ACM International Conference on Information and Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "1051--1060", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Huang, Enhong Chen, Qi Liu, Yuying Chen, Zai Huang, Yang Liu, Zhou Zhao, Dan Zhang, and Shijin Wang. 2019. Hierarchical multi-label text classifica- tion: An attention-based recurrent network approach. In Proceedings of the 28th ACM International Con- ference on Information and Knowledge Management, pages 1051-1060.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Convolutional neural networks for sentence classification", |
| "authors": [ |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1746--1751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoon Kim. 2014. Convolutional neural networks for sentence classification. In Proceedings of the 2014 Conference on Empirical Methods in Natural Lan- guage Processing, pages 1746-1751.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Hdltex: Hierarchical deep learning for text classification", |
| "authors": [ |
| { |
| "first": "Kamran", |
| "middle": [], |
| "last": "Kowsari", |
| "suffix": "" |
| }, |
| { |
| "first": "Donald", |
| "middle": [ |
| "E" |
| ], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Mojtaba", |
| "middle": [], |
| "last": "Heidarysafa", |
| "suffix": "" |
| }, |
| { |
| "first": "Kiana", |
| "middle": [ |
| "Jafari" |
| ], |
| "last": "Meimandi", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "S" |
| ], |
| "last": "Gerber", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [ |
| "E" |
| ], |
| "last": "Barnes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "16th IEEE International Conference on Machine Learning and Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "364--371", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kamran Kowsari, Donald E Brown, Mojtaba Hei- darysafa, Kiana Jafari Meimandi, Matthew S Ger- ber, and Laura E Barnes. 2017. Hdltex: Hierarchical deep learning for text classification. In 16th IEEE International Conference on Machine Learning and Applications, pages 364-371.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Rcv1: A new benchmark collection for text categorization research", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "D" |
| ], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tony", |
| "middle": [ |
| "G" |
| ], |
| "last": "Rose", |
| "suffix": "" |
| }, |
| { |
| "first": "Fan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "5", |
| "issue": "", |
| "pages": "361--397", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David D Lewis, Yiming Yang, Tony G Rose, and Fan Li. 2004. Rcv1: A new benchmark collection for text categorization research. Journal of Machine Learning Research, 5(Apr):361-397.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Hierarchical text classification with reinforced label assignment", |
| "authors": [ |
| { |
| "first": "Yuning", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingjing", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiawei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "445--455", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuning Mao, Jingjing Tian, Jiawei Han, and Xiang Ren. 2019. Hierarchical text classification with reinforced label assignment. In Proceedings of the 2019 Confer- ence on Empirical Methods in Natural Language Pro- cessing and the 9th International Joint Conference on Natural Language Processing, pages 445-455.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tom\u00e1s", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of Advances in Neural Information Processing Systems", |
| "volume": "26", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom\u00e1s Mikolov, Ilya Sutskever, Kai Chen, Gregory S. Corrado, and Jeffrey Dean. 2013. Distributed repre- sentations of words and phrases and their composi- tionality. In Proceedings of Advances in Neural In- formation Processing Systems 26, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Large-scale hierarchical text classification with recursively regularized deep graph-cnn", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianxin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaopeng", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mengjiao", |
| "middle": [], |
| "last": "Bao", |
| "suffix": "" |
| }, |
| { |
| "first": "Lihong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangqiu", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 World Wide Web Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1063--1072", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Peng, Jianxin Li, Yu He, Yaopeng Liu, Mengjiao Bao, Lihong Wang, Yangqiu Song, and Qiang Yang. 2018. Large-scale hierarchical text classification with recursively regularized deep graph-cnn. In Pro- ceedings of the 2018 World Wide Web Conference, pages 1063-1072.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "GloVe: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word rep- resentation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Process- ing, pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long Papers), pages 2227-2237.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Hierarchical classification of Gprotein-coupled receptors with data-driven selection of attributes and classifiers", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Secker", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "N" |
| ], |
| "last": "Davies", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "Alves" |
| ], |
| "last": "Freitas", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "B" |
| ], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Timmis", |
| "suffix": "" |
| }, |
| { |
| "first": "Darren", |
| "middle": [ |
| "R" |
| ], |
| "last": "Flower", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "International Journal of Data Mining and Bioinformatics", |
| "volume": "4", |
| "issue": "2", |
| "pages": "191--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Secker, Matthew N Davies, Alex Alves Fre- itas, EB Clark, Jonathan Timmis, and Darren R Flower. 2010. Hierarchical classification of G- protein-coupled receptors with data-driven selection of attributes and classifiers. International Journal of Data Mining and Bioinformatics, 4(2):191-210.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Overview of shinra2020-ml task", |
| "authors": [ |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Sekine", |
| "suffix": "" |
| }, |
| { |
| "first": "Masako", |
| "middle": [], |
| "last": "Nomoto", |
| "suffix": "" |
| }, |
| { |
| "first": "Kouta", |
| "middle": [], |
| "last": "Nakayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Asuka", |
| "middle": [], |
| "last": "Sumida", |
| "suffix": "" |
| }, |
| { |
| "first": "Koji", |
| "middle": [], |
| "last": "Matsuda", |
| "suffix": "" |
| }, |
| { |
| "first": "Maya", |
| "middle": [], |
| "last": "Ando", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the NTCIR-15 Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Satoshi Sekine, Masako Nomoto, Kouta Nakayama, Asuka Sumida, Koji Matsuda, and Maya Ando. 2020. Overview of shinra2020-ml task. In Proceedings of the NTCIR-15 Conference.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Hft-cnn: Learning hierarchical category structure for multi-label short text categorization", |
| "authors": [ |
| { |
| "first": "Kazuya", |
| "middle": [], |
| "last": "Shimura", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiyi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Fumiyo", |
| "middle": [], |
| "last": "Fukumoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "811--816", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kazuya Shimura, Jiyi Li, and Fumiyo Fukumoto. 2018. Hft-cnn: Learning hierarchical category structure for multi-label short text categorization. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 811-816.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A survey of hierarchical classification across different application domains", |
| "authors": [ |
| { |
| "first": "Carlos", |
| "middle": [ |
| "N" |
| ], |
| "last": "Silla", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "A" |
| ], |
| "last": "Freitas", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Data Mining and Knowledge Discovery", |
| "volume": "22", |
| "issue": "1", |
| "pages": "31--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carlos N Silla and Alex A Freitas. 2011. A survey of hierarchical classification across different application domains. Data Mining and Knowledge Discovery, 22(1):31-72.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Decision trees for hierarchical multi-label classification", |
| "authors": [ |
| { |
| "first": "Celine", |
| "middle": [], |
| "last": "Vens", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Struyf", |
| "suffix": "" |
| }, |
| { |
| "first": "Leander", |
| "middle": [], |
| "last": "Schietgat", |
| "suffix": "" |
| }, |
| { |
| "first": "Sa\u0161o", |
| "middle": [], |
| "last": "D\u017eeroski", |
| "suffix": "" |
| }, |
| { |
| "first": "Hendrik", |
| "middle": [], |
| "last": "Blockeel", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Machine Learning", |
| "volume": "73", |
| "issue": "2", |
| "pages": "185", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Celine Vens, Jan Struyf, Leander Schietgat, Sa\u0161o D\u017eeroski, and Hendrik Blockeel. 2008. Decision trees for hierarchical multi-label classification. Ma- chine Learning, 73(2):185.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Bilateral multi-perspective matching for natural language sentences", |
| "authors": [ |
| { |
| "first": "Zhiguo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wael", |
| "middle": [], |
| "last": "Hamza", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "4144--4150", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiguo Wang, Wael Hamza, and Radu Florian. 2017. Bilateral multi-perspective matching for natural lan- guage sentences. In Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelli- gence, pages 4144-4150.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Hierarchical multi-label classification networks", |
| "authors": [ |
| { |
| "first": "Jonatas", |
| "middle": [], |
| "last": "Wehrmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Cerri", |
| "suffix": "" |
| }, |
| { |
| "first": "Rodrigo", |
| "middle": [], |
| "last": "Barros", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 35th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "5075--5084", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonatas Wehrmann, Ricardo Cerri, and Rodrigo Bar- ros. 2018. Hierarchical multi-label classification net- works. In Proceedings of the 35th International Con- ference on Machine Learning, pages 5075-5084.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Hierarchical attention networks for document classification", |
| "authors": [ |
| { |
| "first": "Zichao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Diyi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Smola", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 conference of the North American chapter of the association for computational linguistics: human language technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1480--1489", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, and Eduard Hovy. 2016. Hierarchical at- tention networks for document classification. In Pro- ceedings of the 2016 conference of the North Ameri- can chapter of the association for computational lin- guistics: human language technologies, pages 1480- 1489.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Hierarchy-aware global model for hierarchical text classification", |
| "authors": [ |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Chunping", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Dingkun", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| }, |
| { |
| "first": "Guangwei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ning", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Haoyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengjun", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Gongshen", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1106--1117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jie Zhou, Chunping Ma, Dingkun Long, Guangwei Xu, Ning Ding, Haoyu Zhang, Pengjun Xie, and Gong- shen Liu. 2020. Hierarchy-aware global model for hierarchical text classification. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 1106-1117.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "num": null, |
| "content": "<table><tr><td>Dataset</td><td colspan=\"4\">|L| Avg(|L|) Depth Training</td><td>Test</td></tr><tr><td>RCV1</td><td>103</td><td>3.24</td><td>4</td><td colspan=\"2\">23,149 592,688</td></tr><tr><td colspan=\"2\">SHINRA 237</td><td>3.16</td><td>4</td><td>390,433</td><td>43,382</td></tr><tr><td>WoS</td><td>141</td><td>2.00</td><td>2</td><td>42,286</td><td>4,699</td></tr><tr><td/><td/><td/><td colspan=\"3\">Hierarchical Multi-Label Classification Net-</td></tr><tr><td/><td/><td/><td colspan=\"3\">works (HMCN: Wehrmann et al. (2018)): A</td></tr><tr><td/><td/><td/><td colspan=\"3\">hybrid local/global approach, where each level in</td></tr></table>", |
| "text": "): A hybrid method, utilising simple recursive regularisation to encourage parameter smoothness between linked nodes.", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "num": null, |
| "content": "<table/>", |
| "text": "Statistics of datasets: \"|L|\" is the total number of labels; \"Avg(|L|)\" is the average number of labels per document; and \"Depth\" indicates the maximum hierarchy depth.", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "num": null, |
| "content": "<table><tr><td>Dataset</td><td>RCV1</td><td>SHINRA</td><td>WoS</td></tr><tr><td colspan=\"4\">Method Micro Macro Micro Macro Micro Macro</td></tr><tr><td/><td/><td>TextCNN</td><td/></tr><tr><td>Flat</td><td colspan=\"2\">75.63 TextRNN</td><td/></tr><tr><td>Flat</td><td colspan=\"3\">78.46 49.18 88.43 60.11 83.72 77.55</td></tr><tr><td>RR</td><td colspan=\"3\">78.52 55.48 87.22 60.07 83.57 78.08</td></tr><tr><td>HMCN</td><td colspan=\"3\">80.52 48.97 88.71 59.76 82.09 75.90</td></tr><tr><td colspan=\"4\">Hi-GCN 81.57 56.29 88.74 61.20 84.11 77.95</td></tr><tr><td/><td/><td>TextRCNN</td><td/></tr><tr><td>Flat</td><td colspan=\"3\">79.92 51.54 88.12 60.34 84.05 77.95</td></tr><tr><td>RR</td><td colspan=\"3\">79.81 56.37 88.06 60.32 84.14 78.03</td></tr><tr><td>HMCN</td><td colspan=\"3\">81.13 50.44 88.56 59.71 82.86 76.11</td></tr><tr><td colspan=\"4\">Hi-GCN 82.96 58.05 88.69 61.05 84.54 78.28</td></tr><tr><td/><td/><td>BERT</td><td/></tr><tr><td>Flat</td><td colspan=\"3\">82.64 55.61 90.86 66.35 75.73 69.22</td></tr><tr><td>RR</td><td colspan=\"3\">82.13 59.41 90.70 66.59 75.77 69.43</td></tr><tr><td>HMCN</td><td colspan=\"3\">82.68 53.65 91.32 64.13 72.28 64.62</td></tr><tr><td>Hi-</td><td/><td/><td/></tr></table>", |
| "text": "45.24 86.94 56.46 83.41 77.00 RR 75.56 50.81 85.31 56.62 83.51 77.32 HMCN 78.22 43.49 87.03 56.28 80.24 74.38 Hi-GCN 77.80 51.34 86.91 58.61 84.09 77.37 GCN 83.20 60.32 91.90 67.79 75.94 70.81", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |