diff --git "a/2025/A Unified Framework for Heterogeneous Semi-supervised Learning/layout.json" "b/2025/A Unified Framework for Heterogeneous Semi-supervised Learning/layout.json" new file mode 100644--- /dev/null +++ "b/2025/A Unified Framework for Heterogeneous Semi-supervised Learning/layout.json" @@ -0,0 +1,8499 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 96, + 103, + 515, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 103, + 515, + 121 + ], + "spans": [ + { + "bbox": [ + 96, + 103, + 515, + 121 + ], + "type": "text", + "content": "A Unified Framework for Heterogeneous Semi-supervised Learning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 142, + 143, + 469, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 143, + 469, + 185 + ], + "spans": [ + { + "bbox": [ + 142, + 143, + 469, + 185 + ], + "type": "text", + "content": "Marzi Heidari*, Abdullah Alchihabi*, Hao Yan*, Yuhong Guo*† \n*School of Computer Science, Carleton University, Ottawa, Canada†Canada CIFAR AI Chair, Amii, Canada" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 187, + 539, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 187, + 539, + 200 + ], + "spans": [ + { + "bbox": [ + 70, + 187, + 539, + 200 + ], + "type": "text", + "content": "{marziheidari@cmail., abdullahalchihibi@cmail., haoyan6@cmail., yuhong.guo@}carleton.ca" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 151, + 227, + 200, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 227, + 200, + 239 + ], + "spans": [ + { + "bbox": [ + 151, + 227, + 200, + 239 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 251, + 297, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 297, + 574 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 297, + 574 + ], + "type": "text", + "content": "In this 
work, we introduce a novel problem setup termed as Heterogeneous Semi-Supervised Learning (HSSL), which presents unique challenges by bridging the semi-supervised learning (SSL) task and the unsupervised domain adaptation (UDA) task, and expanding standard semi-supervised learning to cope with heterogeneous training data. At its core, HSSL aims to learn a prediction model using a combination of labeled and unlabeled training data drawn separately from heterogeneous domains that share a common set of semantic categories. This model is intended to differentiate the semantic categories of test instances sampled from both the labeled and unlabeled domains. In particular, the labeled and unlabeled domains have dissimilar label distributions and class feature distributions. This heterogeneity, coupled with the assorted sources of the test data, introduces significant challenges to standard SSL and UDA methods. Therefore, we propose a novel method, Unified Framework for Heterogeneous Semi-supervised Learning (Uni-HSSL), to address HSSL by directly learning a fine-grained classifier from the heterogeneous data, which adaptively handles the inter-domain heterogeneity while leveraging both the unlabeled data and the inter-domain semantic class relationships for cross-domain knowledge transfer and adaptation. We conduct comprehensive experiments and the experimental results validate the efficacy and superior performance of the proposed Uni-HSSL over state-of-the-art semi-supervised learning and unsupervised domain adaptation methods." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 597, + 135, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 597, + 135, + 609 + ], + "spans": [ + { + "bbox": [ + 56, + 597, + 135, + 609 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 617, + 297, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 297, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 297, + 714 + ], + "type": "text", + "content": "Deep learning models, owing to their hierarchical learned representations and intricate architectures, have monumentally advanced the state-of-the-art across a myriad of tasks [18]. Nonetheless, the success of deep learning has been often contingent on the availability of copious amounts of labeled data. Data annotation, especially in specialized domains, is not only resource-intensive but can also entail exorbitant costs [32]. Consequently, semi-supervised learn" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 228, + 555, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 555, + 264 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 555, + 264 + ], + "type": "text", + "content": "ing (SSL) has been popularly studied, aiming to successfully utilize the free available unlabeled data to help train deep models in an annotation efficient manner [35]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 312, + 267, + 556, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 267, + 556, + 448 + ], + "spans": [ + { + "bbox": [ + 312, + 267, + 556, + 448 + ], + "type": "text", + "content": "However, current SSL methods assume that the unlabeled and labeled data are sampled from similar (homogeneous) distributions [25]. Such an assumption presents substantial practical limitations to applying traditional SSL methods to a wide range of application domains, where labeled and unlabeled data can have different distributions. 
For example, in the field of medical imaging, it is common for labeled MRI scans to be sourced from state-of-the-art research hospitals, while an influx of unlabeled scans could emanate from a myriad of rural clinics, each with its distinct scanning equipment and calibration idiosyncrasies. Similar heterogeneity patterns manifest in domains like aerial imagery, wildlife monitoring, and retail product classification. In such settings, the challenge lies in leveraging the unlabeled data given its dissimilarity with its labeled counterpart." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 450, + 557, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 450, + 557, + 714 + ], + "spans": [ + { + "bbox": [ + 312, + 450, + 557, + 714 + ], + "type": "text", + "content": "Therefore, to address the current limitations of the traditional SSL, we propose a novel heterogeneous semi-supervised learning (HSSL) task, where the training data consist of labeled and unlabeled data sampled from different distribution domains. The two domains contain a common set of semantic classes, but have different label and class feature distributions. The goal of HSSL is to train a model using the heterogeneous training data so that it can perform well on a held-out test set sampled from both the labeled and unlabeled domains. Without posing distribution similarity assumptions between the labeled and unlabeled data, HSSL is expected to be applicable to a broader range of real-world scenarios compared to standard SSL. This novel heterogeneous semi-supervised learning task however is much more challenging due to the following characteristics: (1) The domain gap, expressed as divergence between class feature distributions across the labeled and unlabeled domains, presents a significant impediment to model generalization and learning. (2) The absence of annotated samples from the unlabeled domain during training further compounds the complexity of the task. 
(3) Considering that the test set comprises samples from both domains, the devised so" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15371" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 57, + 72, + 296, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 72, + 296, + 382 + ], + "spans": [ + { + "bbox": [ + 57, + 72, + 296, + 382 + ], + "type": "text", + "content": "lution methods need to accurately model the distributions inherent to each domain. It is imperative for the models to discern not only the domain from which a sample originates but also the specific semantic class it belongs to. 
This requires either an explicit or implicit methodology to categorize samples accurately with respect to both domain origin and semantic class categories, distinguishing the task from both conventional SSL and unsupervised domain adaptation (UDA)—traditional SSL overlooks the domain heterogeneity within both the training and testing data, whereas UDA exclusively concentrates on the unlabeled domain as the target domain [11, 21]. Therefore, traditional SSL and UDA methods are not readily applicable or effective in addressing the proposed HSSL task. A recent work [14] has made an effort to expand the traditional SSL task beyond its homogeneous assumptions. However, the proposed solution method learns separately in different domains using distinct components where an off-the-shelf UDA technique is employed to generate pseudo-labels for the unlabeled samples, bypassing the opportunity to train a unified cohesive model that could harness insights from both domains. Furthermore, their test set is confined to a labeled domain, while HSSL aims to train a model that generalizes across labeled and unlabeled domains. HSSL presents a more complex challenge, requiring the model to adapt and perform accurately across heterogeneous test data." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 391, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 391, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 391, + 296, + 713 + ], + "type": "text", + "content": "In this work, we propose a novel method, named as Unified framework for Heterogeneous Semi-Supervised Learning (Uni-HSSL), to address the HSSL problem. The proposed method learns a fine-grained classification model cohesively under a unified framework by amalgamating the labeled and unlabeled class categories within an extended and precisely doubled label space. 
The framework consists of three technical components designed to tackle the HSSL challenges: a weighted moving average pseudo-labeling component, a cross-domain prototype alignment component, and a progressive inter-domain mixup component. The pseudo-labeling component leverages a weighted moving average strategy to assign and update pseudo-labels for the unlabeled data. In this manner, it generates smooth and adaptive assignment of pseudo-labels, reducing the potential pitfalls of oscillating updates or noisy label assignments, which is crucial given the significant domain gap between labeled data and unlabeled data. The cross-domain prototype alignment ensures that the inherent semantic structures of similar classes across the labeled and unlabeled domains are aligned. This alignment of class-centric prototypes between domains leverages inter-domain semantic class relationships, enabling knowledge transfer from the labeled domain to the unlabeled domain. The progressive inter-domain mixup component generates new synthetic instances by interpolating between labeled and unlabeled samples and bridges the gap between the two domains. By adopting a progressive" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 316, + 72, + 553, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 167 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 167 + ], + "type": "text", + "content": "augmentation schedule, it gradually adapts the model to the distribution of the unlabeled domain, facilitating a steady and reliable knowledge transfer. Comprehensive experiments are conducted on several benchmark datasets. The empirical results demonstrate the efficacy and superior performance of our proposed unified framework compared to multiple state-of-the-art SSL and unsupervised domain adaptation baselines for HSSL." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 316, + 179, + 405, + 191 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 179, + 405, + 191 + ], + "spans": [ + { + "bbox": [ + 316, + 179, + 405, + 191 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 316, + 199, + 460, + 212 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 199, + 460, + 212 + ], + "spans": [ + { + "bbox": [ + 316, + 199, + 460, + 212 + ], + "type": "text", + "content": "2.1. Semi-Supervised Learning" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 315, + 217, + 555, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 217, + 555, + 384 + ], + "spans": [ + { + "bbox": [ + 315, + 217, + 555, + 384 + ], + "type": "text", + "content": "Conventional Semi-Supervised Learning (SSL) In conventional SSL, the labeled and unlabeled segments of the dataset encompass identical classes, sharing consistent class and feature distributions. SSL methods are primarily classified into three categories: regularization-based techniques, teacher-student models, and pseudo-labeling strategies. Regularization-based techniques like II-model [16] modify the loss function with additional terms for model refinement. Teacher-student models like MT [33] and ICT [37] involve training a student network to mimic a teacher model using unlabeled data. Pseudo-labeling strategies like Pseudo-Label [19], FixMatch [31], FlexMatch [39], and SimMatch [41] expand labeled datasets using unlabeled data with pseudo-labels in various ways." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 315, + 399, + 555, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 399, + 555, + 613 + ], + "spans": [ + { + "bbox": [ + 315, + 399, + 555, + 613 + ], + "type": "text", + "content": "Open-Set Semi-Supervised Learning (OS-SSL) OS-SSL deals with unknown or additional classes present in the unlabeled data but absent in the labeled set. OS-SSL assumes the same feature distribution over labeled and unlabeled sets. This is different from HSSL, which operates under the assumption that labeled and unlabeled data come from separate domains with different feature distributions. The concept of OS-SSL, introduced in [25], focuses on class distribution mismatches in open-set scenarios. Methods for OS-SSL like UASD [6] use self-distillation to exclude outliers from unlabeled data. DS3L [12] and MTCF [38] employ diverse weighting strategies for subset mismatches, minimizing the impact of private data in unlabeled sets. OpenMatch [3] utilizes one-vs-all classifiers for outlier detection but faces difficulties with unseen categories. While OS-SSL has advanced SSL towards practical use, it lacks capacity to handle feature distribution mismatches between labeled and unlabeled data." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 315, + 629, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 629, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 629, + 553, + 713 + ], + "type": "text", + "content": "Universal Semi-Supervised Learning (USSL) Universal SSL [13] involves both shared and unique classes across the labeled and unlabeled sets, with the test set matching the labeled set's class distribution. HSSL, however, assumes shared classes across the labeled and unlabeled domains and tests on samples from both domains without their domain identities, adding complexity." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "15372" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 168 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 168 + ], + "type": "text", + "content": "Similar to our work, bidirectional Adaptation [14] addresses the disparity between limited labeled and abundant unlabeled data, but it tests only within the labeled domain's feature distribution. It uses UDA techniques for pseudolabeling, avoiding the complexities and benefits of cross-domain modeling. In contrast, HSSL aims for effective generalization across both domains, posing a more intricate challenge in model adaptation and generalization." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 176, + 238, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 176, + 238, + 190 + ], + "spans": [ + { + "bbox": [ + 55, + 176, + 238, + 190 + ], + "type": "text", + "content": "2.2. Unsupervised Domain Adaptation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 194, + 296, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 194, + 296, + 386 + ], + "spans": [ + { + "bbox": [ + 55, + 194, + 296, + 386 + ], + "type": "text", + "content": "Unsupervised domain adaptation aims at learning a target model given labeled data from a source domain and unlabeled data from a target domain. Typical deep UDA approaches can be categorized into three types: alignment-based, regularization-based, and self-training-based methods. 
Alignment-based methods aim to reduce the cross-domain feature discrepancy with adversarial alignment [11, 22] and distance-based methods [4, 21, 27, 29]. Regularization-based methods utilize regularization terms to leverage knowledge from the unlabeled target data. Typical regularization terms include entropy minimization [30], virtual adversarial training [30], batch spectral penalization [5], batch nuclear-norm maximization [9], and mutual information maximization [17]. Self-training-based methods explore effective pseudo-labeling for unlabeled target data fitting, including confidence threshold [2, 42] and cycle self-training [20]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 398, + 111, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 398, + 111, + 410 + ], + "spans": [ + { + "bbox": [ + 55, + 398, + 111, + 410 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 418, + 148, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 418, + 148, + 430 + ], + "spans": [ + { + "bbox": [ + 55, + 418, + 148, + 430 + ], + "type": "text", + "content": "3.1. Problem Setup" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "spans": [ + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": "We consider the following Heterogeneous Semi-Supervised Learning (HSSL) setup. 
The training data consist of a set of labeled instances " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_L = \\{(\\mathbf{x}_i^l,\\mathbf{y}_i^l)\\}_{i = 1}^{N_l}" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": ", where each instance " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i^l" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": " is annotated with a one-hot label indicator vector " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_i^l" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": " with length " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": ", and a set of unlabeled instances " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_U = \\{\\mathbf{x}_i^u\\}_{i = 1}^{N_u}" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": ". 
The labeled data and unlabeled data are from two different domains that have dissimilar label distributions such that " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "p_L(\\mathbf{y})\\neq p_U(\\mathbf{y})" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": " and heterogeneous feature distributions such that " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "p_L(\\mathbf{x}|\\mathbf{y})\\neq p_U(\\mathbf{x}|\\mathbf{y})" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": ", but share the same set of " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": " semantic classes. The goal is to train a prediction model using both the labeled set " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_L" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": " and unlabeled set " + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_U" + }, + { + "bbox": [ + 55, + 435, + 296, + 602 + ], + "type": "text", + "content": " so that the trained model would generalize well on a held-out test set that is indistinguishably sampled from both the labeled and unlabeled domains." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 612, + 161, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 612, + 161, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 612, + 161, + 624 + ], + "type": "text", + "content": "3.2. 
Proposed Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": "In this section, we present the proposed Uni-HSSL method, which tackles the " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": "-class HSSL problem by combining the labeled and unlabeled class categories to a doubled label space and learning a fine-grained " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": "-class classification model under a unified framework, aiming to adaptively handle the heterogeneous distributions across domains and gain better generalization over test instances randomly sampled" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "content": "from both the labeled and unlabeled domains. The core idea centers on simultaneously facilitating effective knowledge transfer from the labeled domain to the unlabeled domain while harnessing the information within the unlabeled data." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "text", + "content": "We start by first pre-training a feature encoder and a " + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "text", + "content": "-class semantic classifier on the labeled dataset, which can be used to produce the initial pseudo-labels of the unlabeled training data and provide partial initialization for our Uni-HSSL model. Then the " + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "text", + "content": "-class Uni-HSSL model, which consists of a feature encoder " + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "text", + "content": " and a " + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "text", + "content": "-class classifier " + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 313, + 121, + 555, + 324 + ], + "type": "text", + "content": ", will be learned within the proposed unified semi-supervised framework shown in Figure 1. The framework introduces three technical components to facilitate heterogeneous SSL. 
The weighted-moving-average (WMA) based pseudo-labeling component is deployed to support the effective exploitation of the unlabeled data, while the cross-domain prototype alignment component and progressive inter-domain mixup component are designed to promote information sharing and efficient and steady knowledge transfer from the labeled domain to the unlabeled domain. Further elaboration will be provided in the following sections." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 329, + 444, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 329, + 444, + 341 + ], + "spans": [ + { + "bbox": [ + 313, + 329, + 444, + 341 + ], + "type": "text", + "content": "3.2.1. Supervised Pre-training" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "spans": [ + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "type": "text", + "content": "The initial challenge in training a " + }, + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "type": "text", + "content": "-class classification model with the given heterogeneous data is the absence of labeled instances entirely in the unlabeled domain. 
To tackle this problem, we exploit the assumption that the labeled and unlabeled domains share the same set of " + }, + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "type": "text", + "content": " semantic class categories, and pre-train a " + }, + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 344, + 554, + 439 + ], + "type": "text", + "content": "-class classification model in the labeled domain to provide initial pseudo-labels for the training instances in the unlabeled domain." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "spans": [ + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "text", + "content": "Specifically, we pre-train a " + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "text", + "content": "-class model, which consists of a feature encoder " + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "text", + "content": " and a " + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "text", + "content": "-class probabilistic classifier " + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "text", + "content": ", on the labeled data " + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_L" + }, + { + "bbox": [ + 313, + 440, + 555, + 488 + ], + "type": 
"text", + "content": " by minimizing the following supervised cross-entropy loss:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 357, + 495, + 555, + 513 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 495, + 555, + 513 + ], + "spans": [ + { + "bbox": [ + 357, + 495, + 555, + 513 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {c e} ^ {L} = \\mathbb {E} _ {\\left(\\mathbf {x} _ {i} ^ {l}, \\mathbf {y} _ {i} ^ {l}\\right) \\in \\mathcal {D} _ {L}} \\left[ \\ell_ {c e} \\left(\\mathbf {y} _ {i} ^ {l}, g (f (\\mathbf {x} _ {i} ^ {l}))\\right) \\right] \\tag {1}", + "image_path": "ed5ee6bf08aa1c00eebcfcf347c814fe2da6ac10a987c86d20b38544cdeebb59.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 519, + 556, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 519, + 556, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 519, + 556, + 567 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 519, + 556, + 567 + ], + "type": "inline_equation", + "content": "\\ell_{ce}" + }, + { + "bbox": [ + 313, + 519, + 556, + 567 + ], + "type": "text", + "content": " denotes the cross-entropy function. 
Then we deploy the pre-trained classification model to make predictions on the unlabeled training instances in " + }, + { + "bbox": [ + 313, + 519, + 556, + 567 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_U" + }, + { + "bbox": [ + 313, + 519, + 556, + 567 + ], + "type": "text", + "content": " to generate their initial pseudo-labels:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 372, + 575, + 555, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 575, + 555, + 590 + ], + "spans": [ + { + "bbox": [ + 372, + 575, + 555, + 590 + ], + "type": "interline_equation", + "content": "\\bar {\\mathbf {y}} _ {i} ^ {0} = g \\left(f \\left(\\mathbf {x} _ {i} ^ {u}\\right)\\right), \\quad \\forall \\mathbf {x} _ {i} ^ {u} \\in \\mathcal {D} _ {U} \\tag {2}", + "image_path": "56abf667d06700e05bb1f950eff51388fc7d23e0cfb5db6cadfd2f3d2a0a2667.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "inline_equation", + "content": "\\bar{\\mathbf{y}}_i^0" + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "content": " denotes the predicted class probability vector with length " + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "content": " for the unlabeled instance " + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i^u" + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "content": ". 
To provide initial labels on the unlabeled data for training the " + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "content": "-class model, we further expand each " + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "inline_equation", + "content": "\\bar{\\mathbf{y}}_i^0" + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "content": " by concatenating it with a zero vector with length " + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "inline_equation", + "content": "\\mathbf{0}_C" + }, + { + "bbox": [ + 313, + 597, + 556, + 658 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 389, + 666, + 555, + 681 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 666, + 555, + 681 + ], + "spans": [ + { + "bbox": [ + 389, + 666, + 555, + 681 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {y}} _ {i} ^ {0} = \\operatorname {c o n c a t} \\left(\\mathbf {0} _ {C}, \\bar {\\mathbf {y}} _ {i} ^ {0}\\right) \\tag {3}", + "image_path": "01ba431795715b8cdf8b0fe14b26bfdee617abb039fabe60853bc21f8e095491.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "content": "This results in the first set of " + }, + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "content": " classes out of the " + }, + { + "bbox": [ + 
313, + 689, + 555, + 713 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "content": " classes corresponding to the classes in the labeled domain, with" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15373" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 100, + 74, + 509, + 266 + ], + "blocks": [ + { + "bbox": [ + 100, + 74, + 509, + 266 + ], + "lines": [ + { + "bbox": [ + 100, + 74, + 509, + 266 + ], + "spans": [ + { + "bbox": [ + 100, + 74, + 509, + 266 + ], + "type": "image", + "image_path": "b2dc7b8e254ecc5e7a924b0093c898cef1b0802eae32003cf24e947489c5e0c3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "lines": [ + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "spans": [ + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "text", + "content": "Figure 1. An overview of the proposed Uni-HSSL training framework. The classification model consists of a feature encoder " + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "text", + "content": " and a " + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "text", + "content": "-class classifier " + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "text", + "content": ". 
After initialization with pre-training, the model is trained by jointly minimizing the combination of a supervised loss " + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cl}^{L}" + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "text", + "content": " on the labeled data, a WMA pseudo-labeling loss " + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{pl}^{U}" + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "text", + "content": " on the unlabeled data, a cross-domain prototype alignment loss " + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{pa}" + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "text", + "content": ", and a prediction loss " + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{Mixup}}" + }, + { + "bbox": [ + 54, + 277, + 555, + 323 + ], + "type": "text", + "content": " on the augmentation data produced via progressive inter-domain mixup." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "spans": [ + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": "the remaining set of " + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": " classes corresponding to the classes in the unlabeled domain. 
Moreover, the parameters of the pre-trained " + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": "-class model " + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "inline_equation", + "content": "(g \\circ f)" + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": " can also be utilized to initialize the feature encoder " + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": " and part of the classifier " + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": " corresponding to the first " + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": " classes in the " + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": "-class model, while the other part of " + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 54, + 336, + 296, + 411 + ], + "type": "text", + "content": " will be randomly initialized." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 417, + 296, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 417, + 296, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 417, + 296, + 441 + ], + "type": "text", + "content": "3.2.2. 
Semi-Supervised Training with Adaptive Pseudo-Labeling" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "spans": [ + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "text", + "content": "After initialization, the proposed " + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "text", + "content": "-class classification model (feature encoder " + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "text", + "content": " and probabilistic classifier " + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "text", + "content": ") will be trained by leveraging both the labeled set " + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_L" + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "text", + "content": " and the unlabeled set " + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_U" + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "text", + "content": " within a pseudo-labeling based SSL framework. 
On the labeled set " + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_L" + }, + { + "bbox": [ + 55, + 445, + 296, + 517 + ], + "type": "text", + "content": ", the following standard supervised cross-entropy loss will be used as the training objective:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 526, + 296, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 526, + 296, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 526, + 296, + 544 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {c l} ^ {L} = \\mathbb {E} _ {\\left(\\mathbf {x} _ {i} ^ {l}, \\mathbf {y} _ {i} ^ {l}\\right) \\in \\mathcal {D} _ {L}} \\left[ \\ell_ {c e} \\left(h \\left(f \\left(\\mathbf {x} _ {i} ^ {l}\\right)\\right), \\operatorname {c o n c a t} \\left(\\mathbf {y} _ {i} ^ {l}, \\mathbf {0} _ {C}\\right)\\right) \\right] \\tag {4}", + "image_path": "c066b7ec3c6281a4863aefcd758aabf6f0a73bf2cd097a03e6c6cde479e0eb7f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "text", + "content": "where the concatenated label vector, " + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "inline_equation", + "content": "\\operatorname{concat}(\\mathbf{y}_i^l, \\mathbf{0}_C)" + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "text", + "content": ", expands the ground-truth label vector " + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_i^l" + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "text", + "content": " into the " + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "text", + 
"content": "-class label space by appending a zero vector with length " + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 553, + 296, + 590 + ], + "type": "text", + "content": " to it." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "type": "text", + "content": "Although we have obtained initial pseudo-labels for the unlabeled set " + }, + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_U" + }, + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "type": "text", + "content": " by utilizing the pre-trained " + }, + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "type": "text", + "content": "-class classifier, those initial labels are unavoidably noisy due to the existence of domain gap between the labeled and unlabeled domains. 
In order to effectively leverage the unlabeled data, we update the pseudo-label for each unlabeled instance " + }, + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i^u" + }, + { + "bbox": [ + 55, + 590, + 296, + 685 + ], + "type": "text", + "content": " during each training iteration in a weighted moving average (WMA) fashion as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 696, + 296, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 696, + 296, + 711 + ], + "spans": [ + { + "bbox": [ + 107, + 696, + 296, + 711 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {y}} _ {i} ^ {t} = \\beta \\hat {\\mathbf {y}} _ {i} ^ {t - 1} + (1 - \\beta) h \\left(f \\left(\\mathbf {x} _ {i} ^ {u}\\right)\\right) \\tag {5}", + "image_path": "c00272b364e73b1e97b6a3e81514955ccd9575d888ebf9e76083ad097282699b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "spans": [ + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\beta \\in (0,1)" + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "text", + "content": " is a hyper-parameter that controls the rate of update, and " + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{y}}_i^t" + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "text", + "content": " is the updated pseudo-label for " + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i^u" + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "text", + "content": " at the " + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": 
"inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 336, + 555, + 456 + ], + "type": "text", + "content": "-th training iteration. This weighted moving average update strategy can yield a smooth and adaptive assignment of pseudo-labels by promptly incorporating the progress in the classification model and mitigating the risk of oscillatory updates. Moreover, to further mitigate the adverse impact of noisy pseudo-labels, we deploy the following cross-entropy loss on the unlabeled set during training, selectively utilizing only instances with more reliable pseudo-labels:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 323, + 464, + 555, + 481 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 464, + 555, + 481 + ], + "spans": [ + { + "bbox": [ + 323, + 464, + 555, + 481 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p l} ^ {U} = \\mathbb {E} _ {\\mathbf {x} _ {i} ^ {u} \\in \\mathcal {D} _ {U}} [ \\mathbb {1} (\\max (\\hat {\\mathbf {y}} _ {i} ^ {t}) > \\epsilon) \\ell_ {c e} (h (f (\\mathbf {x} _ {i} ^ {u})), \\hat {\\mathbf {y}} _ {i} ^ {t}) ] \\tag {6}", + "image_path": "763985a22f2b8849abb1b36cf29026bd94acc55f374c028ee2bfc22567c469c1.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "spans": [ + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "type": "inline_equation", + "content": "\\mathbb{1}(\\cdot)" + }, + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "type": "text", + "content": " denotes an indicator function; " + }, + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "type": "inline_equation", + "content": "\\epsilon \\in (0,1)" + }, + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "type": "text", + "content": " is a predefined confidence threshold to 
ensure that only unlabeled instances with the maximum prediction probabilities larger than " + }, + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 313, + 488, + 555, + 536 + ], + "type": "text", + "content": " are used for the current training iteration." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 536, + 556, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 536, + 556, + 632 + ], + "spans": [ + { + "bbox": [ + 313, + 536, + 556, + 632 + ], + "type": "text", + "content": "By treating semantic classes in distinct domains as separate categories, the " + }, + { + "bbox": [ + 313, + 536, + 556, + 632 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 313, + 536, + 556, + 632 + ], + "type": "text", + "content": "-class classification model serves as a strategic choice to differentiate samples across domains. This approach avoids the additional complexity associated with a dedicated domain classifier and naturally handles the divergence in class-feature distributions across domains. It also simplifies the process and has the potential to enhance domain generalization through a shared feature encoder." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 639, + 537, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 639, + 537, + 651 + ], + "spans": [ + { + "bbox": [ + 313, + 639, + 537, + 651 + ], + "type": "text", + "content": "3.2.3. 
Cross-Domain Semantic Prototype Alignment" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 654, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 555, + 713 + ], + "type": "text", + "content": "Given that the labeled domain and unlabeled domain are comprised of the same set of " + }, + { + "bbox": [ + 313, + 654, + 555, + 713 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 654, + 555, + 713 + ], + "type": "text", + "content": " semantic classes, there is a one-to-one correspondence relationship between each cross-domain class pair for the same semantic concept. In order to facilitate knowledge sharing and transfer across domains," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15374" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 167 + ], + "type": "text", + "content": "we propose to align each semantic class from the labeled domain with its corresponding semantic class in the unlabeled domain within the learned feature embedding space. To this end, we represent each class using a class-prototype vector and design a cross-domain semantic class-prototype alignment component to enforce the corresponding semantic class pairs across the domains are more similar in the feature embedding space than non-corresponding class pairs." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 168, + 296, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 168, + 296, + 205 + ], + "spans": [ + { + "bbox": [ + 55, + 168, + 296, + 205 + ], + "type": "text", + "content": "Specifically, we compute the prototype vector for the " + }, + { + "bbox": [ + 55, + 168, + 296, + 205 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 168, + 296, + 205 + ], + "type": "text", + "content": "-th class in the labeled set as the average feature embedding of the labeled instances belonging to the class:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 213, + 296, + 230 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 213, + 296, + 230 + ], + "spans": [ + { + "bbox": [ + 77, + 213, + 296, + 230 + ], + "type": "interline_equation", + "content": "\\mathbf {p} _ {k} = \\mathbb {E} _ {\\left(\\mathbf {x} _ {i} ^ {l}, \\mathbf {y} _ {i} ^ {l}\\right) \\in \\mathcal {D} _ {L}} \\left[ \\mathbb {1} \\left(\\arg \\max _ {j} \\mathbf {y} _ {i j} ^ {l} = k\\right) f \\left(\\mathbf {x} _ {i} ^ {l}\\right) \\right] \\tag {7}", + "image_path": "4ca3c3f1702c087b9a758f7a036124e51b3a965c163e83b664d6bd9aac58dc59.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "spans": [ + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{ij}^{l}" + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "text", + "content": "-th entry of the label vector " + }, + { + "bbox": [ 
+ 55, + 239, + 296, + 299 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_i^l" + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "text", + "content": ". The corresponding " + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "text", + "content": "-th semantic class in the unlabeled set is the " + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "inline_equation", + "content": "(C + k)" + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "text", + "content": "-th class in the " + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "inline_equation", + "content": "2C" + }, + { + "bbox": [ + 55, + 239, + 296, + 299 + ], + "type": "text", + "content": "-class label space. We compute the class prototype vectors in the unlabeled set based on the instances with reliable pseudo-labels, such that:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 308, + 296, + 336 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 308, + 296, + 336 + ], + "spans": [ + { + "bbox": [ + 56, + 308, + 296, + 336 + ], + "type": "interline_equation", + "content": "\\mathbf {p} _ {C + k} = \\mathbb {E} _ {\\mathbf {x} _ {i} ^ {u} \\in \\mathcal {D} _ {U}} \\left[ \\mathbb {1} \\left(\\max (\\hat {\\mathbf {y}} _ {i} ^ {t}) > \\epsilon \\wedge \\right. \\left. 
\\arg \\max _ {j} \\hat {\\mathbf {y}} _ {i j} ^ {t} = C + k\\right) f \\left(\\mathbf {x} _ {i} ^ {u}\\right) \\right] \\tag {8}", + "image_path": "7ca9c95b7c1c2783e78fba14e07212c50b807159f1dd0058e8a853f0c2346a88.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 344, + 296, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 344, + 296, + 392 + ], + "spans": [ + { + "bbox": [ + 55, + 344, + 296, + 392 + ], + "type": "text", + "content": "Then for each semantic class " + }, + { + "bbox": [ + 55, + 344, + 296, + 392 + ], + "type": "inline_equation", + "content": "k \\in \\{1, \\dots, C\\}" + }, + { + "bbox": [ + 55, + 344, + 296, + 392 + ], + "type": "text", + "content": ", we align the prototypes of the corresponding class pairs from the labeled and unlabeled domains, " + }, + { + "bbox": [ + 55, + 344, + 296, + 392 + ], + "type": "inline_equation", + "content": "(\\mathbf{p}_k, \\mathbf{p}_{C + k})" + }, + { + "bbox": [ + 55, + 344, + 296, + 392 + ], + "type": "text", + "content": ", by employing a cross-domain contrastive prototype alignment loss as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 402, + 296, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 402, + 296, + 477 + ], + "spans": [ + { + "bbox": [ + 58, + 402, + 296, + 477 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {p a} = - \\sum_ {k = 1} ^ {C} \\left[ \\log \\frac {\\exp \\left(\\cos \\left(\\mathbf {p} _ {k} , \\mathbf {p} _ {C + k}\\right) / \\tau\\right)}{\\sum_ {k ^ {\\prime} = 1} ^ {C} \\mathbb {1} \\left(k ^ {\\prime} \\neq k\\right) \\exp \\left(\\cos \\left(\\mathbf {p} _ {k} , \\mathbf {p} _ {C + k ^ {\\prime}}\\right) / \\tau\\right)} \\right. \\\\ \\left. 
+ \\log \\frac {\\exp \\left(\\cos \\left(\\mathbf {p} _ {k} , \\mathbf {p} _ {C + k}\\right) / \\tau\\right)}{\\sum_ {k ^ {\\prime} = 1} ^ {C} \\mathbb {1} \\left(k ^ {\\prime} \\neq k\\right) \\exp \\left(\\cos \\left(\\mathbf {p} _ {k ^ {\\prime}} , \\mathbf {p} _ {C + k}\\right) / \\tau\\right)} \\right] \\tag {9} \\\\ \\end{array}", + "image_path": "7ee5234071348693882e442f395602353fed5620d5de300e377274d384123f9f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 488, + 296, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 488, + 296, + 571 + ], + "spans": [ + { + "bbox": [ + 55, + 488, + 296, + 571 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 488, + 296, + 571 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 55, + 488, + 296, + 571 + ], + "type": "text", + "content": " is a temperature hyper-parameter, and " + }, + { + "bbox": [ + 55, + 488, + 296, + 571 + ], + "type": "inline_equation", + "content": "\\cos (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 55, + 488, + 296, + 571 + ], + "type": "text", + "content": " denotes the cosine similarity function. This contrastive loss promotes the sharing of predictive information between the labeled and unlabeled domains by encouraging the corresponding class prototype pairs to be closer to each other while simultaneously pushing the non-corresponding cross-domain class prototype pairs farther apart." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 578, + 224, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 578, + 224, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 578, + 224, + 590 + ], + "type": "text", + "content": "3.2.4. 
Progressive Inter-Domain Mixup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 594, + 296, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 296, + 676 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 296, + 676 + ], + "type": "text", + "content": "In order to bridge the gap between the labeled domain and the unlabeled domain, we propose a progressive inter-domain mixup mechanism to augment the training set by dynamically generating synthetic instances between the labeled set and unlabeled set, with the objective of facilitating steady and efficient knowledge transfer from the labeled domain to the unlabeled domain." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": "Specifically, we generate an inter-domain synthetic instance " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}^m,\\mathbf{y}^m)" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": " by mixing a labeled instance " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}^l,\\mathbf{y}^l)" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": " from the labeled set " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_L" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": " with a pseudo-labeled instance " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}^u,\\hat{\\mathbf{y}}^t)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 72, + 538, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 
538, + 84 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 538, + 84 + ], + "type": "text", + "content": "from the unlabeled set " + }, + { + "bbox": [ + 314, + 72, + 538, + 84 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_U" + }, + { + "bbox": [ + 314, + 72, + 538, + 84 + ], + "type": "text", + "content": " through linear interpolation:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 375, + 93, + 554, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 93, + 554, + 114 + ], + "spans": [ + { + "bbox": [ + 375, + 93, + 554, + 114 + ], + "type": "interline_equation", + "content": "\\mathbf {x} ^ {m} = \\lambda \\mathbf {x} ^ {u} + (1 - \\lambda) \\mathbf {x} ^ {l}, \\tag {10}", + "image_path": "b9cf5dab288c9d07d0c5f6a24916c02e84ac2ceb4cf62efbeb69f603d40e9bdc.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 350, + 110, + 503, + 124 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 350, + 110, + 503, + 124 + ], + "spans": [ + { + "bbox": [ + 350, + 110, + 503, + 124 + ], + "type": "interline_equation", + "content": "\\mathbf {y} ^ {m} = \\lambda \\hat {\\mathbf {y}} ^ {t} + (1 - \\lambda) \\operatorname {c o n c a t} (\\mathbf {y} ^ {t}, \\mathbf {0} _ {C}),", + "image_path": "7c5055700340c218d8a11f11f716e14c4f2099512b4991aa2e49b2cd3ea35a09.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "spans": [ + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "type": "inline_equation", + "content": "\\lambda \\in [0,1]" + }, + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "type": "text", + "content": " is the mixing coefficient. 
To fully utilize the available data in both domains, we can generate " + }, + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "type": "inline_equation", + "content": "N^{m} = \\max (N^{l},N^{u})" + }, + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "type": "text", + "content": " synthetic instances to form a synthetic set " + }, + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{Mixup}}" + }, + { + "bbox": [ + 313, + 133, + 554, + 192 + ], + "type": "text", + "content": " by mixing each instance in the larger domain with a randomly selected instance in the other domain." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "spans": [ + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "content": "In the standard mixup [40], the mixing coefficient " + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "content": " is sampled from a fixed " + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "inline_equation", + "content": "\\mathrm{Beta}(\\alpha, \\alpha)" + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "content": " distribution with hyperparameter " + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "content": ". 
To facilitate a steady and smooth adaptation from the labeled domain to the unlabeled domain for HSSL, we propose to dynamically generate the mixup data in each training iteration " + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "content": " by deploying a progressive mixing up strategy that samples " + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "content": " from a shifted " + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "inline_equation", + "content": "\\mathrm{Beta}(\\alpha, \\alpha)" + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "content": " distribution based on a schedule function " + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "inline_equation", + "content": "\\psi(t)" + }, + { + "bbox": [ + 313, + 194, + 555, + 289 + ], + "type": "text", + "content": ", such that:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 335, + 298, + 555, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 298, + 555, + 321 + ], + "spans": [ + { + "bbox": [ + 335, + 298, + 555, + 321 + ], + "type": "interline_equation", + "content": "\\lambda \\sim \\psi (t) \\times \\operatorname {B e t a} (\\alpha , \\alpha), \\quad \\psi (t) = 0. 
5 + \\frac {t}{2 T} \\tag {11}", + "image_path": "a0cd3df0a94560e7125b97a67645b7480858afefeaf11206c92941cb9ea25512.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "spans": [ + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "text", + "content": " denotes the total number of training iterations. Following this schedule, at the beginning of the training process, we have " + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "inline_equation", + "content": "\\psi(0) \\approx 0.5" + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "text", + "content": " is sampled from the approximate interval [0, 0.5) as the model prioritizes the labeled domain, guarding against noisy pseudo-label predictions from unlabeled data. 
As the training progresses, the model gradually increases its reliance on the unlabeled data, and the interval [0, " + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "inline_equation", + "content": "\\psi(t)" + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "text", + "content": "] from which " + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "text", + "content": " is sampled is expanded gradually towards [0, 1] (with " + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "inline_equation", + "content": "\\psi(T) = 1" + }, + { + "bbox": [ + 313, + 328, + 555, + 447 + ], + "type": "text", + "content": "), allowing it to adapt seamlessly between domains." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 449, + 555, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 555, + 484 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 555, + 484 + ], + "type": "text", + "content": "Following previous works on using mixup data [1], we employ the mixup set " + }, + { + "bbox": [ + 313, + 449, + 555, + 484 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{Mixup}}" + }, + { + "bbox": [ + 313, + 449, + 555, + 484 + ], + "type": "text", + "content": " for model training by minimizing the following mean squared error:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 325, + 493, + 555, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 493, + 555, + 514 + ], + "spans": [ + { + "bbox": [ + 325, + 493, + 555, + 514 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {M i x u p}} = \\mathbb {E} _ {\\left(\\mathbf {x} _ {i} ^ {m}, \\mathbf {y} _ {i} ^ {m}\\right) \\in \\mathcal {D} _ {\\text {M i x u p}}} \\left[ \\left\\| h \\left(f \\left(\\mathbf {x} _ {i} ^ {m}\\right)\\right) - \\mathbf 
{y} _ {i} ^ {m}\\right\\| ^ {2} \\right] \\tag {12}", + "image_path": "acbc731a222e727eacebe36237359a23be8b6f2d14612934d7a77432d5825bad.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 521, + 423, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 521, + 423, + 533 + ], + "spans": [ + { + "bbox": [ + 314, + 521, + 423, + 533 + ], + "type": "text", + "content": "3.2.5. Training Objective" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 536, + 554, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 536, + 554, + 584 + ], + "spans": [ + { + "bbox": [ + 313, + 536, + 554, + 584 + ], + "type": "text", + "content": "By integrating the classification loss terms on the labeled set, the unlabeled set, and the mixup set, with the class prototype alignment loss, we obtain the following joint training objective for the Uni-HSSL model:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 326, + 594, + 555, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 594, + 555, + 609 + ], + "spans": [ + { + "bbox": [ + 326, + 594, + 555, + 609 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {c l} ^ {L} + \\lambda_ {p l} \\mathcal {L} _ {p l} ^ {U} + \\lambda_ {p a} \\mathcal {L} _ {p a} + \\lambda_ {\\text {M i x u p}} \\mathcal {L} _ {\\text {M i x u p}} \\tag {13}", + "image_path": "d1951c18aa2cadedb33f666f21dec94e5063a7fdd5aba0d618b9f8df96a6fab3.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 617, + 546, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 546, + 630 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 546, + 630 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 617, + 546, + 630 + ], + "type": "inline_equation", + "content": "\\lambda_{pl},\\lambda_{pa}" + }, + { + "bbox": [ + 313, + 617, + 546, + 630
+ ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 617, + 546, + 630 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{Mixup}}" + }, + { + "bbox": [ + 313, + 617, + 546, + 630 + ], + "type": "text", + "content": " are trade-off hyper-parameters." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 640, + 394, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 640, + 394, + 654 + ], + "spans": [ + { + "bbox": [ + 313, + 640, + 394, + 654 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 659, + 430, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 659, + 430, + 673 + ], + "spans": [ + { + "bbox": [ + 313, + 659, + 430, + 673 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 555, + 713 + ], + "type": "text", + "content": "Datasets We conducted comprehensive experiments to evaluate the performance of our proposed framework on four image classification benchmark datasets: Office-31," + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15375" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 69, + 539, + 240 + ], + "blocks": [ + { + "bbox": [ + 72, + 69, + 539, + 240 + ], + "lines": [ + { + "bbox": [ + 72, + 69, + 539, + 240 + ], + "spans": [ + { + "bbox": [ + 72, + 69, + 539, + 240 + ], + "type": "table", + 
"html": "
SupervisedFlexMatchFixMatchSimMatchCDAN+SupMCC+SupBiAdaptUni-HSSL
A/C53.1(0.7)51.1(1.2)51.9(1.5)57.8(1.6)47.0(0.5)54.9(1.2)55.1(1.8)60.1(0.9)
C/A66.0(1.2)68.1(1.3)63.8(0.7)69.7(0.9)63.9(0.7)70.5(0.3)65.1(1.2)72.0(0.7)
C/R77.5(0.9)72.1(0.9)79.5(0.5)78.5(0.5)67.1(0.8)75.4(0.5)75.2(1.2)80.5(0.4)
R/C63.9(1.2)67.8(1.6)66.2(0.7)64.3(0.8)67.0(1.2)69.3(0.5)61.2(1.8)72.8(0.6)
R/A72.6(0.9)59.0(1.2)74.1(0.5)70.5(0.5)74.6(0.9)75.1(0.8)69.1(0.9)75.8(0.6)
A/R75.1(0.7)73.5(0.9)70.4(0.6)75.8(0.5)66.5(0.8)77.3(0.8)72.1(1.4)78.3(0.5)
A/P67.4(1.5)64.0(0.9)62.7(0.6)68.9(0.6)56.5(0.5)71.8(0.4)64.9(1.3)70.9(0.8)
P/A69.1(1.0)64.1(1.2)62.8(0.8)69.7(0.9)74.9(1.2)76.1(0.2)64.1(0.8)78.7(0.4)
C/P69.1(0.9)65.6(1.0)65.1(1.1)70.0(0.4)65.5(1.2)71.2(0.5)69.1(1.4)72.8(0.7)
P/C64.6(0.9)64.3(1.1)65.2(1.5)68.5(0.8)66.8(0.6)68.0(0.5)67.7(0.9)69.9(0.9)
P/R80.0(0.5)73.3(0.7)78.1(0.4)78.1(0.2)89.5(0.4)82.1(0.6)76.2(1.2)82.9(0.4)
R/P77.9(0.1)68.1(1.2)74.7(0.3)74.0(0.7)78.2(1.3)77.0(1.2)74.1(1.4)82.1(0.5)
Avg.69.765.967.970.567.472.367.874.7
", + "image_path": "3364bb9cb1c8607ff476c7cc33072927c71d28afc10fb4d760e55149e101017a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 249, + 555, + 273 + ], + "lines": [ + { + "bbox": [ + 55, + 249, + 555, + 273 + ], + "spans": [ + { + "bbox": [ + 55, + 249, + 555, + 273 + ], + "type": "text", + "content": "Table 1. Mean classification accuracy (standard deviation is within parentheses) on the Office-Home dataset using the ResNet-50 backbone. The first domain in each row indicates the labeled domain while the second domain indicates the unlabeled domain." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 54, + 293, + 297, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 293, + 297, + 555 + ], + "spans": [ + { + "bbox": [ + 54, + 293, + 297, + 555 + ], + "type": "text", + "content": "Office-Home, VisDA, and ISIC-2019. In all four datasets, we split the samples of each domain into 90/10 train/test data. Office-31 [28] is comprised of a collection of 4,652 images spanning 31 different categories. The images are sourced from 3 distinct domains: Amazon (A), DSLR (D), and Webcam (W) with different image resolutions, quality, and lighting conditions. Office-Home [36] is a large collection of over 15,500 images spanning 65 categories. The images are sourced from 4 diverse domains: Artistic images (A), Clip Art (C), Product images (P), and Real-World images (R). VisDA-2017 [26] is a large-scale dataset tailored specifically for the visual domain adaptation task. This dataset includes images of 12 distinct categories from two domains, Synthetic (S) and Real (R). With the significant domain shift between the synthetic and real images, VisDA highlights the difficulties associated with bridging significant domain gaps. 
ISIC-2019 is a comprehensive repository of skin cancer research images sourced from 4 different sources: BCN-20000 (BCN) [8], Skin Cancer MNIST (HAM) [34], MSK4 [7], and an undefined source. We only utilize BCN and HAM sources as they include samples from all eight distinct classes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 570, + 297, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 297, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 297, + 714 + ], + "type": "text", + "content": "Implementation Details For all baselines we compared our Uni-HSSL against, we strictly followed the implementation details and hyper-parameters specified in the corresponding original papers. In order to ensure consistent comparisons with a multitude of earlier studies across various benchmark datasets, we employed two common backbone networks: ResNet-50 and ResNet-101 which are pre-trained on the ImageNet [10] dataset. We utilized ResNet-101 for VisDA dataset experiments and ResNet-50 for all the other benchmark datasets. The supervised pre-training stage is made up of 10 epochs while the semi-supervised training stage is made up of 100 epochs. In both stages, we em" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "spans": [ + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "content": "ployed an SGD optimizer with a learning rate of " + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "inline_equation", + "content": "5e^{-4}" + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "content": " and Nesterov momentum [24] of 0.9. In the semi-supervised training stage, the learning rate is adjusted using a cosine annealing strategy [23, 37]. 
We set the L2 regularization coefficient to " + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "inline_equation", + "content": "1e^{-3}" + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "content": " and the batch size to 32 for all datasets. The trade-off hyper-parameters " + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "inline_equation", + "content": "\\lambda_{pl},\\lambda_{pa},\\lambda_{\\mathrm{Mixup}}" + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "content": " take the values 1, " + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "inline_equation", + "content": "1e^{-2}" + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "content": " and 1 respectively, while " + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "content": " take the value 0.5 and " + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 293, + 556, + 437 + ], + "type": "text", + "content": " is set to 0.8. Furthermore, similar to [1], we apply random translations and horizontal flips to the input images prior to applying the Progressive Inter-Domain Mixup. We report the mean classification accuracy and the corresponding standard deviation over 3 runs in each experiment." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 445, + 432, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 445, + 432, + 456 + ], + "spans": [ + { + "bbox": [ + 313, + 445, + 432, + 456 + ], + "type": "text", + "content": "4.2. 
Comparison Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "type": "text", + "content": "We evaluate the proposed Uni-HSSL framework on the heterogeneous semi-supervised learning tasks and compare it to four categories of baselines: Supervised Learning baselines, Semi-Supervised Learning (SSL) baselines, Unsupervised Domain Adaptation (UDA) baselines, and Bidirectional Adaptation baselines. The supervised baseline is exclusively trained on the labeled data and does not leverage the unlabeled data during training. We employ a set of representative SSL baselines (FlexMatch [39], FixMatch [31], and SimMatch [41]) and a set of representative UDA baselines (CDAN [22] and MCC [15]). In particular, we also compare our work with the state-of-the-art bidirectional adaptation method (BiAdapt) [14]. As the traditional UDA methods are trained to perform well solely on an unlabeled target domain, to ensure a fair comparison, we equip the UDA methods with a Supervised classifier (Sup) trained on the labeled set and a domain classifier and refer to them as " + }, + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{MCC} + \\mathrm{Sup}" + }, + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{CDAN} + \\mathrm{Sup}" + }, + { + "bbox": [ + 313, + 462, + 556, + 715 + ], + "type": "text", + "content": ". 
At inference time, the domain classifier assigns each test sample to the appropriate classifier in the corresponding domain—either the supervised classifier for samples predicted to originate from the labeled domain or" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "15376" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 69, + 552, + 240 + ], + "blocks": [ + { + "bbox": [ + 59, + 69, + 552, + 240 + ], + "lines": [ + { + "bbox": [ + 59, + 69, + 552, + 240 + ], + "spans": [ + { + "bbox": [ + 59, + 69, + 552, + 240 + ], + "type": "table", + "html": "
SupervisedFlexMatchFixMatchSimMatchCDAN+SupMCC+SupBiAdaptUni-HSSL
Plane93.8(0.2)98.3(0.7)94.9(0.5)93.6(0.8)98.4(0.3)98.6(0.3)90.1(1.4)98.2(0.5)
Bicycle74.1(0.5)74.8(0.9)53.5(0.2)81.1(0.8)94.4(0.7)96.6(0.5)79.1(1.2)97.5(0.9)
Bus79.4(0.7)53.9(1.2)79.5(0.8)56.9(1.2)90.1(0.5)88.6(0.7)54.7(1.3)91.4(0.8)
Car86.2(0.9)36.4(2.1)88.5(0.3)59.6(1.5)85.1(0.5)84.8(0.9)56.1(1.2)89.0(0.9)
Horse90.9(0.2)97.4(0.5)76.0(0.8)65.6(1.0)96.6(0.1)97.6(0.3)62.1(1.4)98.2(0.3)
Knife87.5(0.7)77.2(0.8)78.8(0.9)71.9(0.5)95.0(1.4)95.1(0.9)68.2(0.1)98.9(0.4)
Motor.94.5(0.4)66.6(1.2)40.8(1.2)70.8(0.9)96.6(0.5)94.2(0.2)68.1(1.5)97.0(0.6)
Person80.0(0.7)80.5(0.8)58.9(1.6)64.1(0.8)94.3(0.6)94.6(0.2)62.5(1.2)95.6(0.7)
Plant91.1(0.7)91.8(0.8)62.7(0.7)65.5(0.9)96.5(0.4)97.3(0.5)63.5(1.9)95.7(0.2)
Skateboard81.8(0.9)90.0(0.5)68.9(1.2)57.0(1.7)85.5(0.5)83.0(0.8)59.3(1.7)91.5(0.8)
Train96.0(0.3)96.8(0.7)94.2(0.4)74.2(0.9)95.7(0.7)95.6(0.1)71.3(1.5)97.0(0.3)
Truck59.8(0.9)49.2(1.2)49.5(1.2)52.1(1.7)79.8(0.2)80.6(1.0)50.1(1.5)82.4(0.7)
Avg.84.182.487.380.892.192.079.193.1
", + "image_path": "7714dd751dca36bb537019b0ac976995b6c380b6319cc16832ce47941ea1e9d0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 79, + 290, + 272, + 403 + ], + "blocks": [ + { + "bbox": [ + 55, + 249, + 555, + 273 + ], + "lines": [ + { + "bbox": [ + 55, + 249, + 555, + 273 + ], + "spans": [ + { + "bbox": [ + 55, + 249, + 555, + 273 + ], + "type": "text", + "content": "Table 2. Mean classification accuracy (standard deviation is within parentheses) on the VisDA dataset using the ResNet-101 backbone. The rows correspond to the different classes of the dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 290, + 272, + 403 + ], + "lines": [ + { + "bbox": [ + 79, + 290, + 272, + 403 + ], + "spans": [ + { + "bbox": [ + 79, + 290, + 272, + 403 + ], + "type": "table", + "html": "
B/HH/BAvg.
Supervised70.5(0.9)65.4(1.2)67.9
FlexMatch71.3(1.4)68.7(0.8)70.0
FixMatch77.5(0.8)65.0(0.7)71.3
SimMatch75.1(1.5)69.2(1.7)72.2
CDAN+Sup72.9(1.0)65.2(0.4)69.1
MCC+Sup60.2(1.8)56.7(1.7)58.7
BiAdapt74.2(0.7)68.3(1.3)71.2
Uni-HSSL79.9(0.7)71.0(0.9)75.4
", + "image_path": "4181a9504a07b44ca03c9657c5cc6c0587c93c034995be45d31860c37b8522bd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 411, + 295, + 456 + ], + "lines": [ + { + "bbox": [ + 55, + 411, + 295, + 456 + ], + "spans": [ + { + "bbox": [ + 55, + 411, + 295, + 456 + ], + "type": "text", + "content": "Table 3. Mean classification accuracy (standard deviation is within parentheses) on ISIC-2019 using the ResNet-50 backbone. The first domain in each row indicates the labeled domain the second domain indicates the unlabeled domain." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 472, + 295, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 472, + 295, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 472, + 295, + 495 + ], + "type": "text", + "content": "the UDA classifier for those predicted to come from the unlabeled domain." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "text", + "content": "The comparison results on the Office-Home, VisDA, ISIC-2019 and Office-31 datasets are reported in Tables 1, 2, 3 and 4, respectively, where the first domain indicates the labeled domain and the second domain indicates the unlabeled domain. In the case of the VisDA dataset, the labeled dataset is sampled from the synthetic domain (S) and the unlabeled dataset is sampled from the real domain (R) and we report the average classification accuracy for each class and the overall average classification accuracy. The tables show that Uni-HSSL consistently outperforms all baselines on all datasets across all setups. 
The performance gains over the supervised baseline are notable exceeding " + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "inline_equation", + "content": "7\\%" + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "text", + "content": " on average in the cases of the Office-31, Office-Home, VisDA, and ISIC-2019 datasets, respectively. In the case of the VisDA dataset, the performance improvement over the supervised baseline at the class level is substantial, exceeding " + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "inline_equation", + "content": "22\\%" + }, + { + "bbox": [ + 55, + 498, + 297, + 714 + ], + "type": "text", + "content": " for some classes. 
Furthermore, Uni-HSSL consistently outperforms all the SSL baselines, achieving" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": "performance gains exceeding " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "3\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "3\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": " over the most effective SSL baselines on the Office-31, Office-Home, VisDA, and ISIC-2019 datasets, respectively. In some cases, such as A/W on Office-31 and P/A on Office-Home, the performance improvement over SSL baselines is notable, surpassing " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", respectively, highlighting the limitations of traditional SSL baselines in the proposed HSSL task. 
In the case of the UDA baselines, Uni-HSSL yields superior performance with all domain setups on all four datasets with performance gains around " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": " on Office-31, Office-Home, VisDA and ISIC-2019 datasets, respectively. Uni-HSSL outperforms the UDA baselines on almost all classes of the VisDA dataset, with the UDA baselines slightly excelling in only two classes. However, Uni-HSSL still maintains superior overall performance compared to the UDA baselines. Furthermore, the MCC+Sup baseline does not perform well on the ISIC-2019 dataset, where it suffers a major drop in performance which can be attributed to the MCC baseline's sensitivity to the class imbalance inherent in this dataset. 
Moreover, our Uni-HSSL also substantially outperforms BiAdapt, with performance gains surpassing " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 313, + 293, + 556, + 604 + ], + "type": "text", + "content": " on the Office-31, Office-Home, VisDA and ISIC-2019 datasets, respectively. These results underscore the robustness of Uni-HSSL and highlight the limitations of BiAdapt in effectively addressing the challenges posed by the proposed HSSL task." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 612, + 408, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 612, + 408, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 612, + 408, + 624 + ], + "type": "text", + "content": "4.3. 
Ablation Study" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "type": "text", + "content": "In order to investigate the contribution of each component of the proposed framework, we conducted an ablation study to compare the proposed Uni-HSSL with its six variants: (1) “-w/o WMA”, which drops the Weighted Moving Average component of the pseudo-label update and simply uses the model predictions to generate pseudo-labels; (2) “-w/o " + }, + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{cl}}^{L}" + }, + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "type": "text", + "content": "”, which drops the cross-entropy classification loss on the la" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15377" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 112, + 70, + 497, + 182 + ], + "blocks": [ + { + "bbox": [ + 112, + 70, + 497, + 182 + ], + "lines": [ + { + "bbox": [ + 112, + 70, + 497, + 182 + ], + "spans": [ + { + "bbox": [ + 112, + 70, + 497, + 182 + ], + "type": "table", + "html": "
W/AA/WA/DD/AD/WW/DAvg.
Supervised68.6(1.6)82.8(1.2)85.1(1.8)35.5(0.9)96.9(0.4)98.2(0.5)77.8
FlexMatch68.1(1.8)81.3(1.3)85.1(1.8)63.0(2.1)98.5(0.2)98.9(0.2)82.4
FixMatch69.1(1.3)83.4(0.9)86.4(0.8)53.7(1.3)98.1(0.2)98.2(0.2)81.5
SimMatch71.1(0.9)84.1(1.0)86.5(0.5)68.6(1.1)96.8(0.5)98.8(0.4)84.3
CDAN+Sup61.2(1.2)82.5(1.3)87.4(2.2)58.3(2.6)79.2(0.4)97.5(0.4)77.7
MCC+Sup71.5(2.7)88.8(0.7)89.1(0.5)67.6(1.3)81.7(0.7)99.5(0.4)83.0
BiAdapt70.2(0.9)85.0(0.5)77.4(0.7)67.1(1.0)94.2(0.5)98.5(0.3)82.0
Uni-HSSL73.1(1.0)90.2(0.8)90.0(0.2)72.1(0.7)100(0.0)100(0.0)87.5
", + "image_path": "3139c34cc36c435b4dbaa76d7004cef3322abdcfff60864d9bba26612b566892.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 97, + 224, + 514, + 324 + ], + "blocks": [ + { + "bbox": [ + 55, + 191, + 553, + 213 + ], + "lines": [ + { + "bbox": [ + 55, + 191, + 553, + 213 + ], + "spans": [ + { + "bbox": [ + 55, + 191, + 553, + 213 + ], + "type": "text", + "content": "Table 4. Mean classification accuracy (standard deviation is within parentheses) on the Office-31 dataset using the ResNet-50 backbone. The first domain in each column indicates the labeled domain the second domain indicates the unlabeled domain." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 97, + 224, + 514, + 324 + ], + "lines": [ + { + "bbox": [ + 97, + 224, + 514, + 324 + ], + "spans": [ + { + "bbox": [ + 97, + 224, + 514, + 324 + ], + "type": "table", + "html": "
W/AA/WA/DD/AD/WW/DAvg.
Uni-HSSL73.1(1.0)90.2(0.8)90.0(0.2)72.1(0.7)100(0.0)100(0.0)87.5
-w/o WMA72.8(0.5)87.1(0.8)88.3(0.9)71.0(0.8)100(0.0)100(0.0)86.8
-w/o \\( \\mathcal{L}_{cl}^{L} \\)67.6(1.7)85.5(0.8)86.1(1.2)64.8(2.0)93.2(0.5)92.9(0.6)81.7
-w/o \\( \\mathcal{L}_{pl}^{U} \\)72.5(0.8)87.9(0.9)88.1(0.7)71.0(0.9)98.0(0.2)98.5(0.2)86.1
-w/o \\( \\mathcal{L}_{pa} \\)72.7(0.5)88.9(0.7)87.2(0.6)71.3(0.9)99.1(0.0)100(0.0)86.5
-w/o \\( \\mathcal{L}_{Mixup} \\)71.9(1.2)86.7(0.9)88.1(0.8)71.3(1.1)98.0(0.4)99.9(0.0)86.1
-w/o Prog. Mixup71.3(0.9)84.8(0.9)88.1(1.0)70.0(1.3)99.2(0.5)99.9(0.0)85.6
", + "image_path": "fed544c827304609b0fb2b46f32701da9459e32623d38cec8da7914ee7895e1c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 333, + 555, + 365 + ], + "lines": [ + { + "bbox": [ + 55, + 333, + 555, + 365 + ], + "spans": [ + { + "bbox": [ + 55, + 333, + 555, + 365 + ], + "type": "text", + "content": "Table 5. Ablation study results in terms of mean classification accuracy (standard deviation is within parentheses) on the Office-31 dataset using the ResNet-50 backbone. The first domain in each column indicates the labeled domain while the second domain indicates the unlabeled domain." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "spans": [ + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "text", + "content": "beled set " + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_L" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "text", + "content": "; (3) “" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "inline_equation", + "content": "-\\mathrm{w/o}\\mathcal{L}_{\\mathrm{pl}}^U" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "text", + "content": ", which drops the cross-entropy pseudo-label classification loss on the unlabeled set " + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_U" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "text", + "content": "; (4) “" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "inline_equation", + "content": "-\\mathrm{w/o}\\mathcal{L}_{\\mathrm{pa}}" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "text", + "content": ", which drops the Cross-Domain Prototype Alignment component; (5) “" + }, 
+ { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "inline_equation", + "content": "-\\mathrm{w/o}\\mathcal{L}_{\\mathrm{Mixup}}" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "text", + "content": ", which drops the Progressive Inter-Domain Mixup component; and (6) “" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "inline_equation", + "content": "-\\mathrm{w/o}" + }, + { + "bbox": [ + 54, + 375, + 295, + 495 + ], + "type": "text", + "content": " Prog. Mixup”, which drops the progressive component of the Inter-Domain Mixup and uses a simple mixup for inter-domain data augmentation. We compare the proposed UniHSSL with all the six variants on the Office-31 dataset and report the results in Table 5." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": "From the table, we can see that dropping any component from the proposed unified framework results in performance degradation in all cases. “-w/o " + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{cl}}^{L}" + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": "” variant suffered the largest performance degradation, which highlights the importance of the ground-truth labels of " + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_L" + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": " in guiding the learning process of the framework. 
Dropping the WMA from the pseudo-label generation component led to a slight average performance drop to " + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "inline_equation", + "content": "86.8\\%" + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": ", underscoring its role in obtaining stable and confident pseudo-labels. Similarly, dropping the classification loss on the unlabeled data " + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{pl}^{U}" + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": " led to a performance degradation to " + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "inline_equation", + "content": "86.1\\%" + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": ". Furthermore, the variant “-w/o Prog. Mixup” suffers a larger drop in performance in comparison with the variant “-w/o " + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{Mixup}}" + }, + { + "bbox": [ + 55, + 498, + 295, + 713 + ], + "type": "text", + "content": "”, which highlights the importance of progressively generating the augmented samples to ensure the accuracy of their corresponding augmented labels. Generating inter-domain augmented samples without taking into account the domain gap between the labeled domain and unlabeled domain can" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 376, + 555, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 555, + 437 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 555, + 437 + ], + "type": "text", + "content": "lead to a degradation in performance due to the noisy augmented labels of the generated samples. 
Overall, the consistent performance drops across all the tasks of Office-31 for each variant validate the essential contribution of each corresponding component of the Uni-HSSL framework." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 452, + 388, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 452, + 388, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 452, + 388, + 464 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 474, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 474, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 474, + 555, + 713 + ], + "type": "text", + "content": "In this paper, we introduced a challenging heterogeneous semi-supervised learning problem, where the labeled and unlabeled training data come from different domains and possess different label and class feature distributions. To address this demanding setup, we proposed a Unified Framework for Heterogeneous Semi-Supervised Learning (Uni-HSSL), which trains a fine-grained classification model over the concatenated label space by effectively exploiting the labeled and unlabeled data as well as their relationships. Uni-HSSL adopts a WMA pseudo-labeling strategy to obtain stable and confident pseudo-labels for the unlabeled data, while deploying a cross-domain class prototype alignment component to support knowledge transfer and sharing between domains. A novel progressive inter-domain mixup component is further devised to augment the training data and bridge the significant gap between the labeled and unlabeled domains. The experimental results demonstrate the effectiveness and superiority of the proposed Uni-HSSL over state-of-the-art semi-supervised learning methods and unsupervised domain adaptation baselines." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "15378" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 297, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 61, + 91, + 297, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 297, + 145 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 297, + 145 + ], + "type": "text", + "content": "[1] David Berthelot, Nicholas Carlini, Ian Goodfellow, Nicolas Papernot, Avital Oliver, and Colin A Raffel. Mixmatch: A holistic approach to semi-supervised learning. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 5, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 148, + 296, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 148, + 296, + 201 + ], + "spans": [ + { + "bbox": [ + 61, + 148, + 296, + 201 + ], + "type": "text", + "content": "[2] David Berthelot, Rebecca Roelofs, Kihyuk Sohn, Nicholas Carlini, and Alexey Kurakin. Adamatch: A unified approach to semi-supervised learning and domain adaptation. In International Conference on Learning Representations (ICLR), 2021. 
3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 204, + 295, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 204, + 295, + 237 + ], + "spans": [ + { + "bbox": [ + 62, + 204, + 295, + 237 + ], + "type": "text", + "content": "[3] Kaidi Cao, Maria Brbic, and Jure Leskovec. Open-world semi-supervised learning. In International Conference on Learning Representations (ICLR), 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 239, + 296, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 239, + 296, + 282 + ], + "spans": [ + { + "bbox": [ + 62, + 239, + 296, + 282 + ], + "type": "text", + "content": "[4] Chao Chen, Zhihang Fu, Zhihong Chen, Sheng Jin, Zhaowei Cheng, Xinyu Jin, and Xian-Sheng Hua. Homm: Higher-order moment matching for unsupervised domain adaptation. In AAAI Conference on Artificial Intelligence, 2020. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 285, + 296, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 285, + 296, + 328 + ], + "spans": [ + { + "bbox": [ + 62, + 285, + 296, + 328 + ], + "type": "text", + "content": "[5] Xinyang Chen, Sinan Wang, Mingsheng Long, and Jianmin Wang. Transferability vs. discriminability: Batch spectral penalization for adversarial domain adaptation. In International Conference on Machine Learning (ICML), 2019. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 330, + 296, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 330, + 296, + 372 + ], + "spans": [ + { + "bbox": [ + 62, + 330, + 296, + 372 + ], + "type": "text", + "content": "[6] Yanbei Chen, Xiatian Zhu, Wei Li, and Shaogang Gong. Semi-supervised learning under class distribution mismatch. In Proceedings of the AAAI Conference on Artificial Intelligence, 2020. 
2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 375, + 296, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 375, + 296, + 462 + ], + "spans": [ + { + "bbox": [ + 62, + 375, + 296, + 462 + ], + "type": "text", + "content": "[7] Noel CF Codella, David Gutman, M Emre Celebi, Brian Helba, Michael A Marchetti, Stephen W Dusza, Aadi Kalloo, Konstantinos Liopyris, Nabin Mishra, Harald Kittler, et al. Skin lesion analysis toward melanoma detection: A challenge at the 2017 international symposium on biomedical imaging (isbi), hosted by the international skin imaging collaboration (isic). In International Symposium on Biomedical Imaging (ISBI), 2018. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 464, + 296, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 464, + 296, + 518 + ], + "spans": [ + { + "bbox": [ + 62, + 464, + 296, + 518 + ], + "type": "text", + "content": "[8] Marc Combalia, Noel CF Codella, Veronica Rotemberg, Brian Helba, Veronica Vilaplana, Ofer Reiter, Cristina Carrera, Alicia Barreiro, Allan C Halpern, Susana Puig, et al. Bcn20000: Dermoscopic lesions in the wild. arXiv preprint arXiv:1908.02288, 2019. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 520, + 296, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 520, + 296, + 575 + ], + "spans": [ + { + "bbox": [ + 62, + 520, + 296, + 575 + ], + "type": "text", + "content": "[9] Shuhao Cui, Shuhui Wang, Junbao Zhuo, Liang Li, Qingming Huang, and Qi Tian. Towards discriminability and diversity: Batch nuclear-norm maximization under label insufficient situations. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 577, + 296, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 577, + 296, + 620 + ], + "spans": [ + { + "bbox": [ + 57, + 577, + 296, + 620 + ], + "type": "text", + "content": "[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Conference on Computer Vision and Pattern Recognition (CVPR), 2009. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 623, + 296, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 623, + 296, + 655 + ], + "spans": [ + { + "bbox": [ + 57, + 623, + 296, + 655 + ], + "type": "text", + "content": "[11] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International Conference on Machine Learning (ICML), 2015. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 657, + 296, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 657, + 296, + 700 + ], + "spans": [ + { + "bbox": [ + 57, + 657, + 296, + 700 + ], + "type": "text", + "content": "[12] Lan-Zhe Guo, Zhen-Yu Zhang, Yuan Jiang, Yu-Feng Li, and Zhi-Hua Zhou. Safe deep semi-supervised learning for unseen-class unlabeled data. In International Conference on Machine Learning (ICML). PMLR, 2020. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 702, + 296, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 702, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 57, + 702, + 296, + 714 + ], + "type": "text", + "content": "[13] Zhuo Huang, Chao Xue, Bo Han, Jian Yang, and Chen Gong." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 333, + 73, + 555, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 555, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 555, + 95 + ], + "type": "text", + "content": "Universal semi-supervised learning. Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 96, + 555, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 555, + 149 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 555, + 149 + ], + "type": "text", + "content": "[14] Lin-Han Jia, Lan-Zhe Guo, Zhi Zhou, Jie-Jing Shao, Yuke Xiang, and Yu-Feng Li. Bidirectional adaptation for robust semi-supervised learning with inconsistent data distributions. In International Conference on Machine Learning (ICML), 2023. 2, 3, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 151, + 555, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 151, + 555, + 184 + ], + "spans": [ + { + "bbox": [ + 317, + 151, + 555, + 184 + ], + "type": "text", + "content": "[15] Ying Jin, Ximei Wang, Mingsheng Long, and Jianmin Wang. Minimum class confusion for versatile domain adaptation. In European Conference on Computer Vision (ECCV), 2020. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 185, + 555, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 185, + 555, + 217 + ], + "spans": [ + { + "bbox": [ + 317, + 185, + 555, + 217 + ], + "type": "text", + "content": "[16] Samuli Laine and Timo Aila. Temporal ensembling for semi-supervised learning. In International Conference on Learning Representations (ICLR), 2017. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 217, + 555, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 217, + 555, + 250 + ], + "spans": [ + { + "bbox": [ + 316, + 217, + 555, + 250 + ], + "type": "text", + "content": "[17] Qicheng Lao, Xiang Jiang, and Mohammad Havaei. Hypothesis disparity regularized mutual information maximization. In AAAI Conference on Artificial Intelligence, 2021. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 251, + 554, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 251, + 554, + 272 + ], + "spans": [ + { + "bbox": [ + 316, + 251, + 554, + 272 + ], + "type": "text", + "content": "[18] Yann LeCun, Yoshua Bengio, and Geoffrey Hinton. Deep learning. In Nature, 2015. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 273, + 555, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 273, + 555, + 315 + ], + "spans": [ + { + "bbox": [ + 317, + 273, + 555, + 315 + ], + "type": "text", + "content": "[19] Dong-Hyun Lee et al. Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. In Workshop on challenges in representation learning, ICML, 2013. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 316, + 555, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 316, + 555, + 349 + ], + "spans": [ + { + "bbox": [ + 316, + 316, + 555, + 349 + ], + "type": "text", + "content": "[20] Hong Liu, Jianmin Wang, and Mingsheng Long. Cycle self-training for domain adaptation. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 
3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 350, + 555, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 350, + 555, + 392 + ], + "spans": [ + { + "bbox": [ + 316, + 350, + 555, + 392 + ], + "type": "text", + "content": "[21] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International Conference on Machine Learning (ICML), 2015. 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 394, + 555, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 394, + 555, + 437 + ], + "spans": [ + { + "bbox": [ + 316, + 394, + 555, + 437 + ], + "type": "text", + "content": "[22] Mingsheng Long, Zhangjie Cao, Jianmin Wang, and Michael I Jordan. Conditional adversarial domain adaptation. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 3, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 438, + 554, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 438, + 554, + 470 + ], + "spans": [ + { + "bbox": [ + 316, + 438, + 554, + 470 + ], + "type": "text", + "content": "[23] Ilya Loshchilov and Frank Hutter. SGDR: Stochastic gradient descent with warm restarts. In International Conference on Learning Representations (ICLR), 2017. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 472, + 555, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 472, + 555, + 503 + ], + "spans": [ + { + "bbox": [ + 316, + 472, + 555, + 503 + ], + "type": "text", + "content": "[24] Yurii Nesterov. A method for unconstrained convex minimization problem with the rate of convergence o (1/k2). In Dokl. Akad. Nauk. SSSR, 1983. 
6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 505, + 554, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 505, + 554, + 548 + ], + "spans": [ + { + "bbox": [ + 317, + 505, + 554, + 548 + ], + "type": "text", + "content": "[25] Avital Oliver, Augustus Odena, Colin A Raffel, Ekin Dogus Cubuk, and Ian Goodfellow. Realistic evaluation of deep semi-supervised learning algorithms. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 1, 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 548, + 555, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 548, + 555, + 591 + ], + "spans": [ + { + "bbox": [ + 317, + 548, + 555, + 591 + ], + "type": "text", + "content": "[26] Xingchao Peng, Ben Usman, Neela Kaushik, Judy Hoffman, Dequan Wang, and Kate Saenko. Visda: The visual domain adaptation challenge. arXiv preprint arXiv:1710.06924, 2017. 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 593, + 555, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 593, + 555, + 635 + ], + "spans": [ + { + "bbox": [ + 317, + 593, + 555, + 635 + ], + "type": "text", + "content": "[27] Hoang Phan, Trung Le, Trung Phung, Anh Tuan Bui, Nhat Ho, and Dinh Phung. Global-local regularization via distributional robustness. In International Conference on Artificial Intelligence and Statistics (AISTATS), 2023. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 637, + 555, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 637, + 555, + 669 + ], + "spans": [ + { + "bbox": [ + 317, + 637, + 555, + 669 + ], + "type": "text", + "content": "[28] Kate Saenko, Brian Kulis, Mario Fritz, and Trevor Darrell. Adapting visual category models to new domains. In European Conference on Computer Vision (ECCV), 2010. 
6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 670, + 555, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 555, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 555, + 712 + ], + "type": "text", + "content": "[29] Jian Shen, Yanru Qu, Weinan Zhang, and Yong Yu. Wasserstein distance guided representation learning for domain adaptation. In AAAI Conference on Artificial Intelligence, 2018. 3" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "15379" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 297, + 654 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 73, + 297, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 297, + 115 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 297, + 115 + ], + "type": "text", + "content": "[30] Rui Shu, Hung H Bui, Hirokazu Narui, and Stefano Ermon. A dirt-t approach to unsupervised domain adaptation. International Conference on Learning Representations (ICLR), 2018. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 295, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 295, + 183 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 295, + 183 + ], + "type": "text", + "content": "[31] Kihyuk Sohn, David Berthelot, Nicholas Carlini, Zizhao Zhang, Han Zhang, Colin A Raffel, Ekin Dogus Cubuk, Alexey Kurakin, and Chun-Liang Li. Fixmatch: Simplifying semi-supervised learning with consistency and confidence. 
In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 185, + 295, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 185, + 295, + 228 + ], + "spans": [ + { + "bbox": [ + 56, + 185, + 295, + 228 + ], + "type": "text", + "content": "[32] Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. Revisiting unreasonable effectiveness of data in deep learning era. In International Conference on Computer Vision (ICCV), 2017. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 230, + 295, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 230, + 295, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 295, + 274 + ], + "type": "text", + "content": "[33] Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In Advances in Neural Information Processing Systems (NeurIPS), 2017. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 275, + 295, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 275, + 295, + 317 + ], + "spans": [ + { + "bbox": [ + 56, + 275, + 295, + 317 + ], + "type": "text", + "content": "[34] Philipp Tschandl, Cliff Rosendahl, and Harald Kittler. The ham10000 dataset, a large collection of multi-source dermatoscopic images of common pigmented skin lesions. Scientific data, 2018. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 319, + 295, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 319, + 295, + 342 + ], + "spans": [ + { + "bbox": [ + 56, + 319, + 295, + 342 + ], + "type": "text", + "content": "[35] Jesper E Van Engelen and Holger H Hoos. A survey on semi-supervised learning. Machine learning, 2020. 
1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 343, + 295, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 343, + 295, + 385 + ], + "spans": [ + { + "bbox": [ + 56, + 343, + 295, + 385 + ], + "type": "text", + "content": "[36] Hemanth Venkateswara, Jose Eusebio, Shayok Chakraborty, and Sethuraman Panchanathan. Deep hashing network for unsupervised domain adaptation. In Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 387, + 295, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 387, + 295, + 430 + ], + "spans": [ + { + "bbox": [ + 56, + 387, + 295, + 430 + ], + "type": "text", + "content": "[37] Vikas Verma, Kenji Kawaguchi, Alex Lamb, Juho Kannala, Arno Solin, Yoshua Bengio, and David Lopez-Paz. Interpolation consistency training for semi-supervised learning. In Neural Networks, 2022. 2, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 432, + 295, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 432, + 295, + 475 + ], + "spans": [ + { + "bbox": [ + 56, + 432, + 295, + 475 + ], + "type": "text", + "content": "[38] Qing Yu, Daiki Ikami, Go Irie, and Kiyoharu Aizawa. Multi-task curriculum framework for open-set semi-supervised learning. In European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 477, + 295, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 477, + 295, + 531 + ], + "spans": [ + { + "bbox": [ + 56, + 477, + 295, + 531 + ], + "type": "text", + "content": "[39] Bowen Zhang, Yidong Wang, Wenxin Hou, Hao Wu, Jindong Wang, Manabu Okumura, and Takahiro Shinozaki. Flexmatch: Boosting semi-supervised learning with curriculum pseudo labeling. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 
2, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 533, + 295, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 533, + 295, + 576 + ], + "spans": [ + { + "bbox": [ + 56, + 533, + 295, + 576 + ], + "type": "text", + "content": "[40] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. Mixup: Beyond empirical risk minimization. In International Conference on Learning Representations (ICLR), 2018. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 578, + 295, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 578, + 295, + 620 + ], + "spans": [ + { + "bbox": [ + 56, + 578, + 295, + 620 + ], + "type": "text", + "content": "[41] Mingkai Zheng, Shan You, Lang Huang, Fei Wang, Chen Qian, and Chang Xu. Simmatch: Semi-supervised learning with similarity matching. In Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 623, + 295, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 623, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 56, + 623, + 295, + 654 + ], + "type": "text", + "content": "[42] Yang Zou, Zhiding Yu, Xiaofeng Liu, BVK Kumar, and Jinsong Wang. Confidence regularized self-training. In International Conference on Computer Vision (ICCV), 2019. 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "15380" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file