diff --git "a/2023/Train-Once-for-All Personalization/layout.json" "b/2023/Train-Once-for-All Personalization/layout.json" new file mode 100644--- /dev/null +++ "b/2023/Train-Once-for-All Personalization/layout.json" @@ -0,0 +1,8869 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 188, + 103, + 405, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 103, + 405, + 119 + ], + "spans": [ + { + "bbox": [ + 188, + 103, + 405, + 119 + ], + "type": "text", + "content": "Train-Once-for-All Personalization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 142, + 534, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 142, + 534, + 171 + ], + "spans": [ + { + "bbox": [ + 53, + 142, + 534, + 171 + ], + "type": "text", + "content": "Hong-You Chen†* Yandong Li‡ Yin Cui‡ Mingda Zhang‡ Wei-Lun Chao† Li Zhang† The Ohio State University † Google Research" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 173, + 504, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 173, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 92, + 173, + 504, + 186 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 92, + 173, + 504, + 186 + ], + "type": "text", + "content": " {chen.9301, chao.209}@osu.edu, " + }, + { + "bbox": [ + 92, + 173, + 504, + 186 + ], + "type": "inline_equation", + "content": "\\diamond" + }, + { + "bbox": [ + 92, + 173, + 504, + 186 + ], + "type": "text", + "content": " {yandongli, yincui, mingdaz, zhl} @google.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 213, + 191, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 191, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 191, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 239, + 290, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 239, + 290, + 550 + ], + "spans": [ + { + "bbox": [ + 45, + 239, + 290, + 550 + ], + "type": "text", + "content": "We study the problem of how to train a \"personalization-friendly\" model such that given only the task descriptions, the model can be adapted to different end-users' needs, e.g., for accurately classifying different subsets of objects. One baseline approach is to train a \"generic\" model for classifying a wide range of objects, followed by class selection. In our experiments, we however found it suboptimal, perhaps because the model's weights are kept frozen without being personalized. To address this drawback, we propose Train-once-for-All PERsonalization (TAPER), a framework that is trained just once and can later customize a model for different end-users given their task descriptions. TAPER learns a set of \"basis\" models and a mixer predictor, such that given the task description, the weights (not the predictions!) of the basis models can be on the fly combined into a single \"personalized\" model. Via extensive experiments on multiple recognition tasks, we show that TAPER consistently outperforms the baseline methods in achieving a higher personalized accuracy. Moreover, we show that TAPER can synthesize a much smaller model to achieve comparable performance to a huge generic model, making it \"deployment-friendly\" to resource-limited end devices. 
Interestingly, even without end-users' task descriptions, TAPER can still be specialized to the deployed context based on its past predictions, making it even more \"personalization-friendly\"." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 575, + 127, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 575, + 127, + 586 + ], + "spans": [ + { + "bbox": [ + 47, + 575, + 127, + 586 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 596, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 596, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 596, + 287, + 693 + ], + "type": "text", + "content": "Recent years have witnessed multiple breakthroughs in visual recognition [10, 17, 23, 25, 36], thanks to the advance in deep learning and the accessibility to large datasets. Specifically, existing works have shown the possibility to train a gigantic and versatile \"generic\" model capable of classifying a wide range of over tens of thousands of objects [22, 33], rendering the promising future towards general-purposed AI." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 321, + 212, + 533, + 309 + ], + "blocks": [ + { + "bbox": [ + 321, + 212, + 533, + 309 + ], + "lines": [ + { + "bbox": [ + 321, + 212, + 533, + 309 + ], + "spans": [ + { + "bbox": [ + 321, + 212, + 533, + 309 + ], + "type": "image", + "image_path": "4dca061a99cdaa155aa50c01706b968d182561e31354c801c805d2e81f23519c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 311, + 545, + 366 + ], + "lines": [ + { + "bbox": [ + 304, + 311, + 545, + 366 + ], + "spans": [ + { + "bbox": [ + 304, + 311, + 545, + 366 + ], + "type": "text", + "content": "Figure 1. Examples of personalization via task description. We propose a useful formulation: train-once-for-all personalization. Our \"personalization-friendly\" framework TAPER can on the fly reply to each user's request with a personalized model promptly conditioned on the task description only." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 376, + 546, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 376, + 546, + 568 + ], + "spans": [ + { + "bbox": [ + 304, + 376, + 546, + 568 + ], + "type": "text", + "content": "However, from an end-user's perspective, we often do not need such a versatility at once. Instead, users more often look for models that are specialized to their requests, e.g., for accurately classifying a few but frequently encountered or safety-critical objects in their environments. Taking ImageNet-1K [9] as an example, a ResNet-152 classifier [17] can achieve around " + }, + { + "bbox": [ + 304, + 376, + 546, + 568 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 304, + 376, + 546, + 568 + ], + "type": "text", + "content": " accuracy in recognizing each of the 1K objects, which, while exciting to the vision community, may sound terrible to a visually-impaired user who seeks to smoothly interact with a handful of everyday objects. 
A better solution for end-users is perhaps to construct \"personalized\" models dedicated to their needs, e.g., train a 20-way classifier for everyday objects to attain an accuracy closer to " + }, + { + "bbox": [ + 304, + 376, + 546, + 568 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 304, + 376, + 546, + 568 + ], + "type": "text", + "content": ". Importantly, a personalized model usually requires a smaller capacity/size than a generic one, making it easier to deploy to resource-limited devices." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "Personalization is by no means a new concept. A naive way to achieve it is to retrain a new model upon request, using the corresponding data. Doing so, however, is hardly scalable from a service provider's point of view: the computation for training simply grows linearly with the number of users and their requests. The training latency can also degrade the user experience. Suppose the service provider has sufficient data and is capable of training a generic model, retraining may just sound superfluous: if the objects the end-user cares about are already seen in training the generic model, why bother training on them again for personalization? In this paper, we therefore ask:" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 240, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 240, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 240, + 712 + ], + "type": "text", + "content": "*Work done as a student researcher at Google Research." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11818" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 287, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 287, + 121 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 287, + 121 + ], + "type": "text", + "content": "Can we train a \"personalization-friendly\" model such that after deployed, it can be easily specialized and rapidly condensed based on the end-user's task description, without further training?" 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "spans": [ + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "type": "text", + "content": "To begin with, we investigate a fairly simple idea, which is to train a (large) generic model, followed by class selection for personalization — chopping off the classes that are not of the user's interest from the classification head. While extremely straightforward without further training, this idea can already boost the aforementioned ResNet-152 to " + }, + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "type": "text", + "content": " accuracy on recognizing 20 classes. Nevertheless, this approach does not condense the model for computation and memory efficiency. One may resolve this problem by training a smaller generic model like ResNet-18, whose size is roughly " + }, + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "type": "inline_equation", + "content": "\\frac{1}{5}" + }, + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "type": "text", + "content": " of ResNet-152. However, with limited capacity, ResNet-18 after class selection can only attain " + }, + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "type": "inline_equation", + "content": "92\\%" + }, + { + "bbox": [ + 46, + 128, + 287, + 317 + ], + "type": "text", + "content": " accuracy on classifying 20 classes. We hypothesize if we can somehow personalize the backbone weights as well, the model will be able to better utilize its capacity to tackle the shrunken scope of end-users' tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 319, + 287, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 319, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 46, + 319, + 287, + 390 + ], + "type": "text", + "content": "To address these deficiencies while keeping the personalization process simple, we propose Train-once-for-All PERsonalization (TAPER), a novel framework that is trained just once and can later head-to-toe customizes a condensed model on the fly for different end-users and requests, given their task descriptions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 390, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 390, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 390, + 287, + 581 + ], + "type": "text", + "content": "At the core of TAPER is a set of shareable \"basis\" models inspired by [5, 12], and a \"mixer\" predictor. The basis models have the same neural network architecture, each of which is expected to capture a certain specialty and therefore can be smaller in size than a large generic model. The mixer predictor then takes the user's task description (e.g., \"Classify bicycle, pedestrian, tree, obstacle for me.\") as input, and produces coefficients to linearly combine the weights (not predictions!) of the basis models, condensing them into a \"personalized\" model on the fly. As TAPER adapts to users by predicting corresponding coefficients, not by adjusting the bases, it requires no retraining and enjoys parameter efficiency (e.g., for cloud services). Moreover, since the resulting personalized model is just like a basis model in size, it enjoys computation and memory efficiency during inference and is suitable for edge deployment." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "content": "We introduce a stage-wise training procedure to effectively learn the bases and the mixer predictor. We found that naïve end-to-end training for optimizing personalized accuracy often results in inferior bases that either generalize poorly or are not specialized. We thus dedicate each stage to one desired property, starting with training each basis to generically classify all classes, followed by specializing them to different but fixed portions of data. The final stage then jointly refines the bases, together with learning the mixer predictor, to synthesize classifiers for randomly sampled tasks on the fly to optimize personalized accuracy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "type": "text", + "content": "We validate TAPER on three visual recognition datasets, including ImageNet [9], iNaturalist [39], and DomainNet [31], each of which captures a different personalization scenario. TAPER consistently outperforms the baselines in achieving a higher personalized accuracy. For instance, on ImageNet, TAPER is able to synthesize a ResNet-18 to achieve " + }, + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "type": "inline_equation", + "content": "96\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "type": "text", + "content": " accuracy on classifying 20 classes, " + }, + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "type": "text", + "content": " higher than ResNet-18 with class selection. The accuracy is even higher than ResNet-152 with class selection while using " + }, + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "type": "inline_equation", + "content": "\\frac{1}{5}" + }, + { + "bbox": [ + 304, + 72, + 545, + 252 + ], + "type": "text", + "content": " of the model size. Interestingly, even without end-users' task descriptions, we show that TAPER can still be \"self-specialized\" to the deployed environment conditioned on its past predictions. Most importantly, none of these improvements require further training, making TAPER truly \"personalization-friendly.\"" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 261, + 392, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 261, + 392, + 274 + ], + "spans": [ + { + "bbox": [ + 306, + 261, + 392, + 274 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 281, + 545, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 545, + 449 + ], + "type": "text", + "content": "Personalization. Unlike the standard machine learning (ML) learns a generic model to serve many users, personalization acknowledges users' characteristics and learns each a dedicated model. Its practical value is shown in many applications such as pose estimation [6], ads predictions [2], speech recognition [43], medical ML [15, 40], etc. 
More recently, personalization is studied in the context of federated learning, which focuses on how the users collaborate while training their own models under privacy concern [21, 29, 37]. Differently, our goal is to train a single \"personalization-friendly\" model. This concept is related to meta-learning [13, 18], while it mainly learns to adapt to many new tasks with few-shot data from unseen classes, not for train-once-for-all (each task still needs fine-tuning)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 462, + 545, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 545, + 641 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 545, + 641 + ], + "type": "text", + "content": "Conditional neural networks. Our implementation is inspired by recent architectures that dynamically adapt the networks based on the inputs [7, 41, 44]. Another approach is Mixture-of-Experts (MoE) [34, 35] that scales a model to be powerful and computational-heavy with a group of networks/layers. Given an input, MoE routes it to the related experts and combines their predictions. Our goal is to collapse into a compact model for each task. The motivations of these methods are different from ours. They specialized the network during the inference of an individual input (e.g., \"this image looks like an animal\"), while we specialize based on the overall knowledge of the test environment a user prefers (e.g., \"I'm in a jungle\"). We believe these different levels of personalization (inputs vs. tasks) are complimentary to each other for future consideration." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "Another possible implementation is by a HyperNetwork [16] that learns another network to predict the high-dimensional personalized parameters directly. It remains challenging for modern deep networks due to the large output size and training difficulty [45]. Ours learns to combine several bases instead as a special case of HyperNetwork." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "11819" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "Ensembles and model interpolation. Combining several specialized models to serve more versatile inputs is a widely-used concept. For instance, model ensemble [1, 26, 28] combines several models' predictions for better precision or generalization. Recently, researchers found similar benefits by combining models on the weight space instead of on the outputs [20], motivated by less training cost. We extend the concept to personalize many tasks at once by predicting to combine the basis model parameters." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 198, + 289, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 198, + 289, + 319 + ], + "spans": [ + { + "bbox": [ + 46, + 198, + 289, + 319 + ], + "type": "text", + "content": "Other train-once-for-all tasks. Besides our train-once-for-all personalization, the idea of training once and getting several models is a practical approach in other contexts as well. For example, [4, 27] propose to train a model that later can serve on various platforms of different system resources, significantly reducing the training efforts for neural architecture searches. [19] snapshots the intermediate models in one pass of training and uses them for ensemble. [11] trains a single model that can dynamically adjust the strengths towards multiple loss functions in test time." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 331, + 115, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 115, + 344 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 115, + 344 + ], + "type": "text", + "content": "3. Approach" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 351, + 157, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 351, + 157, + 363 + ], + "spans": [ + { + "bbox": [ + 47, + 351, + 157, + 363 + ], + "type": "text", + "content": "3.1. Problem definition" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "spans": [ + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": "Define a task " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": " as classification over a subset of classes " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_t \\subset \\mathcal{V}" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": ". The goal of personalization is to learn a predictor " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "f_t : \\mathcal{X} \\mapsto \\mathcal{Y}_t" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": ". To handle many tasks at the same time, we further assume we have the task description " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "d_t" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_t" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": ", and we want to build a framework " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "h(d_t)" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": " where given " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "d_t" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": ", it will output " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "f_t" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": ". 
Generally, the task description should provide information about the classes within the task in the form of vector representation. We will leave the realizations and choices of " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "d_t" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": " in subsection 3.5. We consider using a large-scale dataset with many classes covering " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": ", to learn the personalized-friendly function " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "f_t = h(d_t; \\mathcal{V})" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": " parameterized by " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 46, + 370, + 289, + 539 + ], + "type": "text", + "content": " inferences on the task description as guidance for synthesizing a personalized model without further optimization, essentially train-once-for-all personalization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 556, + 287, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 556, + 287, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 556, + 287, + 616 + ], + "type": "text", + "content": "Personalization in a server-user system As a motivating application of train-once-for-all personalization, the personalized model generator " + }, + { + "bbox": [ + 46, + 556, + 287, + 616 + ], + "type": "inline_equation", + "content": "h(\\cdot ,\\mathcal{V})" + }, + { + "bbox": [ + 46, + 556, + 287, + 616 + ], + "type": "text", + "content": " is useful for cloud service deployments in that the server learns " + }, + { + "bbox": [ + 46, + 556, + 287, + 616 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 46, + 556, + 287, + 616 + ], + "type": "text", + "content": " on a large-scale dataset and maintains it for serving many future users." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 617, + 287, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 287, + 701 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 287, + 701 + ], + "type": "text", + "content": "The users are ultimately performing the tasks on end devices such as mobile phones, laptops, drones, etc. The computation resource is often quite limited. This constrains the memory, power, and FLOPs budgets thus making it unfavorable for the users to train or inference large models on their ends. Specifically, train-once-for-all personalization enjoys the following aspects." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "content": "- Scalability. 
We propose a principle way based on a model" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 545, + 96 + ], + "type": "text", + "content": "generator to summarize a large number of tasks (in practice, possibly over millions) as a more scalable approach." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 96, + 547, + 239 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 306, + 96, + 547, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 96, + 547, + 167 + ], + "spans": [ + { + "bbox": [ + 306, + 96, + 547, + 167 + ], + "type": "text", + "content": "- On-the-fly personalization. By modeling " + }, + { + "bbox": [ + 306, + 96, + 547, + 167 + ], + "type": "inline_equation", + "content": "h(d, \\mathcal{V})" + }, + { + "bbox": [ + 306, + 96, + 547, + 167 + ], + "type": "text", + "content": " as a translation from task descriptions to the model weight space, it allows a user to generate a personalized model without any training but just inference. This essentially bypasses the bottleneck of training cost and makes such a personalization system to be closer to a real-time API." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 168, + 547, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 168, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 306, + 168, + 547, + 239 + ], + "type": "text", + "content": "- Condensed personalized models. Our formulation provides an advantage that decouples the number of parameters of model generator " + }, + { + "bbox": [ + 306, + 168, + 547, + 239 + ], + "type": "inline_equation", + "content": "|\\mathcal{V}|" + }, + { + "bbox": [ + 306, + 168, + 547, + 239 + ], + "type": "text", + "content": " and the output personalized models. We can in theory use more parameters in " + }, + { + "bbox": [ + 306, + 168, + 547, + 239 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 306, + 168, + 547, + 239 + ], + "type": "text", + "content": " for a powerful generator and condense it into lightweight personalized models for final deployment." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 247, + 501, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 247, + 501, + 261 + ], + "spans": [ + { + "bbox": [ + 306, + 247, + 501, + 261 + ], + "type": "text", + "content": "3.2. 
A strong baseline: classifier selection" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "spans": [ + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": "Given an input " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": ", we consider " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": " as a general neural network " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "f(\\pmb{x};\\pmb{\\theta})" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": " that consists of a feature extractor parameterized by " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": " with a linear classifier " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\pmb{w} = [\\pmb{w}^{(1)},\\dots,\\pmb{w}^{(|\\mathcal{V}|)}]" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "|\\mathcal{V}|" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": " vectors for output predictions over all classes in " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": ". We denote by " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} = \\{\\psi ,\\pmb{w}\\}" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": ". Let the task specified by a user be a few-way classification task " + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 305, + 266, + 547, + 338 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "spans": [ + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "text", + "content": "One strong baseline to personalize and fulfill the aspects in subsection 3.1 is to assume a generic, non-personalized feature extractor is sufficient and build a personalized classifier " + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_t" + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "text", + "content": " on top of it by selecting only the row vectors in " + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}" + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "text", + "content": " for the relevant classes. 
That is, the personalized parameters for task " + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "inline_equation", + "content": "\\theta_t = \\{\\psi, \\boldsymbol{w}_t\\}" + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "text", + "content": ". As will be shown in section 4, by training a generic feature extractor along with " + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}" + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "text", + "content": " in a standard way followed by classifier selection to retrieve " + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_t" + }, + { + "bbox": [ + 304, + 339, + 547, + 470 + ], + "type": "text", + "content": ", it can largely outperform a non-personalized classifier. It serves as a surprisingly strong baseline for the train-once-for-all personalization." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "text", + "content": "However, we found it suboptimal since the features may also need to be personalized to focus on more dedicated relationships between the classes within a task. As we discussed in the introduction, there are two baseline solutions, to adapt and save " + }, + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "inline_equation", + "content": "\\psi_t" + }, + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "text", + "content": " for every " + }, + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "text", + "content": ", or not to personalize " + }, + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "text", + "content": " but to use a larger and more powerful feature extractor. They both have obvious drawbacks — the former is not scalable in training cost for many tasks, and the latter is computationally unfavorable for end devices — contradicting the requirements of a cloud service system." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 590, + 547, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 590, + 547, + 627 + ], + "spans": [ + { + "bbox": [ + 304, + 590, + 547, + 627 + ], + "type": "text", + "content": "To this end, we are thus motivated in resolving such a dilemma. That is, can we have train-once-for-all personalization for the whole compact network?" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 635, + 538, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 635, + 538, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 635, + 538, + 647 + ], + "type": "text", + "content": "3.3. 
Proposed TAPER: personalization with bases" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "content": "**Formulation:** basis models. We propose TAPER to implement " + }, + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\theta_t = h(\\cdot, \\mathcal{V})" + }, + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "content": " for personalizing the whole network as " + }, + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\theta_t = \\{\\psi_t, w_t\\}" + }, + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "content": ". Inspired by multi-task learning [12], we assume the tasks share similarity (e.g., superclasses, domains, styles, etc) — it is likely that we can represent each" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "11820" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 70, + 521, + 224 + ], + "blocks": [ + { + "bbox": [ + 75, + 70, + 521, + 224 + ], + "lines": [ + { + "bbox": [ + 75, + 70, + 521, + 224 + ], + "spans": [ + { + "bbox": [ + 75, + 70, + 521, + 224 + ], + "type": "image", + "image_path": "77419cbc5e9de94c0739a805847a3b5d661031aaed2b099d96b779b8a7563f52.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 229, + 547, + 265 + ], + "lines": [ + { + "bbox": [ + 46, + 229, + 547, + 265 + ], + "spans": [ + { + "bbox": [ + 46, + 229, + 547, + 265 + ], + "type": "text", + "content": "Figure 2. Overview of TAPER architecture. A user first provides the task description (e.g., a few classes of interest), which will be encoded into text embedding and fed into the mixer predictor to generate the mixers. The parameters of each layer are linear combinations of the basis models based on the mixers. The final outcome is a single basis personalized model, followed by classifier selection." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "text", + "content": "of the personalized model weight vectors " + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_t" + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "text", + "content": " with combinations of a much smaller set of basis vectors " + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "inline_equation", + "content": "\\{\\pmb{v}_1, \\dots, \\pmb{v}_Q\\}" + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "inline_equation", + "content": "|\\pmb{v}| = |\\pmb{\\theta}|" + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "text", + "content": ". 
In our experiments, " + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "text", + "content": " is typically small (e.g., 10) compared to the number of tasks it possibly can handle (e.g., for 20-way classification, there are " + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "inline_equation", + "content": "\\binom{|Q|}{20}" + }, + { + "bbox": [ + 47, + 278, + 287, + 339 + ], + "type": "text", + "content": " combinations)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "content": "For every task, " + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "inline_equation", + "content": "\\{\\pmb{v}_q\\}" + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "content": " are combined into a personalized model " + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_t" + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "content": " with a combination vector " + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "inline_equation", + "content": "\\alpha_{t}" + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "content": ", we call it mixers," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 368, + 287, + 393 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 368, + 287, + 393 + ], + "spans": [ + { + "bbox": [ + 107, + 368, + 287, + 393 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {t} \\left(\\boldsymbol {\\alpha} _ {t}, \\mathcal {V}\\right) = \\sum_ {q} \\boldsymbol {\\alpha} _ {t} [ q ] \\times \\boldsymbol {v} _ {q}, \\tag {1}", + "image_path": "7e81f70ed63460d125096037edaafa4f3d94f1239d9050ca9ebd8ae0eeaff551.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "text", + "content": "where the mixers " + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "inline_equation", + "content": "\\alpha_{t} \\in \\Delta^{Q-1}" + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "text", + "content": "-dimensional vector on the " + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "inline_equation", + "content": "(Q-1)" + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "text", + "content": "-simplex for convexly combining the basis models into a personalized model. Both " + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 46, + 399, + 287, + 435 + ], + "type": "text", + "content": " are learned." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 437, + 288, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 288, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 288, + 616 + ], + "type": "text", + "content": "By adjusting only the mixers for a user, we can then quickly condense the bases into a compact personalized model for the user's future use. We note that the bases are trained to be combined layer by layer element-wisely on weights, not on the activation. This is starkly different from the mixture of experts [35] that maintains several experts and aggregates their predictions, where the model size and computation cost scale with the number of experts. In contrast, TAPER outputs a single basis model of size " + }, + { + "bbox": [ + 46, + 437, + 288, + 616 + ], + "type": "inline_equation", + "content": "|\\pmb{\\theta}| = |\\pmb{v}|" + }, + { + "bbox": [ + 46, + 437, + 288, + 616 + ], + "type": "text", + "content": " and does not scale with " + }, + { + "bbox": [ + 46, + 437, + 288, + 616 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 46, + 437, + 288, + 616 + ], + "type": "text", + "content": ". TAPER fulfills the requirements in subsection 3.1: scalable, on-the-fly personalization, and lightweight. Unlike the baseline, it adapts the whole network, governs by the mixers over the set of bases. An overview of the architecture is provided in Figure 2. We will discuss training TAPER in subsection 3.4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": "Mixer predictor. Our goal is to generate " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_t = h(\\pmb{d}_t, \\mathcal{V})" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": " given the task description. The task description vector is translated into the mixers by a mixer predictor network " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\alpha_t = g(\\pmb{d}_t; \\phi)" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": ", parameterized by " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": ", for selecting the relevant bases dedicated to the task and combining them into a condensed personalized model. We adopt a simple 4-layer multilayer perceptron (MLP) which is shared by all tasks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "spans": [ + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "text", + "content": "Block-wise mixers. So far, we assume to use a single mixers vector " + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "inline_equation", + "content": "\\alpha_{t}" + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "text", + "content": " for the whole network. 
A slight relaxation is to allow each component of the network to have its own mixers such that it provides more freedom for " + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "text", + "content": " to jointly learn to combine layer-wisely. In our experiments on ResNet-18, we use one mixer vector for each of the 4 blocks, i.e., now " + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "inline_equation", + "content": "|\\alpha_t| = 4Q" + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 304, + 278, + 547, + 363 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 369, + 406, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 369, + 406, + 383 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 406, + 383 + ], + "type": "text", + "content": "3.4. Training TAPER" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 388, + 546, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 388, + 546, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 546, + 413 + ], + "type": "text", + "content": "Objective. Building upon Equation 1, let the loss of a task to be " + }, + { + "bbox": [ + 304, + 388, + 546, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_t" + }, + { + "bbox": [ + 304, + 388, + 546, + 413 + ], + "type": "text", + "content": ", we define TAPER objective function as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 420, + 428, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 420, + 428, + 453 + ], + "spans": [ + { + "bbox": [ + 314, + 420, + 428, + 453 + ], + "type": "interline_equation", + "content": "\\min _ {\\boldsymbol {\\phi}, \\mathcal {V} = \\{\\boldsymbol {v} _ {q} \\} _ {q = 1} ^ {Q}} \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\mathcal {L} _ {t} (\\boldsymbol {\\theta} _ {t}),", + "image_path": "0c22ca19da5a70936b7db85ca239ca15636768f09ad7d30f2fb4b64f717702b7.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 325, + 456, + 545, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 456, + 545, + 482 + ], + "spans": [ + { + "bbox": [ + 325, + 456, + 545, + 482 + ], + "type": "interline_equation", + "content": "\\text {w h e r e} \\boldsymbol {\\theta} _ {t} = \\sum_ {q} \\boldsymbol {\\alpha} _ {t} [ q ] \\times \\boldsymbol {v} _ {q}, \\quad \\boldsymbol {\\alpha} _ {t} = \\sigma (g (\\boldsymbol {d} _ {t}; \\phi)), \\tag {2}", + "image_path": "de9976156b3c10d0a4418d59ef58ea29fa59b8ba3b960a07d4466e36b41a4915.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 490, + 547, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 490, + 547, + 538 + ], + "spans": [ + { + "bbox": [ + 304, + 490, + 547, + 538 + ], + "type": "text", + "content": "where we implement " + }, + { + "bbox": [ + 304, + 490, + 547, + 538 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 490, + 547, + 538 + ], + 
"type": "text", + "content": " to be a convex combination by a softmax function " + }, + { + "bbox": [ + 304, + 490, + 547, + 538 + ], + "type": "inline_equation", + "content": "\\sigma(\\cdot)" + }, + { + "bbox": [ + 304, + 490, + 547, + 538 + ], + "type": "text", + "content": " in our experiments, as a form of regularization [12] to avoid it becoming unbounded. Both the basis models and the mixer predictor are to be learned." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 552, + 547, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 552, + 547, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 552, + 547, + 649 + ], + "type": "text", + "content": "Naïve approach. Equation 2 can be optimized end-to-end in standard deep learning frameworks (e.g., TensorFlow) by initializing each basis with different random weights1. One concern is that an individual basis does not learn much about the general knowledge since each basis is likely selected by a few tasks and not trained on enough data, resulting in poor generalization. To better leverage the capacity of more bases, we provide a simple multi-stage training recipe." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 662, + 547, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 662, + 547, + 687 + ], + "spans": [ + { + "bbox": [ + 304, + 662, + 547, + 687 + ], + "type": "text", + "content": "Improved three-stage training. A better strategy is to first have each base be generally knowledgeable and then" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "text", + "content": "1We note that the bases " + }, + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\{v_{q}\\}" + }, + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "text", + "content": " cannot all be initialized with the same weights otherwise it reduces to a single basis network." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11821" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "specialize them. This is inspired by the recent practice of few-shot learning [38, 42], which shows it is very important to initialize the model which will be specialized by a well-trained backbone. The training is in three stages." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 121, + 287, + 300 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 47, + 121, + 287, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 121, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 47, + 121, + 287, + 156 + ], + "type": "text", + "content": "- Stage 1: single basis pre-training. 
We begin with a single network " + }, + { + "bbox": [ + 47, + 121, + 287, + 156 + ], + "type": "inline_equation", + "content": "\\theta^{(0)}" + }, + { + "bbox": [ + 47, + 121, + 287, + 156 + ], + "type": "text", + "content": " to learn the general representation of the whole dataset in a standard way, e.g., with cross-entropy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "text", + "content": "- **Stage 2:** specialized basis models. Next, we want to prepare " + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "text", + "content": " specialized networks as the initialization for the " + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "text", + "content": " bases. We split the dataset into " + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "text", + "content": " shards based on classes or domains. For each shard, we copy " + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}^{(0)}" + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "text", + "content": " as the initialization, fine-tune it, and collect the \"expert\" model as " + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "inline_equation", + "content": "v_{q}" + }, + { + "bbox": [ + 47, + 157, + 287, + 239 + ], + "type": "text", + "content": ". We note that the purpose is just to burn in each basis different domain knowledge as warm starts." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "type": "text", + "content": "- Stage 3: learning to mix the bases for tasks. We jointly learn both the bases " + }, + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "type": "inline_equation", + "content": "\\{\\pmb{v}_q\\}_{q=1}^Q" + }, + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "type": "text", + "content": " and the mixer predictor " + }, + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "type": "inline_equation", + "content": "g(\\pmb{d};\\phi)" + }, + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "type": "text", + "content": " to combine them for all the tasks, guided by the task descriptions. Note that, we use the classifier " + }, + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "type": "inline_equation", + "content": "\\pmb{w}_t" + }, + { + "bbox": [ + 47, + 240, + 287, + 300 + ], + "type": "text", + "content": " selected for each task, building upon subsection 3.2." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 300, + 287, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 287, + 408 + ], + "type": "text", + "content": "Despite its simplicity, we found it crucial for addressing the dilemma in the naïve approach when more bases are used. 
It warm-starts TAPER with well-pre-trained specialized bases thus the mixer predictor only needs to learn to \"mix\" them for a few epochs. This makes the developed cycles much shorter and more flexible. For instance, when the developers collect a new dataset for augmenting the existing bases, it only requires fine-tuning from " + }, + { + "bbox": [ + 46, + 300, + 287, + 408 + ], + "type": "inline_equation", + "content": "\\theta^{(0)}" + }, + { + "bbox": [ + 46, + 300, + 287, + 408 + ], + "type": "text", + "content": ", adding it as a new basis, and re-train the mixer predictor." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 418, + 151, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 418, + 151, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 418, + 151, + 430 + ], + "type": "text", + "content": "3.5. Task descriptions" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "type": "text", + "content": "In subsection 3.1, we assume the personalized model generator " + }, + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "type": "text", + "content": " takes a vector representation of the task and outputs the corresponding personalization model. This is realistic for some applications where (1) the users may not have training data while (2) the task that the user wants to perform can beforehand be pre-defined by the user's preference. The task descriptions not only instruct " + }, + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "type": "inline_equation", + "content": "h(\\pmb{d},\\mathcal{V})" + }, + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "type": "text", + "content": " what kind of personalized model it should generate but also more importantly, for the " + }, + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "type": "inline_equation", + "content": "h(\\pmb{d},\\mathcal{V})" + }, + { + "bbox": [ + 46, + 437, + 287, + 556 + ], + "type": "text", + "content": " to leverage the relationships between tasks during training." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 557, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 287, + 641 + ], + "type": "text", + "content": "The task description can be a flexible design choice. As an example, considering a classification task from ImageNet, a simple way is to create the bag-of-word (BoW) vector for a task, i.e., a 1000-way binary vector with the bits turned on for the corresponding class indexes. The mixer " + }, + { + "bbox": [ + 46, + 557, + 287, + 641 + ], + "type": "inline_equation", + "content": "g(\\pmb{d}_t; \\phi)" + }, + { + "bbox": [ + 46, + 557, + 287, + 641 + ], + "type": "text", + "content": " in TAPER can gradually realize the relationships among classes during training." 
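The weight-space personalization described above lends itself to a short sketch. The following is a minimal illustrative reconstruction, not the authors' implementation: `mixer_mlp`, `basis_weights`, and `task_embedding` are assumed placeholder names, and the per-block softmax combination simply follows the convex-mixing description of the bases and the mixer predictor in this section.

```python
# Illustrative sketch of TAPER-style weight mixing (assumed names, not the paper's code).
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def personalize(task_embedding, basis_weights, mixer_mlp):
    """Combine Q basis models into one personalized model.

    basis_weights: list of Q dicts, each mapping a block name to that
        block's parameter array (all bases share the same architecture).
    mixer_mlp: callable mapping the task embedding to a (num_blocks, Q)
        array of mixing scores, one row per block.
    """
    scores = mixer_mlp(task_embedding)
    personalized = {}
    for b, block in enumerate(basis_weights[0].keys()):
        alpha = softmax(scores[b])                      # convex mixer per block
        personalized[block] = sum(a * basis[block]
                                  for a, basis in zip(alpha, basis_weights))
    return personalized                                 # same size as one basis
```

Because the combination happens in weight space, only the mixed parameters (the size of a single basis network) need to be delivered to the end device.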
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 642, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 287, + 713 + ], + "type": "text", + "content": "Another way is to explicitly leverage the semantics of the classes by extracting the \"textual class names\" (e.g., \"Red wolf\" or \"Buckeye\"), encode each of them into a text embedding via an external pre-trained language model, and average over classes into a vector representation " + }, + { + "bbox": [ + 46, + 642, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\pmb{d}_t" + }, + { + "bbox": [ + 46, + 642, + 287, + 713 + ], + "type": "text", + "content": ". In our experiments, we pre-compute the 1024-dim text embed" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 307, + 69, + 545, + 120 + ], + "blocks": [ + { + "bbox": [ + 335, + 56, + 516, + 66 + ], + "lines": [ + { + "bbox": [ + 335, + 56, + 516, + 66 + ], + "spans": [ + { + "bbox": [ + 335, + 56, + 516, + 66 + ], + "type": "text", + "content": "Table 1. Summary of the datasets in experiments." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 69, + 545, + 120 + ], + "lines": [ + { + "bbox": [ + 307, + 69, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 307, + 69, + 545, + 120 + ], + "type": "table", + "html": "
<table>
<tr><th>Dataset</th><th>Train/Val Size</th><th>#Class</th><th>Task</th></tr>
<tr><td>ImageNet</td><td>1.3M/50K</td><td>1K</td><td>General object recognition</td></tr>
<tr><td>iNaturalist-21</td><td>2.7M/100K</td><td>10K</td><td>Species classification</td></tr>
<tr><td>DomainNet</td><td>410K/177K</td><td>345</td><td>Object recognition with domains</td></tr>
</table>
", + "image_path": "d5b8494e5ff44f31021c4de9ce7ab8b25ec5fbd7ccc81308a302dd72b807a36d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 137, + 545, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 137, + 545, + 256 + ], + "spans": [ + { + "bbox": [ + 304, + 137, + 545, + 256 + ], + "type": "text", + "content": "ding for each class following the prompt ensemble approach in [32] and keep them frozen. Using textual embedding takes the advantage of large-scale language modeling and is more convenient as a compact fixed-dimension representation, unlike BoW depends on the class size. We provide experiments on the choice of task descriptions in subsection 4.5. Interestingly, we show in subsection 5.1 that it also allows the users to use flexible free language descriptions (e.g., \"A fish of deep water having a light organ\") instead of specifying the class name (\"flashlight fish\")." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 268, + 386, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 268, + 386, + 281 + ], + "spans": [ + { + "bbox": [ + 305, + 268, + 386, + 281 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 287, + 365, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 287, + 365, + 300 + ], + "spans": [ + { + "bbox": [ + 306, + 287, + 365, + 300 + ], + "type": "text", + "content": "4.1. Settings" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "type": "text", + "content": "Datasets. To validate the effectiveness of TAPER on three large-scale visual recognition datasets, including ImageNet [9], iNaturalist (2021) [39], and DomainNet [31], each of which captures a different personalization scenario. All of them are single-label classification tasks and the resolution is " + }, + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "type": "text", + "content": ". The summary is in Table 1. For each dataset, we construct the tasks as 20-way classification by sampling from the label space " + }, + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "type": "text", + "content": ". Each image from the training/validation set is randomly assigned with a task description as discussed in subsection 3.5 for training (sampled every epoch) and evaluation, respectively. The goal is to accurately predict the labels from the whole " + }, + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 304, + 306, + 545, + 473 + ], + "type": "text", + "content": " and the metric is the standard top-1 accuracy. More details for each dataset are provided in the corresponding subsections." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "type": "text", + "content": "Implementation details. 
We use the training process similar to the standard on ImageNet [17] for all the datasets, including data pre-processing/augmentation and learning rate schedule (initial learning rate is 0.1 and decay by 0.1 every 30 epochs). We use the SGD optimizer with momentum " + }, + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "type": "inline_equation", + "content": "= 0.9" + }, + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "type": "text", + "content": ", batch size " + }, + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "type": "inline_equation", + "content": "= 128" + }, + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "type": "text", + "content": ", and weight decay " + }, + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "type": "inline_equation", + "content": "= 0.0001" + }, + { + "bbox": [ + 304, + 474, + 545, + 581 + ], + "type": "text", + "content": ". Our experiments are implemented using JAX [3]. We train on randomly-initialized ResNet-18 networks [17] with cross-entropy by default." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "For TAPER, each of the basis models uses the same architecture, and each layer is linearly combined via the mixers. The mixer predictor is a 4-layer MLP (with batchnorms and ReLU non-linearity between each layer) which maps the 1024-dim task description text embedding to the blockwise mixers, as discussed in subsection 3.3. For our proposed three-stage training in subsection 3.4, we train each stage sequentially for " + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "inline_equation", + "content": "100/5/20" + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": " epochs, for the 3 stages, respectively. For a fair comparison, we, therefore, train 125 epochs for the baseline approaches (subsection 3.2). We provide more details in the supplement materials." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11822" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 217, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 217, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 217, + 85 + ], + "type": "text", + "content": "4.2. Train-once-for-all on ImageNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "text", + "content": "We first use ImageNet to develop our method. In reality, the tasks may not be random combinations of classes but somehow have correlations depending on the use case. For instance, a user in a driving scene may ask \"Classify bicycle, pedestrian, tree, obstacle for me.\" Another user may ask for a classifier for a kitchen or for different types of electronics; e.g., \"coffee pot\" and \"espresso maker\" are more likely in the same task. 
To simulate this more realistic/meaningful scenario without losing generality, we assign each image a " + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "text", + "content": "-way task (" + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "inline_equation", + "content": "k = 20" + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "text", + "content": " by default) by sampling from classes that are the nearest " + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "inline_equation", + "content": "2k" + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "text", + "content": " synsets in the WordNet knowledge graph [30] based on its ground-truth label (which is included in the task as well" + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 46, + 90, + 288, + 293 + ], + "type": "text", + "content": "). We use 10 bases for ImageNet experiments. For stage 2 of TAPER training, we simply divide the dataset into 10 chunks by sharding the 1000 classes (i.e., 100 classes per chunk). It is just to initialize the bases as slightly different specialists." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 293, + 288, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 293, + 288, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 293, + 288, + 342 + ], + "type": "text", + "content": "We then train the mixer predictor jointly with bases to personalize conditioned on the task description. The results of TAPER and the baseline approaches using different sizes of networks are in Table 2." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "type": "text", + "content": "Personalization is valuable. Our first observations of the two baseline approaches in subsection 3.2 are: (1) increasing the network depths without personalization (ignoring the tasks) improves the accuracy but saturates at around " + }, + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "type": "text", + "content": ", while (2) simply post-processing a ResNet-18 with classifier selection already achieves " + }, + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "type": "inline_equation", + "content": "92.2\\%" + }, + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "type": "text", + "content": ", i.e., a " + }, + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "type": "inline_equation", + "content": "+22.3\\%" + }, + { + "bbox": [ + 46, + 356, + 287, + 453 + ], + "type": "text", + "content": " gain. This demonstrates the importance of personalization and the feasibility of train-once-for-all personalization." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 467, + 287, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 467, + 287, + 598 + ], + "spans": [ + { + "bbox": [ + 46, + 467, + 287, + 598 + ], + "type": "text", + "content": "TAPER outperforms the strong baseline, with much smaller networks. The baselines keep the features frozen. 
Our TAPER better leverages the model capacity and outputs a fully-personalized network for every task — the adapted ResNet-18 outperforms the classifier selection with a ResNet-152, using only roughly " + }, + { + "bbox": [ + 46, + 467, + 287, + 598 + ], + "type": "inline_equation", + "content": "\\frac{1}{5}" + }, + { + "bbox": [ + 46, + 467, + 287, + 598 + ], + "type": "text", + "content": " of parameters. We note that, although the baseline uses a single feature extractor, it does not have an advantage on parameter efficiency from the users' perspective since it still needs to be copied and delivered to each user's end device. TAPER's ResNet-18 outperforms the baseline counterpart by " + }, + { + "bbox": [ + 46, + 467, + 287, + 598 + ], + "type": "inline_equation", + "content": "3.6\\%" + }, + { + "bbox": [ + 46, + 467, + 287, + 598 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 613, + 287, + 685 + ], + "type": "text", + "content": "Different number of classes in a task. Before we extend our study to other datasets, we first verify the effects of the number of classes in a task. TAPER takes a task vector representation as input and in theory, can handle tasks with different class sizes in one network. In Table 3, we consider training and cross-evaluate TAPER in two scenarios:" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 314, + 86, + 537, + 211 + ], + "blocks": [ + { + "bbox": [ + 323, + 71, + 527, + 82 + ], + "lines": [ + { + "bbox": [ + 323, + 71, + 527, + 82 + ], + "spans": [ + { + "bbox": [ + 323, + 71, + 527, + 82 + ], + "type": "text", + "content": "Table 2. Accuracy " + }, + { + "bbox": [ + 323, + 71, + 527, + 82 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 323, + 71, + 527, + 82 + ], + "type": "text", + "content": " on ImageNet with 20-way tasks." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 314, + 86, + 537, + 211 + ], + "lines": [ + { + "bbox": [ + 314, + 86, + 537, + 211 + ], + "spans": [ + { + "bbox": [ + 314, + 86, + 537, + 211 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>Network</th><th>#Parameters per task</th><th>Classifier selection</th><th>Accuracy</th></tr>
<tr><td>Baseline</td><td>ResNet-152</td><td>60.4M</td><td>✗</td><td>78.4</td></tr>
<tr><td></td><td>ResNet-152</td><td>58.4M</td><td>✓</td><td>95.1 (+16.7)</td></tr>
<tr><td>Baseline</td><td>ResNet-101</td><td>44.7M</td><td>✗</td><td>77.6</td></tr>
<tr><td></td><td>ResNet-101</td><td>42.7M</td><td>✓</td><td>94.8 (+17.2)</td></tr>
<tr><td>Baseline</td><td>ResNet-18</td><td>11.4M</td><td>✗</td><td>69.9</td></tr>
<tr><td></td><td>ResNet-18</td><td>10.9M</td><td>✓</td><td>92.2 (+22.3)</td></tr>
<tr><td>TAPER</td><td>ResNet-18</td><td>10.9M</td><td>✓</td><td>95.8</td></tr>
</table>
", + "image_path": "71d277f8af41f44010a1069e1ed7e6ef90d26cf70063af19455a7b09f4103ad6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 313, + 232, + 537, + 294 + ], + "blocks": [ + { + "bbox": [ + 313, + 216, + 537, + 227 + ], + "lines": [ + { + "bbox": [ + 313, + 216, + 537, + 227 + ], + "spans": [ + { + "bbox": [ + 313, + 216, + 537, + 227 + ], + "type": "text", + "content": "Table 3. TAPER on ImageNet with different classes per task." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 232, + 537, + 294 + ], + "lines": [ + { + "bbox": [ + 313, + 232, + 537, + 294 + ], + "spans": [ + { + "bbox": [ + 313, + 232, + 537, + 294 + ], + "type": "table", + "html": "
<table>
<tr><th>Training/Evaluation</th><th>Fixed 20-way</th><th>Dynamic [5, 100]-way</th></tr>
<tr><td>Baseline</td><td>92.2 ±0.36</td><td>88.5 ±0.78</td></tr>
<tr><td>Fixed 20-way</td><td>95.8 ±0.45</td><td>93.6 ±0.85</td></tr>
<tr><td>Dynamic [5, 100]-way</td><td>95.2 ±0.71</td><td>95.0 ±0.68</td></tr>
</table>
", + "image_path": "ccd914ad63b0a64b5a87fa4fb6de169397aee3109494ddc6cb9d46dc608f4610.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 314, + 315, + 537, + 412 + ], + "blocks": [ + { + "bbox": [ + 323, + 300, + 529, + 311 + ], + "lines": [ + { + "bbox": [ + 323, + 300, + 529, + 311 + ], + "spans": [ + { + "bbox": [ + 323, + 300, + 529, + 311 + ], + "type": "text", + "content": "Table 4. Accuracy (%) on iNaturalist with 20-way tasks." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 314, + 315, + 537, + 412 + ], + "lines": [ + { + "bbox": [ + 314, + 315, + 537, + 412 + ], + "spans": [ + { + "bbox": [ + 314, + 315, + 537, + 412 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>Network</th><th>#Parameters per task</th><th>Classifier selection</th><th>Accuracy</th></tr>
<tr><td>Baseline</td><td>ResNet-101</td><td>63.1M</td><td>✗</td><td>84.3</td></tr>
<tr><td></td><td>ResNet-101</td><td>42.7M</td><td>✓</td><td>97.7 (+13.4)</td></tr>
<tr><td>Baseline</td><td>ResNet-18</td><td>16.0M</td><td>✗</td><td>72.3</td></tr>
<tr><td></td><td>ResNet-18</td><td>10.9M</td><td>✓</td><td>90.8 (+18.5)</td></tr>
<tr><td>TAPER</td><td>ResNet-18</td><td>10.9M</td><td>✓</td><td>95.9</td></tr>
</table>
", + "image_path": "a4cc6030f6f5a780561bd359fa837adc6ee793a922cfaaea1ccecbff6a1c8c48.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 422, + 545, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 422, + 545, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 422, + 545, + 483 + ], + "type": "text", + "content": "the tasks are either fixed 20-way or dynamically drawn with " + }, + { + "bbox": [ + 304, + 422, + 545, + 483 + ], + "type": "inline_equation", + "content": "5 \\sim 100" + }, + { + "bbox": [ + 304, + 422, + 545, + 483 + ], + "type": "text", + "content": " ways. We observe that TAPER can handle all the cases reasonably well, where it is slightly better if training and evaluation are matched on the same scenario. For simplicity, later we will focus on the fixed 20-way scenario." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 491, + 487, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 491, + 487, + 503 + ], + "spans": [ + { + "bbox": [ + 305, + 491, + 487, + 503 + ], + "type": "text", + "content": "4.3. Fine-grained species classification" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 510, + 545, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 640 + ], + "type": "text", + "content": "Another concrete use case of personalization is fine-grained predictions in a specific topic. For instance, an entomologist might want to classify different kinds of moths. TAPER is particularly helpful for supporting scientific research in the wild that has constraints on computation resources or Internet bandwidth. We simulate such a scenario on the iNaturalist (2021) datasets that have 10,000 species from 11 super-categories such as \"Mammals\" and \"Reptiles\". We construct each image a 20-way task description by sampling other classes from the same super-category. We use " + }, + { + "bbox": [ + 304, + 510, + 545, + 640 + ], + "type": "inline_equation", + "content": "Q = 3 \\times 11" + }, + { + "bbox": [ + 304, + 510, + 545, + 640 + ], + "type": "text", + "content": " bases for TAPER here." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "In Table 4, we again see TAPER's superiority — comparable performance and fewer parameters compared to the baseline. Notably, here we see the clear benefits of classifier selection. When the number of classes is large, cutting the classes that are not likely of the user's interest can save significant parameters and achieve higher accuracy." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 287, + 713 + ], + "type": "text", + "content": "2Note that, we encode each class and average over classes as the task embedding thus it will not leak the ground-truths." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11823" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 64, + 86, + 529, + 194 + ], + "blocks": [ + { + "bbox": [ + 62, + 70, + 530, + 82 + ], + "lines": [ + { + "bbox": [ + 62, + 70, + 530, + 82 + ], + "spans": [ + { + "bbox": [ + 62, + 70, + 530, + 82 + ], + "type": "text", + "content": "Table 5. Personalization with tasks specifying both classes and domains. Test accuracy on DomainNet per domain is reported." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 64, + 86, + 529, + 194 + ], + "lines": [ + { + "bbox": [ + 64, + 86, + 529, + 194 + ], + "spans": [ + { + "bbox": [ + 64, + 86, + 529, + 194 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>Network</th><th>#Parameters per task</th><th>Classifier selection</th><th>Real</th><th>Painting</th><th>Clipart</th><th>Quickdraw</th><th>Infograph</th><th>Sketch</th><th>Avg. over domains</th></tr>
<tr><td>Baseline</td><td>ResNet-101</td><td>43.4M</td><td>✗</td><td>75.1</td><td>61.0</td><td>73.9</td><td>70.0</td><td>34.1</td><td>62.5</td><td>62.8</td></tr>
<tr><td></td><td>ResNet-101</td><td>42.7M</td><td>✓</td><td>93.5</td><td>85.8</td><td>92.1</td><td>94.3</td><td>63.2</td><td>85.6</td><td>85.8 (+23.0)</td></tr>
<tr><td>Baseline</td><td>ResNet-18</td><td>11.1M</td><td>✗</td><td>74.2</td><td>59.4</td><td>72.1</td><td>69.8</td><td>32.0</td><td>62.1</td><td>61.6</td></tr>
<tr><td></td><td>ResNet-18</td><td>10.9M</td><td>✓</td><td>93.3</td><td>84.8</td><td>91.1</td><td>94.0</td><td>61.4</td><td>84.5</td><td>84.9 (+23.3)</td></tr>
<tr><td>TAPER (1 basis/domain)</td><td>ResNet-18</td><td>10.9M</td><td>✓</td><td>96.0</td><td>90.9</td><td>94.6</td><td>96.7</td><td>74.0</td><td>90.6</td><td>90.5</td></tr>
<tr><td>TAPER (3 bases/domain)</td><td>ResNet-18</td><td>10.9M</td><td>✓</td><td>96.7</td><td>92.4</td><td>95.7</td><td>97.5</td><td>77.6</td><td>91.9</td><td>92.0</td></tr>
</table>
", + "image_path": "7e0b6cffe1042bd85a43b322a302d87880c61ca0db43e68e419cc1e72f28ec42.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 72, + 237, + 523, + 394 + ], + "blocks": [ + { + "bbox": [ + 46, + 214, + 545, + 236 + ], + "lines": [ + { + "bbox": [ + 46, + 214, + 545, + 236 + ], + "spans": [ + { + "bbox": [ + 46, + 214, + 545, + 236 + ], + "type": "text", + "content": "Table 6. Ablation study for different design choices of TAPER. The indentation with different symbols denotes adding (+) / removing (-) a component, or using a variant (○). We report the mean±std based on 3 runs on ResNet-18. * Accuracy here is averaged over examples." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 72, + 237, + 523, + 394 + ], + "lines": [ + { + "bbox": [ + 72, + 237, + 523, + 394 + ], + "spans": [ + { + "bbox": [ + 72, + 237, + 523, + 394 + ], + "type": "table", + "html": "
<table>
<tr><th>Design choices: Methods / Datasets (#Bases Q)</th><th>ImageNet (10)</th><th>iNaturalist (33)</th><th>DomainNet* (18)</th><th>Avg. Accuracy</th></tr>
<tr><td>① Standard, w/o personalization</td><td>69.9 ±0.25</td><td>72.3 ±0.52</td><td>65.8 ±0.23</td><td>69.3</td></tr>
<tr><td>② + Classifier selection: a strong baseline</td><td>92.2 ±0.36</td><td>90.8 ±0.75</td><td>88.4 ±0.54</td><td>90.5</td></tr>
<tr><td>③ TAPER w/ naïve training & classifier selection</td><td>81.8 ±2.45</td><td>75.7 ±3.01</td><td>78.6 ±1.44</td><td>78.7</td></tr>
<tr><td>④ TAPER at Stage 1</td><td>69.8 ±0.34</td><td>72.3 ±0.46</td><td>65.8 ±0.26</td><td>69.3</td></tr>
<tr><td>⑤ + Stage 2 & classifier selection</td><td>91.2 ±1.56</td><td>89.3 ±2.45</td><td>88.5 ±1.16</td><td>89.7</td></tr>
<tr><td>⑥ + uniform weight average</td><td>86.1 ±1.52</td><td>15.8 ±7.55</td><td>87.2 ±2.66</td><td>63.0</td></tr>
<tr><td>⑦ + fine-tuning w/o task description</td><td>92.1 ±0.56</td><td>91.0 ±0.76</td><td>88.4 ±0.44</td><td>90.5</td></tr>
<tr><td>⑧ + Stage 3 (complete TAPER)</td><td>95.8 ±0.45</td><td>95.9 ±0.72</td><td>94.1 ±0.63</td><td>95.3</td></tr>
<tr><td>⑨ ○ BoW task description</td><td>94.9 ±0.51</td><td>93.1 ±0.81</td><td>93.5 ±0.74</td><td>93.8</td></tr>
<tr><td>⑩ - Block-wise mixers</td><td>94.0 ±0.24</td><td>93.1 ±1.20</td><td>91.7 ±0.39</td><td>92.9</td></tr>
<tr><td>⑪ - classifier selection</td><td>84.3 ±0.57</td><td>81.0 ±1.75</td><td>87.5 ±0.64</td><td>84.3</td></tr>
</table>
", + "image_path": "049ca630b66eef2360890cd794b03c09b8120f5012c48da0a2eecb1c107ea91a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 413, + 258, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 413, + 258, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 413, + 258, + 426 + ], + "type": "text", + "content": "4.4. Personalization with domain description" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 434, + 287, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 434, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 46, + 434, + 287, + 506 + ], + "type": "text", + "content": "The task information passed to the TAPER mixer predictor can be a flexible description of the tasks of users' interest. We go beyond classes and consider if the users provide domain information related to the image styles. For instance, a user may ask: \"help me classify flowers in paintings\" or \"I want a cartoon animals classifier\"." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 510, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 288, + 713 + ], + "type": "text", + "content": "We investigate such a use case on the DomainNet dataset that provides 6 domains of image styles over 345 common objects. Here, we prepare the task descriptions by attaching the domain name before each of the class names, e.g., \"This is a sketch airplane.\", encoding each class to retrieve the textual embedding, and averaging over the classes within the task. Each task is from one domain but with different 20 class combinations. We perform stage 2 training on the division of domains. The test accuracy per domain is summarized in Table 5. We see TAPER consistently outperforms the baselines for all the domains, more on the harder domains (e.g., Infograph and Sketch). This echo to why TAPER improves by using 1 basis per domain (intuitively, it may depend more on domains) — the ideal features are likely domain-specific. By adding up to 3 bases per domain, TAPER can further improve since it has more freedom to personalize by considering both the domains and classes." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 413, + 454, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 413, + 454, + 426 + ], + "spans": [ + { + "bbox": [ + 306, + 413, + 454, + 426 + ], + "type": "text", + "content": "4.5. Remarks on design choices" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 432, + 545, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 432, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 305, + 432, + 545, + 467 + ], + "type": "text", + "content": "Here we verify our design choices proposed in subsection 3.3 and subsection 3.4. Please refer to the indexes in Table 6. 
We observe:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 469, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 306, + 469, + 545, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 469, + 545, + 528 + ], + "spans": [ + { + "bbox": [ + 306, + 469, + 545, + 528 + ], + "type": "text", + "content": "- TAPER with naive training (③) outperforms a non-personalized network (①) but not the classifier selection baseline (②), even (3) is attached with classifier selection already. We hypothesize the bases are not properly trained and poor in generalization." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 529, + 545, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 529, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 306, + 529, + 545, + 600 + ], + "type": "text", + "content": "- As sanity checks, TAPER's stage 1 (④) is basically ① but trained less (i.e., training more cannot improve). Stage 2 (⑤) is slightly worse than ② as expected since the models are specialized on a shard of the dataset. Simply averaging them on weights (⑥) will not become a stronger model but fine-tuning it (⑦) can recover it to ②." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 601, + 545, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 601, + 545, + 660 + ], + "spans": [ + { + "bbox": [ + 306, + 601, + 545, + 660 + ], + "type": "text", + "content": "- From ⑧ to ①, TAPER leverages task descriptions to personalize the features by the mixer predictor thus outperforming the baseline (②). Text embedding is better for task descriptions compared to BoW vectors (⑨). It is preferred to have mixers block-wise (⑩)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 661, + 545, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 661, + 545, + 699 + ], + "spans": [ + { + "bbox": [ + 306, + 661, + 545, + 699 + ], + "type": "text", + "content": "- Removing classifier selection from TAPER (⑪) has a big impact. However, comparing ⑪ to ① and ④, we validate that TAPER indeed learns personalized features." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 700, + 533, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 700, + 533, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 700, + 533, + 712 + ], + "type": "text", + "content": "- Complete TAPER (⑧) performs the best consistently." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11824" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 51, + 86, + 284, + 159 + ], + "blocks": [ + { + "bbox": [ + 80, + 71, + 253, + 82 + ], + "lines": [ + { + "bbox": [ + 80, + 71, + 253, + 82 + ], + "spans": [ + { + "bbox": [ + 80, + 71, + 253, + 82 + ], + "type": "text", + "content": "Table 7. Free languages descriptions of classes." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 51, + 86, + 284, + 159 + ], + "lines": [ + { + "bbox": [ + 51, + 86, + 284, + 159 + ], + "spans": [ + { + "bbox": [ + 51, + 86, + 284, + 159 + ], + "type": "table", + "html": "
<table>
<tr><th>Users' free languages</th><th>“Did you mean this?”</th></tr>
<tr><th>WordNet Definitions</th><th>Class names</th></tr>
<tr><td>“a drinking glass with a base and stem”</td><td>“goblet”</td></tr>
<tr><td>“live associated with sea anemones”</td><td>“anemones fish”</td></tr>
<tr><td>“a tall elegant chest of drawers”</td><td>“chiffonier”</td></tr>
</table>
", + "image_path": "2ed854a731e1c9a306226cd3c222137d57529367f911113eda48c81dee000893.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 175, + 276, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 175, + 276, + 190 + ], + "spans": [ + { + "bbox": [ + 47, + 175, + 276, + 190 + ], + "type": "text", + "content": "5. Applications, Discussions, and Conclusion" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "spans": [ + { + "bbox": [ + 47, + 197, + 287, + 232 + ], + "type": "text", + "content": "We consider practical use cases and discussions. We provide more studies and evaluations in the supplementary materials, including the effects of the number of bases/tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 239, + 236, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 236, + 251 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 236, + 251 + ], + "type": "text", + "content": "5.1. Class descriptions in free languages" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 257, + 287, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 257, + 287, + 424 + ], + "spans": [ + { + "bbox": [ + 46, + 257, + 287, + 424 + ], + "type": "text", + "content": "So far we use a pre-trained language encoder to embed the class names via prompts. Since the language encoder can handle general textual descriptions, this allows the users to enjoy more flexibility in their descriptions. To demonstrate such an advantage, we still train TAPER with class name descriptions, but in evaluation, we replace them with free languages that do not describe the class names explicitly for encoding, by using the definitions in the WordNet dictionary. See examples in Table 7. Perhaps surprisingly, TAPER is robust to such replacement. In ImageNet experiments, it achieves " + }, + { + "bbox": [ + 46, + 257, + 287, + 424 + ], + "type": "inline_equation", + "content": "94.2\\%" + }, + { + "bbox": [ + 46, + 257, + 287, + 424 + ], + "type": "text", + "content": " accuracy, slightly dropped from " + }, + { + "bbox": [ + 46, + 257, + 287, + 424 + ], + "type": "inline_equation", + "content": "95.8\\%" + }, + { + "bbox": [ + 46, + 257, + 287, + 424 + ], + "type": "text", + "content": " in Table 2. We also compared the mixers predicted from class names and free languages for each class — we see a high 0.92 cosine similarity; they select similar bases." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 430, + 274, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 430, + 274, + 443 + ], + "spans": [ + { + "bbox": [ + 47, + 430, + 274, + 443 + ], + "type": "text", + "content": "5.2. Self-improvement without task descriptions" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 449, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 287, + 544 + ], + "type": "text", + "content": "So far, we have assumed that the task description is provided for personalization. We show TAPER can provide some training-free personalization even without a description but given the unlabeled test data of the task. 
This is useful in some scenarios such as a smart surveillance camera keeps collecting images from the same environment and wants to refine its classifier for future predictions. Concretely, assuming we have trained the TAPER model," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 544, + 286, + 615 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 47, + 544, + 286, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 544, + 286, + 567 + ], + "spans": [ + { + "bbox": [ + 47, + 544, + 286, + 567 + ], + "type": "text", + "content": "1. Begin with a standard, non-personalized classifier (e.g., the stage 1 model) to predict a batch of test data." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 569, + 286, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 569, + 286, + 592 + ], + "spans": [ + { + "bbox": [ + 47, + 569, + 286, + 592 + ], + "type": "text", + "content": "2. Extract the top most common pseudo labels and use them to construct the task description." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 593, + 286, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 593, + 286, + 615 + ], + "spans": [ + { + "bbox": [ + 47, + 593, + 286, + 615 + ], + "type": "text", + "content": "3. Use the mixer predictor to combine a personalized model and repeat from (2) over time." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 616, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 616, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 47, + 616, + 287, + 652 + ], + "type": "text", + "content": "We demonstrate with a case of a 20-way task sampled from ImageNet in Figure 3: it can gradually estimate the task and improve along with seeing more test data." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 659, + 110, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 659, + 110, + 671 + ], + "spans": [ + { + "bbox": [ + 47, + 659, + 110, + 671 + ], + "type": "text", + "content": "5.3. Analysis" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "Visualization. To understand if the bases and the mixers are learned to tailor different tasks, we visualize their mixers " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\alpha_{t}" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": " and pairwise cosine similarity of the parameters of" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 370, + 71, + 485, + 144 + ], + "blocks": [ + { + "bbox": [ + 370, + 71, + 485, + 144 + ], + "lines": [ + { + "bbox": [ + 370, + 71, + 485, + 144 + ], + "spans": [ + { + "bbox": [ + 370, + 71, + 485, + 144 + ], + "type": "image", + "image_path": "00817fe94dfbaf7c563f2ef850a6b46c857ebc0e313cb0ecf373f05dcb056f30.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 148, + 545, + 182 + ], + "lines": [ + { + "bbox": [ + 305, + 148, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 305, + 148, + 545, + 182 + ], + "type": "text", + "content": "Figure 3. Self-improvement without task descriptions. 
In each step, we predict the top common classes in the test batch, retrieve the task embedding, and re-generate the personalized model." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 312, + 189, + 422, + 263 + ], + "blocks": [ + { + "bbox": [ + 312, + 189, + 422, + 263 + ], + "lines": [ + { + "bbox": [ + 312, + 189, + 422, + 263 + ], + "spans": [ + { + "bbox": [ + 312, + 189, + 422, + 263 + ], + "type": "image", + "image_path": "41c0d4eeec33957ce550994aafb0cfbcf7880edbf95510a47762d113061fcf34.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 270, + 403, + 281 + ], + "lines": [ + { + "bbox": [ + 331, + 270, + 403, + 281 + ], + "spans": [ + { + "bbox": [ + 331, + 270, + 403, + 281 + ], + "type": "text", + "content": "(a) t-SNE of mixers" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 430, + 188, + 541, + 269 + ], + "blocks": [ + { + "bbox": [ + 430, + 188, + 541, + 269 + ], + "lines": [ + { + "bbox": [ + 430, + 188, + 541, + 269 + ], + "spans": [ + { + "bbox": [ + 430, + 188, + 541, + 269 + ], + "type": "image", + "image_path": "ee7f8b370de220f54ef687c99b63684e0f77577fa4a227be90afe9cb35f8dd09.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 436, + 270, + 533, + 281 + ], + "lines": [ + { + "bbox": [ + 436, + 270, + 533, + 281 + ], + "spans": [ + { + "bbox": [ + 436, + 270, + 533, + 281 + ], + "type": "text", + "content": "(b) Bases cosine similarity" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 283, + 545, + 327 + ], + "lines": [ + { + "bbox": [ + 305, + 283, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 305, + 283, + 545, + 327 + ], + "type": "text", + "content": "Figure 4. Visualization. (a) Predicted mixers of tasks contain \"Bird\" and \"Cat\" in ImageNet (each task colored by the sum of sorted class IDs). (b) Similarity matrix of the basis parameters learned on DomainNet. One basis for each domain." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 340, + 545, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 340, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 305, + 340, + 545, + 364 + ], + "type": "text", + "content": "bases " + }, + { + "bbox": [ + 305, + 340, + 545, + 364 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 305, + 340, + 545, + 364 + ], + "type": "text", + "content": " in Figure 4. We see different tasks indeed leverage different specialized bases." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 384, + 545, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 384, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 304, + 384, + 545, + 612 + ], + "type": "text", + "content": "Limitations: the price of personalization. Given the text embedding of the class names, can TAPER extend to classes not in training? To construct the classifier for unseen classes, we follow the zero-shot learning literature [14] to learn an extra mapping from the image feature space to the text embedding space on seen classes in ImageNet-1K, and evaluate unseen classes from ImageNet-21K. We observe it can hardly have such free lunch — using 10 bases is worse than using one. 
We hypothesize two reasons: (1) plainly fitting seen classes (better) inevitably degrades unseen performance, consistent with previous studies [8, 24]. (2) The relationships between text and vision may not be learned yet in training or have changed significantly in new classes. For instance, it might learn both \"Crown\" and \"Daisy\" in training, but \"Crown daisy\" is visually different from them. This will be our future study. Practically, developers might consider expanding the training dataset, using text with detailed visual descriptions, or augmenting TAPER with advanced optimization that promotes generalization." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 622, + 380, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 622, + 380, + 633 + ], + "spans": [ + { + "bbox": [ + 306, + 622, + 380, + 633 + ], + "type": "text", + "content": "5.4. Conclusion" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 642, + 545, + 713 + ], + "type": "text", + "content": "We propose a new framework, train-once-for-all personalization, named TAPER, that is trained just once and can support many end-users given their task descriptions only. TAPER is simple and general. Personalization with TAPER is scalable, training-free, compact, and effective on various applications we evaluated." + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11825" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 134 + ], + "type": "text", + "content": "[1] Armenii Ashukha, Alexander Lyzhov, Dmitry Molchanov, and Dmitry Vetrov. Pitfalls of in-domain uncertainty estimation and ensembling in deep learning. arXiv preprint arXiv:2002.06470, 2020. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 288, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 288, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 288, + 179 + ], + "type": "text", + "content": "[2] Mikhail Bilenko and Matthew Richardson. Predictive client-side profiles for personalized advertising. In Proceedings of the 17th ACM SIGKDD international conference on Knowledge discovery and data mining, pages 413-421, 2011. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 180, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 180, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 180, + 288, + 235 + ], + "type": "text", + "content": "[3] James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, George Necula, Adam Paszke, Jake VanderPlas, Skye Wanderman-Milne, and Qiao Zhang. JAX: composable transformations of Python+NumPy programs, 2018. 5, 12" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 288, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 288, + 280 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 288, + 280 + ], + "type": "text", + "content": "[4] Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, and Song Han. Once for all: Train one network and specialize it for efficient deployment. In International Conference on Learning Representations, 2020. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 280, + 287, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 280, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 280, + 287, + 312 + ], + "type": "text", + "content": "[5] Soravit Changpinyo, Wei-Lun Chao, Boqing Gong, and Fei Sha. Synthesized classifiers for zero-shot learning. In CVPR, 2016. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 313, + 288, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 313, + 288, + 367 + ], + "spans": [ + { + "bbox": [ + 53, + 313, + 288, + 367 + ], + "type": "text", + "content": "[6] James Charles, Tomas Pfister, Derek Magee, David Hogg, and Andrew Zisserman. Personalizing human video pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3063-3072, 2016. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 369, + 288, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 288, + 423 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 288, + 423 + ], + "type": "text", + "content": "[7] Yinpeng Chen, Xiyang Dai, Mengchen Liu, Dongdong Chen, Lu Yuan, and Zicheng Liu. Dynamic convolution: Attention over convolution kernels. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 424, + 288, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 424, + 288, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 424, + 288, + 479 + ], + "type": "text", + "content": "[8] Yinbo Chen, Zhuang Liu, Huijuan Xu, Trevor Darrell, and Xiaolong Wang. Meta-baseline: Exploring simple meta-learning for few-shot learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9062-9071, 2021. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 480, + 288, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 480, + 288, + 524 + ], + "spans": [ + { + "bbox": [ + 53, + 480, + 288, + 524 + ], + "type": "text", + "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 
1, 2, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 525, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 525, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 525, + 287, + 590 + ], + "type": "text", + "content": "[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 288, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 288, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 288, + 624 + ], + "type": "text", + "content": "[11] Alexey Dosovitskiy and Josip Djolonga. You only train once: Loss-conditional training of deep networks. In International Conference on Learning Representations, 2020. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 624, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 646 + ], + "type": "text", + "content": "[12] An Evgeniou and Massimiliano Pontil. Multi-task feature learning. In NeurIPS, 2007. 2, 3, 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 689 + ], + "type": "text", + "content": "[13] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In International conference on machine learning, pages 1126–1135. PMLR, 2017. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 690, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 690, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 690, + 288, + 713 + ], + "type": "text", + "content": "[14] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. De" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "vise: A deep visual-semantic embedding model. Advances in neural information processing systems, 26, 2013. 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 96, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 129 + ], + "type": "text", + "content": "[15] Jeremy Goecks, Vahid Jalili, Laura M Heiser, and Joe W Gray. How machine learning will transform biomedicine. Cell, 181(1):92-101, 2020. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 129, + 545, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 152 + ], + "type": "text", + "content": "[16] David Ha, Andrew Dai, and Quoc V Le. Hypernetworks. 
arXiv preprint arXiv:1609.09106, 2016. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 153, + 545, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 153, + 545, + 186 + ], + "spans": [ + { + "bbox": [ + 307, + 153, + 545, + 186 + ], + "type": "text", + "content": "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 1, 5, 11, 12" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 186, + 545, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 231 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 231 + ], + "type": "text", + "content": "[18] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. Meta-learning in neural networks: A survey. IEEE transactions on pattern analysis and machine intelligence, 44(9):5149-5169, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 232, + 545, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 232, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 307, + 232, + 545, + 276 + ], + "type": "text", + "content": "[19] Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E. Hopcroft, and Kilian Q. Weinberger. Snapshot ensembles: Train 1, get m for free. In International Conference on Learning Representations, 2017. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 277, + 545, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 277, + 545, + 322 + ], + "spans": [ + { + "bbox": [ + 307, + 277, + 545, + 322 + ], + "type": "text", + "content": "[20] Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov, and Andrew Gordon Wilson. Averaging weights leads to wider optima and better generalization. arXiv preprint arXiv:1803.05407, 2018. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 323, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 323, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 307, + 323, + 545, + 365 + ], + "type": "text", + "content": "[21] Yihan Jiang, Jakub Konečný, Keith Rush, and Sreeram Kannan. Improving federated learning personalization via model agnostic meta learning. arXiv preprint arXiv:1909.12488, 2019. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 366, + 545, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 366, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 307, + 366, + 545, + 434 + ], + "type": "text", + "content": "[22] Armand Joulin, Laurens van der Maaten, Allan Jabri, and Nicolas Vasilache. Learning visual features from large weakly supervised data. In Bastian Leibe, Jiri Matas, Nicu Sebe, and Max Welling, editors, Computer Vision – ECCV 2016, pages 67–84, Cham, 2016. Springer International Publishing. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 434, + 545, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 545, + 490 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 545, + 490 + ], + "type": "text", + "content": "[23] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In F. Pereira, C.J. Burges, L. Bottou, and K.Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 25. Curran Associates, Inc., 2012. 
1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 491, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 491, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 491, + 545, + 544 + ], + "type": "text", + "content": "[24] Ananya Kumar, Aditi Raghunathan, Robbie Matthew Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. In International Conference on Learning Representations, 2022. 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 546, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 546, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 307, + 546, + 545, + 601 + ], + "type": "text", + "content": "[25] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 601, + 545, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 601, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 307, + 601, + 545, + 646 + ], + "type": "text", + "content": "[26] Ekaterina Lobacheva, Nadezhda Chirkova, Maxim Kodryan, and Dmitry P Vetrov. On power laws in deep ensembles. Advances In Neural Information Processing Systems, 33:2375-2385, 2020. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 647, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 713 + ], + "type": "text", + "content": "[27] Wei Lou, Lei Xun, Amin Sabet, Jia Bi, Jonathon Hare, and Geoff V Merrett. Dynamic-ofa: Runtime dnn architecture switching for performance scaling on heterogeneous embedded platforms. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3110-3118, 2021. 3" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "11826" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[28] Wesley J Maddox, Pavel Izmailov, Timur Garipov, Dmitry P Vetrov, and Andrew Gordon Wilson. A simple baseline for bayesian uncertainty in deep learning. Advances in Neural Information Processing Systems, 32, 2019. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 287, + 161 + ], + "type": "text", + "content": "[29] Yishay Mansour, Mehryar Mohri, Jae Ro, and Ananda Theertha Suresh. Three approaches for personalization with applications to federated learning. 
arXiv preprint arXiv:2002.10619, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 162, + 287, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 162, + 287, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 162, + 287, + 183 + ], + "type": "text", + "content": "[30] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 6, 11" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 184, + 287, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 287, + 237 + ], + "type": "text", + "content": "[31] Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. Moment matching for multi-source domain adaptation. In Proceedings of the IEEE International Conference on Computer Vision, pages 1406-1415, 2019. 2, 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 239, + 287, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 287, + 304 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 287, + 304 + ], + "type": "text", + "content": "[32] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 5, 11" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 305, + 287, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 305, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 48, + 305, + 287, + 403 + ], + "type": "text", + "content": "[33] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 8748-8763. PMLR, 18-24 Jul 2021. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 404, + 287, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 404, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 48, + 404, + 287, + 459 + ], + "type": "text", + "content": "[34] Carlos Riquelme, Joan Puigcerver, Basil Mustafa, Maxim Neumann, Rodolphe Jenatton, André Susano Pinto, Daniel Keysers, and Neil Houlsby. Scaling vision with sparse mixture of experts. Advances in Neural Information Processing Systems, 34:8583-8595, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 460, + 287, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 460, + 287, + 513 + ], + "spans": [ + { + "bbox": [ + 48, + 460, + 287, + 513 + ], + "type": "text", + "content": "[35] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538, 2017. 
2, 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "text", + "content": "[36] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, 2015. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 559, + 287, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 592 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 592 + ], + "type": "text", + "content": "[37] Virginia Smith, Chao-Kai Chiang, Maziar Sanjabi, and Ameet S Talwalkar. Federated multi-task learning. Advances in neural information processing systems, 30, 2017. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 593, + 287, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 593, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 593, + 287, + 646 + ], + "type": "text", + "content": "[38] Yonglong Tian, Yue Wang, Dilip Krishnan, Joshua B Tenenbaum, and Phillip Isola. Rethinking few-shot image classification: a good embedding is all you need? In European Conference on Computer Vision, pages 266-282. Springer, 2020. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "text", + "content": "[39] Grant Van Horn, Elijah Cole, Sara Beery, Kimberly Wilber, Serge Belongie, and Oisin Mac Aodha. Benchmarking representation learning for natural world image collections. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12884-12893, 2021. 2, 5" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 384 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[40] Jeremy C Weiss, Sriraam Natarajan, Peggy L Peissig, Catherine A McCarty, and David Page. Machine learning for personalized medicine: predicting primary myocardial infarction from electronic health records. *Ai Magazine*, 33(4):33-33, 2012. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "type": "text", + "content": "[41] Brandon Yang, Gabriel Bender, Quoc V Le, and Jiquan Ngiam. Condconv: Conditionally parameterized convolutions for efficient inference. Advances in Neural Information Processing Systems, 32, 2019. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 174, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 174, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 174, + 545, + 228 + ], + "type": "text", + "content": "[42] Han-Jia Ye, Hexiang Hu, De-Chuan Zhan, and Fei Sha. Few-shot learning via embedding adaptation with set-to-set functions. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8808-8817, 2020. 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 229, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 229, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 229, + 545, + 262 + ], + "type": "text", + "content": "[43] Dong Yu and Jinyu Li. Recent progresses in deep learning based acoustic models. IEEE/CAA Journal of automatica sinica, 4(3):396-409, 2017. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 263, + 545, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 263, + 545, + 328 + ], + "spans": [ + { + "bbox": [ + 308, + 263, + 545, + 328 + ], + "type": "text", + "content": "[44] Mingda Zhang, Chun-Te Chu, Andrey Zhmoginov, Andrew Howard, Brendan Jou, Yukun Zhu, Li Zhang, Rebecca Hwa, and Adriana Kovashka. Basisnet: Two-stage model synthesis for efficient inference. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3081–3090, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 331, + 545, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 331, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 308, + 331, + 545, + 384 + ], + "type": "text", + "content": "[45] Andrey Zhmoginov, Mark Sandler, and Maksym Vlademyrov. Hypertransformer: Model generation for supervised and semi-supervised few-shot learning. In International Conference on Machine Learning, pages 27075-27098. PMLR, 2022. 2" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11827" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file