diff --git "a/2025/(Almost) Free Modality Stitching of Foundation Models/layout.json" "b/2025/(Almost) Free Modality Stitching of Foundation Models/layout.json" new file mode 100644--- /dev/null +++ "b/2025/(Almost) Free Modality Stitching of Foundation Models/layout.json" @@ -0,0 +1,12846 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 124, + 76, + 468, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 76, + 468, + 94 + ], + "spans": [ + { + "bbox": [ + 124, + 76, + 468, + 94 + ], + "type": "text", + "content": "(Almost) Free Modality Stitching of Foundation Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "spans": [ + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "text", + "content": "Jaisidh Singh" + }, + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "inline_equation", + "content": "^{1,2,4}" + }, + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "text", + "content": ", Diganta Misra" + }, + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "inline_equation", + "content": "^{3,4}" + }, + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "text", + "content": ", Boris Knyazev" + }, + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "inline_equation", + "content": "^{5,6}" + }, + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "text", + "content": ", Antonio Orvieto" + }, + { + "bbox": [ + 108, + 121, + 489, + 136 + ], + "type": "inline_equation", + "content": "^{3,4,7}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 136, + 524, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 136, + 524, + 164 + ], + "spans": [ + { + "bbox": [ + 70, + 136, + 524, + 164 + ], + "type": "text", + "content": "1University of Tübingen, 2Zuse School ELIZA 3ELLIS Institute Tübingen, 4MPI-IS Tübingen, 5Samsung - 
SAIT AI Lab Montréal, 6Université de Montréal, 7Tübingen AI Center" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 244, + 274, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 244, + 274, + 592 + ], + "spans": [ + { + "bbox": [ + 84, + 244, + 274, + 592 + ], + "type": "text", + "content": "Foundation multi-modal models are often designed by stitching of multiple existing pretrained uni-modal models: for example, an image classifier with a text model. This stitching process is performed by training a connector module that aims to align the representation spaces of these uni-modal models towards a multi-modal objective. However, given the complexity of training such connectors on large scale web-based datasets coupled with the ever-increasing number of available pretrained uni-modal models, the task of uni-modal models selection and subsequent connector module training becomes computationally demanding. To address this under-studied critical problem, we propose Hypernetwork Model Alignment (HYMA), a novel all-in-one solution for optimal uni-modal model selection and connector training by leveraging hypernetworks. Specifically, our framework utilizes the parameter prediction capability of a hypernetwork to obtain jointly trained connector modules for " + }, + { + "bbox": [ + 84, + 244, + 274, + 592 + ], + "type": "inline_equation", + "content": "N \\times M" + }, + { + "bbox": [ + 84, + 244, + 274, + 592 + ], + "type": "text", + "content": " combinations of uni-modal models. 
In our experiments, HYMA reduces the cost of searching for the best performing uni-modal model pair by " + }, + { + "bbox": [ + 84, + 244, + 274, + 592 + ], + "type": "inline_equation", + "content": "10 \\times" + }, + { + "bbox": [ + 84, + 244, + 274, + 592 + ], + "type": "text", + "content": ", while matching the ranking and trained connector performance obtained via grid search across a suite of diverse multi-modal benchmarks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 603, + 155, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 603, + 155, + 617 + ], + "spans": [ + { + "bbox": [ + 68, + 603, + 155, + 617 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 626, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 626, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 626, + 292, + 775 + ], + "type": "text", + "content": "Multi-modal foundation models have emerged as a new frontier in the Artificial Intelligence (AI) landscape. Fueled by the increasing need for considering inter-dependency of multiple data modalities in modern tasks, multi-modal foundation models often leverage modality-specific (uni-modal) models as sub-components, which are stitched together via a connector module. 
A prominent class of such models is Vision-Language Models (VLMs) (Radford et al., 2021; Singh et al., 2024; Li et al., 2022; Singh et al., 2022), which comprise image and text" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 303, + 217, + 524, + 332 + ], + "blocks": [ + { + "bbox": [ + 303, + 217, + 524, + 332 + ], + "lines": [ + { + "bbox": [ + 303, + 217, + 524, + 332 + ], + "spans": [ + { + "bbox": [ + 303, + 217, + 524, + 332 + ], + "type": "image", + "image_path": "2c8b5411126f966fd294fc8227ff26174841ea3d69be3a87dfc929c371441fea.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 340, + 526, + 460 + ], + "lines": [ + { + "bbox": [ + 302, + 340, + 526, + 460 + ], + "spans": [ + { + "bbox": [ + 302, + 340, + 526, + 460 + ], + "type": "text", + "content": "Figure 1: We train connectors between pretrained uni-modal models to show that uni-modal model performance is not predictive of multi-modal performance obtained by stitching. Image encoder performance refers to top-1 ImageNet-1K accuracy, text encoder performance refers to semantic search performance across 14 datasets (Reimers and Gurevych, 2019). Multi-modal scores refers to ImageNet-1K top-1 accuracy (classification by matching images to prompts such as \"this is a photo of a {class}\").1" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 472, + 524, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 472, + 524, + 498 + ], + "spans": [ + { + "bbox": [ + 302, + 472, + 524, + 498 + ], + "type": "text", + "content": "encoders that embed image and text concepts into a common contrastively learnt latent space." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 499, + 525, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 499, + 525, + 647 + ], + "spans": [ + { + "bbox": [ + 302, + 499, + 525, + 647 + ], + "type": "text", + "content": "Connector modules powering VLMs are often constructed as an " + }, + { + "bbox": [ + 302, + 499, + 525, + 647 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 302, + 499, + 525, + 647 + ], + "type": "text", + "content": "-layer multi-layer perceptron (MLP) (Liu et al., 2024), or in some cases even as simple as a linear layer (Merullo et al., 2022), with the purpose of stitching modality-specific models. While some exceptions do arise where these modules are extensively engineered transformer-like architectures (Li et al., 2023), the vast majority consensus on the design of such connector modules has been limited to MLPs (Zhu et al., 2025) due to their efficiency." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 648, + 526, + 756 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 648, + 526, + 756 + ], + "spans": [ + { + "bbox": [ + 302, + 648, + 526, + 756 + ], + "type": "text", + "content": "While training connector modules for a pair of predetermined uni-modal models is feasible, the picture becomes more complex when considering multiple uni-modal options and aiming to optimize for downstream performance after stitching. Indeed, it is often not the case (see Figure 1) that simply choosing to align best-performing uni-modal models leads to the best multi-modal performance." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 762, + 515, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 762, + 515, + 775 + ], + "spans": [ + { + "bbox": [ + 315, + 762, + 515, + 775 + ], + "type": "text", + "content": "1All model abbreviations can be found in Appendix B." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "19785" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 91, + 795, + 501, + 818 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 795, + 501, + 818 + ], + "spans": [ + { + "bbox": [ + 91, + 795, + 501, + 818 + ], + "type": "text", + "content": "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing, pages 19785-19801 November 4-9, 2025 ©2025 Association for Computational Linguistics" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 71, + 526, + 249 + ], + "blocks": [ + { + "bbox": [ + 70, + 71, + 526, + 249 + ], + "lines": [ + { + "bbox": [ + 70, + 71, + 526, + 249 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 526, + 249 + ], + "type": "image", + "image_path": "cf7c20dfbe3e8f6b82287bd1a0b177de8cb7207efdf3de85d2235e3dd84da4bd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 258, + 525, + 285 + ], + "lines": [ + { + "bbox": [ + 67, + 258, + 525, + 285 + ], + "spans": [ + { + "bbox": [ + 67, + 258, + 525, + 285 + ], + "type": "text", + "content": "Figure 2: Given multiple options for uni-modal models, pair-wise grid search can be an expensive way to determine the best multi-modal combination. Alternatively, HYMA formulates search as a predictive or generative process." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 304, + 292, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 304, + 292, + 454 + ], + "spans": [ + { + "bbox": [ + 67, + 304, + 292, + 454 + ], + "type": "text", + "content": "This trend is further illustrated in Table 1, where uni-modal model parametric capacity fails to serve as a reliable predictor of multi-modal performance. Consequently, the cost of optimal stitching can grow quadratically with the number of available options on both ends. In addition, the availability of extremely large web-scale pretraining datasets, consisting of samples in the order of billions (Schuhmann et al., 2022; Changpinyo et al., 2021; Desai et al., 2021), constitutes a blocker for proper ablation on such design choices." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 70, + 462, + 290, + 506 + ], + "blocks": [ + { + "bbox": [ + 70, + 462, + 290, + 506 + ], + "lines": [ + { + "bbox": [ + 70, + 462, + 290, + 506 + ], + "spans": [ + { + "bbox": [ + 70, + 462, + 290, + 506 + ], + "type": "table", + "html": "
I (#Params)T (#Params)Total #ParamsPerf.
EVA2-L (305M)roberta-L (355M)660M + c26.85
DeiT3-L (304M)mpnet-B (109M)413M + c42.63
", + "image_path": "2114cef9e1d61f762cce81d0a0cb7ef446c54d452b8ff413a4ad4c6e48b05b12.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "spans": [ + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "type": "text", + "content": "Table 1: Parametric capacity of unimodal models is not a reliable indicator of multimodal performance. On the task of multi-modal image classification using the ImageNet-1k dataset, we observe that stitching the highest-capacity models: EVA-2 Large (305M) for the image modality (I) and RoBERTa Large (355M) for the text modality (T), totaling " + }, + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "type": "inline_equation", + "content": "660\\mathrm{M} + c" + }, + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "type": "text", + "content": " parameters—yields significantly lower performance than a smaller stitched pair: DeiT-3 Large (I) (304M) and MPNet-Base (T) (109M), totaling just " + }, + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "type": "inline_equation", + "content": "413\\mathrm{M} + c" + }, + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "type": "text", + "content": " parameters. " + }, + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 513, + 291, + 658 + ], + "type": "text", + "content": " denotes the parameters contributed by 1-hidden layer MLP connector and Perf. denotes the Top-1 accuracy metric." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 673, + 290, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 290, + 714 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 290, + 714 + ], + "type": "text", + "content": "We highlight and define the problem, which we term Multi-modal Optimal Pairing and Stitching (M-OPS), as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 721, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 721, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 80, + 721, + 291, + 775 + ], + "type": "text", + "content": "- Pairing: Given a set of " + }, + { + "bbox": [ + 80, + 721, + 291, + 775 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 80, + 721, + 291, + 775 + ], + "type": "text", + "content": " models in modality 1 (e.g., vision) and " + }, + { + "bbox": [ + 80, + 721, + 291, + 775 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 80, + 721, + 291, + 775 + ], + "type": "text", + "content": " models in modality 2 (e.g., text), provide the optimal (best performing) combination pair " + }, + { + "bbox": [ + 80, + 721, + 291, + 775 + ], + "type": "inline_equation", + "content": "(n, m; n \\in N \\mid m \\in" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 324, + 304, + 526, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 304, + 526, + 346 + ], + "spans": [ + { + "bbox": [ + 324, + 304, + 526, + 346 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 324, + 304, + 526, + 346 + ], + "type": "text", + "content": " ) to construct a multi-modal model for a target task and/or under target constraints (e.g., parametric size, embedding dimensions)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 358, + 525, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 358, + 525, + 411 + ], + "spans": [ + { + "bbox": [ + 316, + 358, + 525, + 411 + ], + "type": "text", + "content": "- Stitching: For the selected uni-modal models " + }, + { + "bbox": [ + 316, + 358, + 525, + 411 + ], + "type": "inline_equation", + "content": "(n,m)" + }, + { + "bbox": [ + 316, + 358, + 525, + 411 + ], + "type": "text", + "content": ", obtain the optimal trained connector " + }, + { + "bbox": [ + 316, + 358, + 525, + 411 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 316, + 358, + 525, + 411 + ], + "type": "text", + "content": " that stitches them to construct the target multi-modal model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 425, + 526, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 425, + 526, + 547 + ], + "spans": [ + { + "bbox": [ + 302, + 425, + 526, + 547 + ], + "type": "text", + "content": "Due to the infeasibility of addressing the pairing sub-problem of M-OPS via a grid-search approach for a large " + }, + { + "bbox": [ + 302, + 425, + 526, + 547 + ], + "type": "inline_equation", + "content": "N \\times M" + }, + { + "bbox": [ + 302, + 425, + 526, + 547 + ], + "type": "text", + "content": " pair, we propose a novel alternative approach to tackle both the pairing and stitching steps in a single unified manner that utilizes a HyperNetwork (Ha et al., 2016). The key idea behind our approach is that stitching similar models shares latent semantics, which can be captured by jointly training a network to generate connectors." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "spans": [ + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "text", + "content": "We present Hypernetwork Model Alignment " + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "inline_equation", + "content": "(\\mathbf{HYMA})^2" + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "text", + "content": ", a method that, given " + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "text", + "content": " modality 1 (e.g., image) and " + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "text", + "content": " modality 2 (e.g., text) models, leverages a hypernetwork (Ha et al., 2016) that jointly learns to generate connectors for all possible " + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "inline_equation", + "content": "N\\times M" + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "text", + "content": " combinations. Our approach serves both as an indicator for optimal model pair configurations and as a trainer that produces stitched multi-modal models performing on par with the best stitched model pair obtained via grid search. 
In our experiments, where " + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "inline_equation", + "content": "N\\times M" + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "text", + "content": " can be as high as 27 (discussed in Section 5), our method enables an efficiency gain of " + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "inline_equation", + "content": "10\\times" + }, + { + "bbox": [ + 302, + 548, + 525, + 737 + ], + "type": "text", + "content": " in obtaining the best stitched model pair compared to grid search." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 738, + 500, + 751 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 738, + 500, + 751 + ], + "spans": [ + { + "bbox": [ + 314, + 738, + 500, + 751 + ], + "type": "text", + "content": "We highlight our contributions as follows:" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 762, + 484, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 762, + 484, + 774 + ], + "spans": [ + { + "bbox": [ + 315, + 762, + 484, + 774 + ], + "type": "text", + "content": "2https://github.com/jaisidhsingh/hyma" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19786" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 71, + 291, + 267 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 77, + 71, + 291, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 71, + 291, + 153 + ], + "spans": [ + { + "bbox": [ + 77, + 71, + 291, + 153 + ], + "type": "text", + "content": "1. 
We propose Hypernetwork Model Alignment (HYMA), a hypernetwork-based approach for obtaining strong uni-modal model pairs that perform on par with the best stitched model pair obtained via grid search at an order-of-magnitude lower computational cost." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 163, + 290, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 163, + 290, + 216 + ], + "spans": [ + { + "bbox": [ + 76, + 163, + 290, + 216 + ], + "type": "text", + "content": "2. Our proposed approach HYMA is, to the best of our knowledge, the first to demonstrate the effectiveness of hypernetworks for solving the M-OPS problem defined above." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 227, + 291, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 227, + 291, + 267 + ], + "spans": [ + { + "bbox": [ + 76, + 227, + 291, + 267 + ], + "type": "text", + "content": "3. We empirically demonstrate the performance and efficiency of HYMA on VLMs across various multi-modal benchmarks." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 279, + 152, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 279, + 152, + 293 + ], + "spans": [ + { + "bbox": [ + 68, + 279, + 152, + 293 + ], + "type": "text", + "content": "2 Background" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 301, + 290, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 301, + 290, + 383 + ], + "spans": [ + { + "bbox": [ + 67, + 301, + 290, + 383 + ], + "type": "text", + "content": "In this section, we present the necessary preliminaries for the M-OPS problem, along with the general training paradigm of hypernetworks. These formal definitions establish the foundation for our proposed method, HYMA, which we introduce in the following section." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "spans": [ + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "text", + "content": "Definition 1 (Hypernetworks for Parameter Prediction). A hypernetwork (Ha et al., 2016) is a neural network " + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "inline_equation", + "content": "H_{\\phi}" + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "text", + "content": " parameterized by " + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "text", + "content": ", designed to predict the parameters " + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "text", + "content": " of a target network " + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "text", + "content": " based on a conditioning input " + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 67, + 387, + 291, + 468 + ], + "type": "text", + "content": ". 
The parameter generation process is defined as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 152, + 481, + 206, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 481, + 206, + 495 + ], + "spans": [ + { + "bbox": [ + 152, + 481, + 206, + 495 + ], + "type": "interline_equation", + "content": "H _ {\\phi} (\\mathbf {c}) = \\theta .", + "image_path": "dfb39e6d88a31f2d386eec0e98f44bd49c058e619d00c407fd8170aabe00f8c2.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "type": "text", + "content": "The parameters " + }, + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "type": "text", + "content": " of the hypernetwork are optimized indirectly via the performance of the generated network " + }, + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "type": "text", + "content": " on a downstream task. 
Given a task-specific loss " + }, + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{task}" + }, + { + "bbox": [ + 67, + 507, + 291, + 574 + ], + "type": "text", + "content": " evaluated on corresponding data, the optimization objective becomes:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 586, + 242, + 606 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 586, + 242, + 606 + ], + "spans": [ + { + "bbox": [ + 115, + 586, + 242, + 606 + ], + "type": "interline_equation", + "content": "\\phi^ {*} = \\arg \\min _ {\\phi} \\mathcal {L} _ {t a s k} (f _ {H _ {\\phi} (\\mathbf {c})}).", + "image_path": "56c69c2a039e4e3d8a237cc7145993ee989d8d3d36084be47a23302d2a7c01b4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "spans": [ + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "text", + "content": "The trained hypernetwork " + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "inline_equation", + "content": "H_{\\phi^*}" + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "text", + "content": " can then be used to generate task-adapted parameters " + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "text", + "content": " given new conditioning inputs. 
Optimizing " + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "text", + "content": " rather than " + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 67, + 616, + 291, + 698 + ], + "type": "text", + "content": " directly can offer advantages in terms of training dynamics, capacity control, and generalization (Chauhan et al., 2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 703, + 291, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 703, + 291, + 743 + ], + "spans": [ + { + "bbox": [ + 67, + 703, + 291, + 743 + ], + "type": "text", + "content": "For simplicity, assume encoders producing sequences of " + }, + { + "bbox": [ + 67, + 703, + 291, + 743 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 67, + 703, + 291, + 743 + ], + "type": "text", + "content": " features (e.g., number of patches or tokens) living in a " + }, + { + "bbox": [ + 67, + 703, + 291, + 743 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 67, + 703, + 291, + 743 + ], + "type": "text", + "content": "-dimensional space." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "Definition 2 (Connector-based multi-modal stitching). 
Let " + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{A}:\\mathcal{X}_A\\to \\mathbb{R}^{D_A}" + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{B}:\\mathcal{X}_B\\to \\mathbb{R}^{D_B}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "spans": [ + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "content": "be pretrained uni-modal encoders for two different modalities with input spaces " + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_A" + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_B" + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "content": ", respectively. 
The goal is to construct a multi-modal model by learning a connector function " + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "inline_equation", + "content": "f_{\\theta}:\\mathbb{R}^{D_A}\\to \\mathbb{R}^{D_B}" + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "content": " that stitches the output of " + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "content": " to the representation space of " + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "content": ": given input pairs " + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "inline_equation", + "content": "(\\mathbf{u},\\mathbf{v})\\in \\mathcal{X}_A\\times \\mathcal{X}_B" + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "content": ", the connector stitches the modality-" + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 301, + 71, + 527, + 179 + ], + "type": "text", + "content": " features" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 369, + 185, + 458, + 200 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 185, + 458, + 200 + ], + "spans": [ + { + "bbox": [ + 369, + 185, + 458, + 200 + ], + "type": "interline_equation", + "content": "\\mathbf {x} ^ {a} = \\mathcal {A} (\\mathbf {u}) \\in \\mathbb {R} ^ {D _ {A}}", + "image_path": "556108f73f926bb9a2410b8bf8fe9d93746528e4fb5905e69115d0f4fcbcbd3b.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 206, + 411, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 206, + 411, + 219 + ], + "spans": [ + { + "bbox": [ + 302, + 206, + 411, + 219 + ], + "type": "text", + 
"content": "to modality- " + }, + { + "bbox": [ + 302, + 206, + 411, + 219 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 302, + 206, + 411, + 219 + ], + "type": "text", + "content": " space via" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 366, + 225, + 461, + 240 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 225, + 461, + 240 + ], + "spans": [ + { + "bbox": [ + 366, + 225, + 461, + 240 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {x}} ^ {a} = f _ {\\theta} \\bigl (\\mathbf {x} ^ {a} \\bigr) \\in \\mathbb {R} ^ {D _ {B}}", + "image_path": "39041d1be0f3ae9ce9f3ac133870c15ef2b2ccb73528b45d068aa4fdb627de61.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "spans": [ + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "content": "The stitched representation " + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}^a" + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "content": " is then combined with " + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^b = \\mathcal{B}(\\mathbf{v})" + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "content": " to construct a joint multi-modal representation. 
The connector parameters " + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "content": " are optimized while keeping " + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "content": " frozen. The training objective follows contrastive stitching, which uses a similarity function " + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "content": " and temperature " + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 302, + 247, + 527, + 354 + ], + "type": "text", + "content": " to train the connector on the InfoNCE (Oord et al., 2018) loss (quadratic):" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 309, + 360, + 518, + 393 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 360, + 518, + 393 + ], + "spans": [ + { + "bbox": [ + 309, + 360, + 518, + 393 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {contrastive}} (\\theta) = - \\log \\frac {\\exp (\\mathrm{sim} (\\tilde {\\mathbf {x}} ^ {a} , \\mathbf {x} ^ {b}) / \\tau)}{\\sum_ {j} \\exp (\\mathrm{sim} (\\tilde {\\mathbf {x}} ^ {a} , \\mathbf {x} _ {j} ^ {b}) / \\tau)}", + "image_path": "d007fa81ed12c53f33bd7df3eb7f907a568b6bb95f5375cec8c83e8ec26dfaef.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 396, + 392, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 302, + 396, + 392, + 411 + ], + "spans": [ + { + "bbox": [ + 302, + 396, + 392, + 411 + ], + "type": "text", + "content": "3 Methodology" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 417, + 429, + 429 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 417, + 429, + 429 + ], + "spans": [ + { + "bbox": [ + 302, + 417, + 429, + 429 + ], + "type": "text", + "content": "3.1 Problem formulation" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "spans": [ + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "text", + "content": "We aim to jointly learn " + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "inline_equation", + "content": "N\\times M" + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "text", + "content": " connectors, where each connector is specified to the hypernetwork via a conditional input " + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "inline_equation", + "content": "\\mathbf{c}^k" + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "text", + "content": ". More formally, for the " + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "inline_equation", + "content": "k^{th}" + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "text", + "content": " model combination, the hypernetwork generates the parameters as " + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "inline_equation", + "content": "H_{\\phi}(\\mathbf{c}^{k})" + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "text", + "content": ". 
The resulting connector " + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "inline_equation", + "content": "f_{H_{\\phi}(\\mathbf{c}^{k})}" + }, + { + "bbox": [ + 302, + 434, + 525, + 542 + ], + "type": "text", + "content": " is then used to compute a task-specific loss. The overall training loss is computed by averaging over all combinations:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 334, + 546, + 525, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 546, + 525, + 582 + ], + "spans": [ + { + "bbox": [ + 334, + 546, + 525, + 582 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {HYMA}} = \\frac {1}{N M} \\sum_ {k = 1} ^ {N M} \\mathcal {L} _ {\\text {task}} \\left(f _ {H _ {\\phi} \\left(\\mathbf {c} ^ {k}\\right)}\\right). \\tag {1}", + "image_path": "e406fc11d11f145dea8f3e987ec9b7f87c531ab47bf6dbd402ba558b3bb0e516.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "spans": [ + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{task}}" + }, + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "type": "text", + "content": " corresponds to a contrastive InfoNCE loss (for retrieval-style objectives like that in CLIP (Radford et al., 2021)). 
The trained hypernetwork is denoted by " + }, + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "type": "inline_equation", + "content": "H_{\\phi^*}" + }, + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "type": "inline_equation", + "content": "\\phi^* = \\arg \\min_{\\phi} \\mathcal{L}_{\\mathrm{HYMA}}" + }, + { + "bbox": [ + 302, + 587, + 526, + 682 + ], + "type": "text", + "content": ". Following prior work (Rosenfeld et al., 2022; Jia et al., 2024), we restrict connectors to be multi-layer perceptrons (MLPs)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 302, + 690, + 458, + 703 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 690, + 458, + 703 + ], + "spans": [ + { + "bbox": [ + 302, + 690, + 458, + 703 + ], + "type": "text", + "content": "3.2 Hypernetwork architecture" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": "We define the hypernetwork as a function " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "H_{\\phi}:\\mathbb{R}^{C}\\to \\mathbb{R}^{D_{\\theta}}" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " , mapping conditional inputs " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\mathbf{c}\\in \\mathbb{R}^C" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " to connector parameters " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\theta \\in \\mathbb{R}^{D_{\\theta}}" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " . 
We describe next how " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " is constructed and how it is mapped to the parameter space." + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19787" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 86, + 68, + 512, + 293 + ], + "blocks": [ + { + "bbox": [ + 86, + 68, + 512, + 293 + ], + "lines": [ + { + "bbox": [ + 86, + 68, + 512, + 293 + ], + "spans": [ + { + "bbox": [ + 86, + 68, + 512, + 293 + ], + "type": "image", + "image_path": "10284f1997723483fc738ff125426856fab058ce1c44311bdfce9b879c18c57a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 301, + 527, + 328 + ], + "lines": [ + { + "bbox": [ + 67, + 301, + 527, + 328 + ], + "spans": [ + { + "bbox": [ + 67, + 301, + 527, + 328 + ], + "type": "text", + "content": "Figure 3: A visual walkthrough of our hypernetwork architecture is provided above. We take the example of predicting the parameters of an MLP-type connector with depth " + }, + { + "bbox": [ + 67, + 301, + 527, + 328 + ], + "type": "inline_equation", + "content": "= 3" + }, + { + "bbox": [ + 67, + 301, + 527, + 328 + ], + "type": "text", + "content": " (denotes 2 hidden layers)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "spans": [ + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "type": "text", + "content": "Conditional inputs: We use a learnable lookup table of embeddings " + }, + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\sigma}^{\\mathrm{H}} \\in \\mathbb{R}^{NM \\times C}" + }, + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "type": "inline_equation", + "content": "\\mathbf{c}^k = \\mathbf{W}_{\\sigma}^{\\mathrm{H}}[k]" + }, + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "type": "text", + "content": " encodes the " + }, + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "type": "inline_equation", + "content": "k^{th}" + }, + { + "bbox": [ + 67, + 347, + 291, + 389 + ], + "type": "text", + "content": " model pair." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "spans": [ + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "text", + "content": "Mapping conditional inputs to parameters: The hypernetwork " + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "inline_equation", + "content": "H_{\\phi}" + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "text", + "content": " is implemented using an MLP " + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "inline_equation", + "content": "F_{\\varrho}" + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "text", + "content": ", which predicts connector parameters layer-wise. 
Each layer prediction is conditioned on both " + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{c}^k" + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "text", + "content": " and a learnable layer-specific embedding " + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_j = \\mathbf{E}_{\\omega}^{\\mathrm{H}}[j]" + }, + { + "bbox": [ + 67, + 399, + 291, + 481 + ], + "type": "text", + "content": ", such that:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 86, + 493, + 271, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 493, + 271, + 512 + ], + "spans": [ + { + "bbox": [ + 86, + 493, + 271, + 512 + ], + "type": "interline_equation", + "content": "F _ {\\varrho} \\left(\\tilde {\\mathbf {c}} _ {j} ^ {k}\\right) \\in \\mathbb {R} ^ {D _ {\\vartheta^ {k}}}, \\quad \\text {where} \\quad \\tilde {\\mathbf {c}} _ {j} ^ {k} = \\mathbf {c} ^ {k} + \\mathbf {e} _ {j}", + "image_path": "6a3ef3ad95ea52eaad2b81f3eec38d94961e7e0948c379f1b40e7f3236f4b75d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "spans": [ + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "inline_equation", + "content": "\\vartheta^k" + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "text", + "content": " denotes the size of the largest layer in the " + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "inline_equation", + "content": "k^{th}" + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "text", + "content": " connector. 
The output is then sliced to the appropriate dimension for layer " + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "text", + "content": ". This process is repeated for all layers, and the resulting parameters are concatenated to form the complete connector parameter vector " + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "inline_equation", + "content": "\\theta^k \\in \\mathbb{R}^{D_\\theta^k}" + }, + { + "bbox": [ + 67, + 524, + 292, + 633 + ], + "type": "text", + "content": ". This modular, layer-wise parameterization makes the hypernetwork more tractable and memory-efficient." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 646, + 277, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 646, + 277, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 646, + 277, + 673 + ], + "type": "text", + "content": "3.3 Mini-batching model combinations for scalable hypernetwork training" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 680, + 292, + 761 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 292, + 761 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 292, + 761 + ], + "type": "text", + "content": "Jointly training connectors for all " + }, + { + "bbox": [ + 67, + 680, + 292, + 761 + ], + "type": "inline_equation", + "content": "N \\times M" + }, + { + "bbox": [ + 67, + 680, + 292, + 761 + ], + "type": "text", + "content": " model combinations can become computationally prohibitive. To address this, we follow the strategy of model mini-batching (Knyazev et al., 2023), wherein each training step operates over a batch of " + }, + { + "bbox": [ + 67, + 680, + 292, + 761 + ], + "type": "inline_equation", + "content": "B_{m}" + }, + { + "bbox": [ + 67, + 680, + 292, + 761 + ], + "type": "text", + "content": " model combinations. 
The modified loss is:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 337, + 357, + 525, + 393 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 357, + 525, + 393 + ], + "spans": [ + { + "bbox": [ + 337, + 357, + 525, + 393 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {HYMA}} = \\frac {1}{B _ {m}} \\sum_ {k = 1} ^ {B _ {m}} \\mathcal {L} _ {\\text {task}} \\left(f _ {H _ {\\phi} \\left(\\mathbf {c} ^ {k}\\right)}\\right). \\tag {2}", + "image_path": "2f289b85068cf15bf1ecea1dd0c16c25b8c4a5315643feccfb78ed25aab3fbe0.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 398, + 478, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 398, + 478, + 412 + ], + "spans": [ + { + "bbox": [ + 303, + 398, + 478, + 412 + ], + "type": "text", + "content": "Each training step proceeds as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 311, + 421, + 525, + 494 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 312, + 421, + 465, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 421, + 465, + 433 + ], + "spans": [ + { + "bbox": [ + 312, + 421, + 465, + 433 + ], + "type": "text", + "content": "1. Sample a data batch of size " + }, + { + "bbox": [ + 312, + 421, + 465, + 433 + ], + "type": "inline_equation", + "content": "B_{d}" + }, + { + "bbox": [ + 312, + 421, + 465, + 433 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 311, + 437, + 524, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 437, + 524, + 463 + ], + "spans": [ + { + "bbox": [ + 311, + 437, + 524, + 463 + ], + "type": "text", + "content": "2. 
For each data sample, evaluate " + }, + { + "bbox": [ + 311, + 437, + 524, + 463 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{HYMA}}" + }, + { + "bbox": [ + 311, + 437, + 524, + 463 + ], + "type": "text", + "content": " over each of the " + }, + { + "bbox": [ + 311, + 437, + 524, + 463 + ], + "type": "inline_equation", + "content": "B_{m}" + }, + { + "bbox": [ + 311, + 437, + 524, + 463 + ], + "type": "text", + "content": " model combinations." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 311, + 468, + 525, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 468, + 525, + 494 + ], + "spans": [ + { + "bbox": [ + 311, + 468, + 525, + 494 + ], + "type": "text", + "content": "3. Use the accumulated loss to update hypernetwork parameters " + }, + { + "bbox": [ + 311, + 468, + 525, + 494 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 311, + 468, + 525, + 494 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 503, + 526, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 503, + 526, + 571 + ], + "spans": [ + { + "bbox": [ + 302, + 503, + 526, + 571 + ], + "type": "text", + "content": "This training strategy enables HYMA to scale efficiently without requiring all models or their combinations to be loaded simultaneously. We elaborate on the impact of this choice on our framework in Appendix F." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 581, + 390, + 595 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 581, + 390, + 595 + ], + "spans": [ + { + "bbox": [ + 302, + 581, + 390, + 595 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 602, + 374, + 614 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 602, + 374, + 614 + ], + "spans": [ + { + "bbox": [ + 302, + 602, + 374, + 614 + ], + "type": "text", + "content": "4.1 Baselines" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 620, + 526, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 620, + 526, + 660 + ], + "spans": [ + { + "bbox": [ + 302, + 620, + 526, + 660 + ], + "type": "text", + "content": "To ensure comprehensive evaluation of our proposed method, we compare against the following baselines:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 671, + 525, + 775 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 316, + 671, + 525, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 671, + 525, + 739 + ], + "spans": [ + { + "bbox": [ + 316, + 671, + 525, + 739 + ], + "type": "text", + "content": "- Random: A naive baseline that randomly selects and stitches uni-modal model pairs using the specified connector on the target multi-modal dataset. Reported performance is the average over five independent trials." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 748, + 524, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 748, + 524, + 775 + ], + "spans": [ + { + "bbox": [ + 316, + 748, + 524, + 775 + ], + "type": "text", + "content": "- UniModal Top-1 (UniT-1): Inspired by the observation in Fig. 
1, this baseline stitches" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19788" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 71, + 296, + 247 + ], + "blocks": [ + { + "bbox": [ + 73, + 71, + 296, + 247 + ], + "lines": [ + { + "bbox": [ + 73, + 71, + 296, + 247 + ], + "spans": [ + { + "bbox": [ + 73, + 71, + 296, + 247 + ], + "type": "image", + "image_path": "553815cdf1fcacd2b20fedcbad0e47dfe805d496d90b0a61a1ddb912c1cf21f7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 258, + 525, + 307 + ], + "lines": [ + { + "bbox": [ + 66, + 258, + 525, + 307 + ], + "spans": [ + { + "bbox": [ + 66, + 258, + 525, + 307 + ], + "type": "text", + "content": "Figure 4: " + }, + { + "bbox": [ + 66, + 258, + 525, + 307 + ], + "type": "inline_equation", + "content": "\\mathbf{MLP}_1 \\mid N \\times M = 3" + }, + { + "bbox": [ + 66, + 258, + 525, + 307 + ], + "type": "text", + "content": ": We show the trade-off between computational resources (measured in FLOPs) and performance of the best stitched model pairs across all comparative baselines. We find that HYMA is able to predict a highly performance pairing at a significantly reduced FLOP cost in comparison to training on the optimal model pair as well as search over all model pairs for " + }, + { + "bbox": [ + 66, + 258, + 525, + 307 + ], + "type": "inline_equation", + "content": "N \\times M = 3" + }, + { + "bbox": [ + 66, + 258, + 525, + 307 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 298, + 69, + 521, + 248 + ], + "blocks": [ + { + "bbox": [ + 298, + 69, + 521, + 248 + ], + "lines": [ + { + "bbox": [ + 298, + 69, + 521, + 248 + ], + "spans": [ + { + "bbox": [ + 298, + 69, + 521, + 248 + ], + "type": "image", + "image_path": "0b05966ad8e11df53b324d1e705cd14e4358ce1b0d5353e65541325ba47bb86b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 88, + 327, + 291, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 327, + 291, + 421 + ], + "spans": [ + { + "bbox": [ + 88, + 327, + 291, + 421 + ], + "type": "text", + "content": "the top-performing individual uni-modal models—selected based on their uni-modal benchmark performance—via the target connector. For VLMs, image models are ranked by ImageNet Top-1 accuracy, and text models by their corresponding sentence embedding performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 81, + 436, + 291, + 775 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 81, + 436, + 291, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 436, + 291, + 558 + ], + "spans": [ + { + "bbox": [ + 81, + 436, + 291, + 558 + ], + "type": "text", + "content": "- Ask-LLM: Since uni-modal model properties such as parameter count and pretraining data can influence multi-modal performance, we define a baseline Ask-LLM. Here, a language model is prompted with metadata from the model zoo for both modalities and asked to select the most suitable pair for the target task. The chosen pair is stitched using a connector and evaluated in isolation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 81, + 571, + 291, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 571, + 291, + 707 + ], + "spans": [ + { + "bbox": [ + 81, + 571, + 291, + 707 + ], + "type": "text", + "content": "- AutoPair: To enable a fair comparison with HYMA's efficiency-focused design, we implement a pairing baseline that iteratively searches a given set of pairs by training for a fixed number of epochs, and then prunes all pairs below the median performance. Specifically, AutoPair optimizes model pair selection and stitching within a FLOPs budget equal to that used by HYMA for the same model zoo. More details are provided in Section 5.3." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 81, + 720, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 720, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 81, + 720, + 291, + 775 + ], + "type": "text", + "content": "- Oracle (Grid Search): This upper-bound baseline performs exhaustive grid search over all model pairs in the zoo, independently training and evaluating each stitched pair. While" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 324, + 327, + 525, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 327, + 525, + 354 + ], + "spans": [ + { + "bbox": [ + 324, + 327, + 525, + 354 + ], + "type": "text", + "content": "this provides optimal performance, it is computationally prohibitive." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 364, + 525, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 364, + 525, + 432 + ], + "spans": [ + { + "bbox": [ + 316, + 364, + 525, + 432 + ], + "type": "text", + "content": "- Best Guess: A hypothetical upper-bound baseline representing the training cost of the model combination that would yield the best multi-modal pair after stitching, assuming the optimal pair was known in advance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 443, + 365, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 443, + 365, + 454 + ], + "spans": [ + { + "bbox": [ + 302, + 443, + 365, + 454 + ], + "type": "text", + "content": "4.2 Models" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "spans": [ + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "type": "text", + "content": "All model details are provided in Appendix B. To construct our Vision-Language Models (VLMs), we define a model zoo containing " + }, + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "type": "inline_equation", + "content": "N = 9" + }, + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "type": "text", + "content": " image encoders: ViT-S, DeiT-S, DeiT3-S, ViT-B, DeiT-B, DeiT3-B, ViT-L, DeiT3-L, Eva2-L and " + }, + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "type": "inline_equation", + "content": "M = 3" + }, + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "type": "text", + "content": " text encoders: minilm-L, mpnet-B, roberta-L. This results in a total of " + }, + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "type": "inline_equation", + "content": "N \\times M = 27" + }, + { + "bbox": [ + 302, + 460, + 525, + 570 + ], + "type": "text", + "content": " possible VLM configurations." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 579, + 421, + 591 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 579, + 421, + 591 + ], + "spans": [ + { + "bbox": [ + 302, + 579, + 421, + 591 + ], + "type": "text", + "content": "4.3 Connector variants" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 597, + 525, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 597, + 525, + 624 + ], + "spans": [ + { + "bbox": [ + 302, + 597, + 525, + 624 + ], + "type": "text", + "content": "We test HYMA against the aforementioned baselines across three connector configurations:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 311, + 634, + 525, + 773 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 312, + 634, + 525, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 634, + 525, + 702 + ], + "spans": [ + { + "bbox": [ + 312, + 634, + 525, + 702 + ], + "type": "text", + "content": "1. Linear: As demonstrated in (Merullo et al., 2022), we construct the connector to be a linear layer parameterized via " + }, + { + "bbox": [ + 312, + 634, + 525, + 702 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 312, + 634, + 525, + 702 + ], + "type": "text", + "content": ", mapping from the embedding space of the text encoder to the image encoder of a specific pair." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 311, + 711, + 525, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 711, + 525, + 737 + ], + "spans": [ + { + "bbox": [ + 311, + 711, + 525, + 737 + ], + "type": "text", + "content": "2. " + }, + { + "bbox": [ + 311, + 711, + 525, + 737 + ], + "type": "inline_equation", + "content": "\\mathbf{MLP}_1" + }, + { + "bbox": [ + 311, + 711, + 525, + 737 + ], + "type": "text", + "content": ": An MLP with one hidden layer of hidden dimension set 1024." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 311, + 748, + 524, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 748, + 524, + 773 + ], + "spans": [ + { + "bbox": [ + 311, + 748, + 524, + 773 + ], + "type": "text", + "content": "3. " + }, + { + "bbox": [ + 311, + 748, + 524, + 773 + ], + "type": "inline_equation", + "content": "\\mathbf{MLP}_2" + }, + { + "bbox": [ + 311, + 748, + 524, + 773 + ], + "type": "text", + "content": ": A MLP with two hidden layers, each of dimension 1024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19789" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 135, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 135, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 135, + 83 + ], + "type": "text", + "content": "4.4 Datasets" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 89, + 291, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 89, + 291, + 198 + ], + "spans": [ + { + "bbox": [ + 67, + 89, + 291, + 198 + ], + "type": "text", + "content": "We employ the LLaVA-CC558K dataset (Jia et al., 2024), which consists of 558,128 high-quality synthetic image-text pairs. Connectors between image and text encoders are trained using the contrastive InfoNCE loss (Oord et al., 2018) for 10 epochs, after which the best-performing checkpoint is selected. Hyperparameters are tuned for performance, stability, and GPU efficiency, detailed in Appendix." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 208, + 175, + 221 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 208, + 175, + 221 + ], + "spans": [ + { + "bbox": [ + 67, + 208, + 175, + 221 + ], + "type": "text", + "content": "4.5 Evaluation Tasks" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 226, + 290, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 226, + 290, + 253 + ], + "spans": [ + { + "bbox": [ + 67, + 226, + 290, + 253 + ], + "type": "text", + "content": "Post-training, the resulting VLMs are evaluated on the following four downstream tasks:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 81, + 264, + 291, + 543 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 81, + 264, + 291, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 264, + 291, + 386 + ], + "spans": [ + { + "bbox": [ + 81, + 264, + 291, + 386 + ], + "type": "text", + "content": "- Multi-modal Image Classification (MIC): We compute the zero-shot top-1 image classification accuracies of the VLMs on the ImageNet-1K (Deng et al., 2009) and the CIFAR-100 (Krizhevsky et al., 2009) datasets. The evaluation follows an image-text matching approach, where the text corresponding to each image input takes the form: \"THIS IS A PHOTO OF A {CLASS}\"." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 81, + 396, + 291, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 396, + 291, + 464 + ], + "spans": [ + { + "bbox": [ + 81, + 396, + 291, + 464 + ], + "type": "text", + "content": "- Image-Text Matching (ITM): Here, we compute the zero-shot recall @ 5 scores of the VLMs on the MSCOCO validation split (Lin et al., 2014) and the Flickr-8K (Hodosh et al., 2013) datasets." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 81, + 475, + 291, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 475, + 291, + 543 + ], + "spans": [ + { + "bbox": [ + 81, + 475, + 291, + 543 + ], + "type": "text", + "content": "- Visual Question Answering (VQA): We use the validation splits of the OK-VQA (Marino et al., 2019) and the Text-VQA (Singh et al., 2019) datasets. Implementation details for VQA are given in Appendix G." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 554, + 227, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 554, + 227, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 554, + 227, + 567 + ], + "type": "text", + "content": "4.6 Varying multi-modal setting" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 572, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 572, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 572, + 291, + 775 + ], + "type": "text", + "content": "We also explore our formulation on a different setting than contrastive VLMs, i.e., input-output stitching instead of output-output stitching. Fusing image encoder outputs to LLM inputs is a technique used to develop multi-modal language models (MLLMs) (Achiam et al., 2023; Touvron et al., 2023; Jia et al., 2024; Anthropic), another avenue of multi-modal models that we employ HYMA in. We find that using HYMA for MLLMs does not reflect the ranking observed via full grid search, however, HYMA predicts reliable connectors at a lower cost than grid search, with larger connectors exhibiting the best performance equal to the best setting found via grid search. We provide more details on this in Appendix A." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 70, + 416, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 70, + 416, + 84 + ], + "spans": [ + { + "bbox": [ + 303, + 70, + 416, + 84 + ], + "type": "text", + "content": "5 Empirical Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 91, + 424, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 91, + 424, + 105 + ], + "spans": [ + { + "bbox": [ + 302, + 91, + 424, + 105 + ], + "type": "text", + "content": "5.1 " + }, + { + "bbox": [ + 302, + 91, + 424, + 105 + ], + "type": "inline_equation", + "content": "\\mathbf{MLP}_1\\mid \\mathbf{N}\\times \\mathbf{M} = 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "spans": [ + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "text", + "content": "Initially, we stitch " + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "inline_equation", + "content": "N = 3" + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "text", + "content": " image encoders (ViT-S, DeiT-S, DeiT3-S) with " + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "inline_equation", + "content": "M = 1" + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "text", + "content": " text encoder (MiniLM) using an MLP connector of 1 hidden layer " + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "inline_equation", + "content": "(\\mathrm{MLP}_1)" + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "text", + "content": ". This yields a total of " + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "inline_equation", + "content": "N \\times M = 3" + }, + { + "bbox": [ + 301, + 110, + 526, + 257 + ], + "type": "text", + "content": " possible VLMs, that we construct and evaluate on the image-classification task. 
For the best performing combination per evaluation benchmark, we show its accuracy in Figure 4 as well as the computational resources, measured in floating point operations (FLOPs) required to obtain the corresponding connectors." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "spans": [ + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "text", + "content": "On ImageNet-1K, DeiT3-S emerges as the best image encoder to be stitched with minilm-L. Further, HYMA and Grid Search (and Best Guess) exhibit the same final performance, i.e., " + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "inline_equation", + "content": "27.4\\%" + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "text", + "content": " top-1 accuracy. On the other hand, the most performative image encoder when stitched to MiniLM is ViT-S. In terms of performance, HYMA exhibits a top-1 accuracy of " + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "inline_equation", + "content": "38.4\\%" + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "text", + "content": ", nearly matching the performance of baselines that individually train connectors to find the optimal setting, i.e., " + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "inline_equation", + "content": "39.3\\%" + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "text", + "content": ". 
Also, HYMA is strongly cost-effective for VLMs, being " + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "inline_equation", + "content": "4.44\\times" + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "inline_equation", + "content": "1.48\\times" + }, + { + "bbox": [ + 302, + 259, + 526, + 435 + ], + "type": "text", + "content": " more compute-efficient than Grid Search and Best Guess respectively." + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 305, + 444, + 525, + 493 + ], + "blocks": [ + { + "bbox": [ + 305, + 444, + 525, + 493 + ], + "lines": [ + { + "bbox": [ + 305, + 444, + 525, + 493 + ], + "spans": [ + { + "bbox": [ + 305, + 444, + 525, + 493 + ], + "type": "table", + "html": "
DatasetEfficiency @10 ep (×)Efficiency @ best (×)
BGGSBGGS
IN-1K1.484.441.484.44
CIFAR-1001.484.442.968.89
", + "image_path": "4c59244e3915b3e707b37506945b625fca678b3360ce2ba7aeb5947848f5e2fd.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "lines": [ + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "spans": [ + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "type": "text", + "content": "Table 2: " + }, + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "type": "inline_equation", + "content": "N\\times M = 3" + }, + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_1" + }, + { + "bbox": [ + 302, + 501, + 525, + 550 + ], + "type": "text", + "content": ": HYMA is significantly more compute-efficient than independently stitching model pairs, as shown w.r.t Best Guess (BG) and Grid Search (GS)." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 574, + 500, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 574, + 500, + 587 + ], + "spans": [ + { + "bbox": [ + 302, + 574, + 500, + 587 + ], + "type": "text", + "content": "5.2 Linear, " + }, + { + "bbox": [ + 302, + 574, + 500, + 587 + ], + "type": "inline_equation", + "content": "\\mathbf{MLP}_1,\\mathbf{MLP}_2\\mid \\mathbf{N}\\times \\mathbf{M} = 27" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "spans": [ + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "text", + "content": "After demonstrating the efficacy of HYMA on a small search space of " + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "inline_equation", + "content": "N \\times M = 3" + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "text", + "content": " combinations and for " + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "inline_equation", + 
"content": "\\mathrm{MLP}_1" + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "text", + "content": ", we scale up the number of combinations in comparison to " + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "inline_equation", + "content": "N \\times M = 27" + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "text", + "content": ", and vary the capacity of the connectors in use (Linear, " + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_1" + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_2" + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "text", + "content": "). This yields 81 total VLMs. Table 3 shows the performance of HYMA in terms of a search, i.e., how well it matches the true ranking given by full grid search. Performance gain " + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "inline_equation", + "content": "(\\Delta)" + }, + { + "bbox": [ + 301, + 592, + 526, + 740 + ], + "type": "text", + "content": " is also reported across the Random, UniT-1, Ask-LLM, and Oracle (GS) baselines for each task and dataset employed." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 748, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 526, + 775 + ], + "type": "text", + "content": "Multi-modal Image classification: For multi-modal image classification on the ImageNet-1K," + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19790" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 68, + 526, + 289 + ], + "blocks": [ + { + "bbox": [ + 68, + 68, + 526, + 289 + ], + "lines": [ + { + "bbox": [ + 68, + 68, + 526, + 289 + ], + "spans": [ + { + "bbox": [ + 68, + 68, + 526, + 289 + ], + "type": "table", + "html": "
TaskDatasetConnectorNDCG @ k (↑)ρ (↑)ΔPerformance (↑)
k = 5k = 7k = 10N × M = 27RandomUniT-1Ask-LLMOracle (GS)
MICIN-1KLinear1.01.00.980.97+6.93+13.51+13.51-4.14
MLP11.00.980.960.91+4.78+11.11+11.11-4.47
MLP20.960.930.920.89+3.89+10.34+10.34-5.91
CIFAR-100Linear0.880.960.970.97+6.91+38.50+38.50-3.73
MLP10.830.960.970.86+6.31+35.21+35.21-1.85
MLP20.740.930.950.90+5.01+35.48+35.48-3.06
ITMMSCOCOLinear0.960.950.990.99+4.94+31.62+33.20-2.0
MLP10.920.910.970.99+3.72+28.41+28.41-3.06
MLP20.960.910.970.98+2.22+27.30+27.30-4.03
Flickr-8KLinear0.950.990.990.99+5.18+26.68+7.83-2.06
MLP11.01.00.990.99+3.54+23.32+23.32-2.26
MLP20.920.990.960.98+1.92+21.44+21.44-3.25
VQAOK-VQALinear0.950.950.980.99+0.81+7.86+7.86-0.43
MLP10.940.900.950.95+0.49+6.63+6.63-0.77
MLP20.990.930.930.97+0.01+6.81+6.81-1.44
Text-VQALinear0.940.970.990.97+1.31+3.64+3.64-0.06
MLP10.920.870.900.87+0.72+2.59+2.59-0.32
MLP20.850.870.860.87+0.72+2.28+2.28-0.59
", + "image_path": "045b506a940a47d9666098e1cd48c67abbfb7e8401a7def1d98e3016fd82d3b1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "lines": [ + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "spans": [ + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "type": "text", + "content": "Table 3: HYMA VLM Results: We report the ranking similarity between HYMA and the Oracle—Grid Search (GS)—using NDCG and Spearman's " + }, + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "type": "text", + "content": ". Across all three connector configurations, HYMA exhibits a strong correlation with GS rankings. Additionally, we show the performance gain " + }, + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "type": "inline_equation", + "content": "(\\Delta)" + }, + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "type": "text", + "content": " of the best connector obtained post stitching via HYMA, compared to four baselines: (a) Random: Random pairing and stitching (averaged over five runs), (b) UniT-1: Stitching the best unimodal models based on unimodal benchmarks, (c) Ask-LLM: Stitching based on model pairs selected via prompting Claude 4 Sonnet (detailed prompt in appendix), and (d) Oracle: Full grid search over all possible configurations on the complete model zoo " + }, + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "type": "inline_equation", + "content": "(N \\times M = 27)" + }, + { + "bbox": [ + 66, + 296, + 526, + 384 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "spans": [ + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "content": "we find that the ranking order of the stitching performed by HYMA reflects that found by full grid search to a strong extent. This is indicated by the normalized discounted cumulative gain (NDCG @ " + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "content": ") computed for the top 5 and 7 ranks. Additionally, Spearman's " + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "content": " across all " + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "inline_equation", + "content": "N \\times M = 27" + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "content": " ranks further corroborates this. Notably, both NDCG @ " + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "content": " and Spearman's " + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "content": " for CIFAR-100 are lower in value w.r.t ImageNet-1K. In terms of performance gains, HYMA improves upon random selection of encoder pairs to stitch, as well as selecting encoders based on their uni-modal performance. Interestingly, we find that asking a massively pretrained LLM such as Claude 4 Sonnet yields a similar result to UniT-1. 
For Oracle (GS), we find that the best stitches generated by HYMA underperform by an average of " + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "inline_equation", + "content": "4.84\\%" + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "content": " and 2.88 for ImageNet-1K and CIFAR-100 across all connector types. However, this occurs at " + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "inline_equation", + "content": "10 \\times" + }, + { + "bbox": [ + 66, + 402, + 291, + 661 + ], + "type": "text", + "content": " fewer FLOPs spent." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "content": "Image-text matching: For image-text matching, we find higher values of Spearman's " + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "content": ", indicating that the stitches predicted by HYMA correlate strongly in performance with those obtained by full grid search on both MSCOCO and Flickr-8K. Similar to image-classification, we find that rank correlation metrics show more positive values for one dataset, Flickr-8K over the other, i.e., MSCOCO." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 402, + 526, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 402, + 526, + 497 + ], + "spans": [ + { + "bbox": [ + 302, + 402, + 526, + 497 + ], + "type": "text", + "content": "In contrast, for image-text matching, we find that the performance gains (in recall@5) exhibited w.r.t Ask-LLM baseline do not match those of UniT-1 in cases such as Linear connectors. 
In comparison to Oracle (GS), the average reduction in recall@5 is 3.03 for MSCOCO and 2.52 for Flickr-8K across all connectors." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 505, + 527, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 505, + 527, + 681 + ], + "spans": [ + { + "bbox": [ + 302, + 505, + 527, + 681 + ], + "type": "text", + "content": "Visual question answering: In visual question answering on both OK-VQA and Text-VQA, Linear connectors exhibit the highest values in terms of NDCG @ " + }, + { + "bbox": [ + 302, + 505, + 527, + 681 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 302, + 505, + 527, + 681 + ], + "type": "text", + "content": ", Spearman's " + }, + { + "bbox": [ + 302, + 505, + 527, + 681 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 302, + 505, + 527, + 681 + ], + "type": "text", + "content": ", as well as performance gain. In line with the preceding evaluation tasks, i.e., multi-modal image classification and image-text matching, we find that connectors predicted by HYMA outperform those found by the Random, UniT-1 and Ask-LLM baselines. Most notably, VQA emerges as the task with the least performance gap between HYMA and Oracle (GS), with 0.88 and 0.32 being the difference in the respective recall@5 values across both datasets." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 690, + 420, + 702 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 690, + 420, + 702 + ], + "spans": [ + { + "bbox": [ + 302, + 690, + 420, + 702 + ], + "type": "text", + "content": "5.3 HYMA vs AutoPair" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "type": "text", + "content": "We conduct a step-wise search-and-prune procedure over 6 image encoders (evenly split across embedding dimensions 768 and 1024) and 2 text encoders (also evenly split across embedding dimensions 768 and 1024). First we initialize a FLOPs" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 792 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 792 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 792 + ], + "type": "text", + "content": "19791" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 68, + 526, + 151 + ], + "blocks": [ + { + "bbox": [ + 71, + 68, + 526, + 151 + ], + "lines": [ + { + "bbox": [ + 71, + 68, + 526, + 151 + ], + "spans": [ + { + "bbox": [ + 71, + 68, + 526, + 151 + ], + "type": "table", + "html": "
ConnectorMulti-modal Image ClassificationΔPerformance (↑)
ImageNet-1KCIFAR-100Image-Text MatchingVisual Question Answering
MSCOCOFlickr-8KOK-VQAText-VQA
Linear+11.28+10.62+11.04+11.14+2.12+2.29
MLP1+4.50+7.12+2.08+3.69+0.24+0.14
MLP2+3.25+6.21+3.62+4.55+0.75+0.24
", + "image_path": "15c119272518c7d67614d4466b5793703283f3a0111f54afe6f8e745db5fdaf0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 158, + 525, + 185 + ], + "lines": [ + { + "bbox": [ + 67, + 158, + 525, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 158, + 525, + 185 + ], + "type": "text", + "content": "Table 4: HYMA vs AutoPair Results (" + }, + { + "bbox": [ + 67, + 158, + 525, + 185 + ], + "type": "inline_equation", + "content": "N \\times M = 12" + }, + { + "bbox": [ + 67, + 158, + 525, + 185 + ], + "type": "text", + "content": "): We show the performance gain (" + }, + { + "bbox": [ + 67, + 158, + 525, + 185 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 67, + 158, + 525, + 185 + ], + "type": "text", + "content": ") of the best connector (for all connector configurations) obtained post stitching via HYMA, compared to that obtained via AutoPair." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 66, + 205, + 290, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 205, + 290, + 353 + ], + "spans": [ + { + "bbox": [ + 66, + 205, + 290, + 353 + ], + "type": "text", + "content": "budget equal to the total FLOP cost of searching over " + }, + { + "bbox": [ + 66, + 205, + 290, + 353 + ], + "type": "inline_equation", + "content": "N \\times M = 12" + }, + { + "bbox": [ + 66, + 205, + 290, + 353 + ], + "type": "text", + "content": " pairs with HYMA for 10 epochs. Next, our procedure trains connectors between all 12 pairs for 2 epochs each, after which we rank each connector by its performance on a given task and dataset. After the ranking, we prune all pairs that exhibit performance that is less than or equal to the median performance. This is repeated until we exhaust the budget. If we are left with only one model after iterative pruning, we train it until the budget is exhausted." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 354, + 291, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 291, + 423 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 291, + 423 + ], + "type": "text", + "content": "As shown in Table 4, stitches obtained by AutoPair exhibit significantly lower performance than those obtained via HYMA, as the budget finishes before the individually trained connectors can reach strong performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 439, + 161, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 439, + 161, + 451 + ], + "spans": [ + { + "bbox": [ + 67, + 439, + 161, + 451 + ], + "type": "text", + "content": "6 Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 464, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 464, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 464, + 291, + 775 + ], + "type": "text", + "content": "Vision language models. CLIP, one of the most popular VLMs, is contrastively pretrained on approximately 400M image-text pairs. Beyond multimodal image classification and image-text retrieval, it has emerged to be applicable for tasks such as open-set attribute recognition (Chen et al., 2023) and object detection (Minderer et al.). Moreover, it inspires modifications to the default InfoNCE recipe, such as image captioning with contrastive pretraining, using sigmoid in place of softmax on the InfoNCE similarity matrix, etc. (Li et al., 2022; Alayrac et al., 2022; Singh et al., 2022; Zhai et al., 2023; Singh et al., 2024). Additionally, datasets oriented towards CLIP-like vision-language pretraining have been released in recent times, including (Schuhmann et al., 2021, 2022; Thomee et al., 2016; Changpinyo et al., 2021; Desai et al., 2021), often of the scale of millions of (image, caption) pairs. 
As a foundation model, CLIP has been applied in image synthesis (Rombach et al., 2022; Ramesh et al., 2022), and has been extended to modalities such as video (Chai et al., 2023; Wang et al., 2022) and audio (Guzhov et al., 2022). Our" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 205, + 526, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 205, + 526, + 244 + ], + "spans": [ + { + "bbox": [ + 302, + 205, + 526, + 244 + ], + "type": "text", + "content": "work investigates how to efficiently develop multiple CLIP-like models from pretrained uni-modal encoder states." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 254, + 526, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 254, + 526, + 552 + ], + "spans": [ + { + "bbox": [ + 302, + 254, + 526, + 552 + ], + "type": "text", + "content": "Hypernetworks in LLMs and multi-modal domains. Hypernetworks (Ha et al., 2016; Schmidhuber, 1992) have been shown useful in improving training efficiency and adaptability in many machine learning pipelines (Chauhan et al., 2024). Several works explored the advantages of hypernetworks for MLLMs and multi-modal models. Specifically, (Zhang et al., 2024) proposes HyperLLaVA that predicts project parameters for MLLMs given task input. Hypernetworks have also been used to predict the parameters of the adapters in parameter efficient fine-tuning of LLMs (Mahabadi et al., 2021; Phang et al., 2023) and VLMs (Zhang et al., 2022). HyperCLIP (Akinwande et al., 2024), trains a hypernetwork to predict the parameters of image encoder layers given the task. Overall, these models improve training efficiency and adaptability of a single combination on new tasks, but require grid search for more pairs. Our work addresses this limitation by training the joint hypernetwork for multiple encoders improving the efficiency and performance significantly." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 563, + 381, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 563, + 381, + 576 + ], + "spans": [ + { + "bbox": [ + 302, + 563, + 381, + 576 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 586, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 586, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 586, + 526, + 775 + ], + "type": "text", + "content": "We present a novel investigation of the usage of hypernetworks for the M-OPS problem. HYMA is able to subvert expensive grid search across all uni-model model combinations, by learning connector parameters jointly, producing strongly initialised connectors. We demonstrate that HYMA is an efficient solution to the M-OPS problem. Also, HYMA's design affords stitching of modalities beyond only image-text: other avenues include, for instance, audio-text. We hope to inspire future work that utilizes hypernetworks for similar problems, where training several small neural networks can be expressed as a generative model that learns the parameters of the target network." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19792" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 70, + 130, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 70, + 130, + 84 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 130, + 84 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "spans": [ + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "text", + "content": "Hypernetwork training can be less stable than training a standard connector (i.e., a single MLP). Training instabilities in hypernetworks have been previously studied (Ortiz et al., 2023; Chauhan et al., 2024), and are not unique to the specific design of our framework. However, since " + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "inline_equation", + "content": "H_{\\phi}" + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "text", + "content": " acts as a shared generating function across multiple connectors, the interaction of gradients from diverse model combinations—as well as their interplay with " + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "inline_equation", + "content": "B_{m}" + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "text", + "content": "—can still lead to instability during training. 
To stabilize training, we tune the " + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "inline_equation", + "content": "\\beta_{2}" + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "text", + "content": " parameter of the Adam optimizer in accordance with recommendations from the optimization literature (Cattaneo and Shigida, 2025). In practice, we observed that including certain models (for example: the MaxViT family (Tu et al., 2022)) in the " + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "inline_equation", + "content": "N \\times M" + }, + { + "bbox": [ + 69, + 92, + 291, + 389 + ], + "type": "text", + "content": " pool led to instability, and thus these models were excluded from our final zoo. This limitation points to the need for a deeper investigation into the training dynamics and architectural properties of similar systems, which could inform strategies to improve both stability and performance of the hypernetwork." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 400, + 170, + 413 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 400, + 170, + 413 + ], + "spans": [ + { + "bbox": [ + 69, + 400, + 170, + 413 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 421, + 291, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 421, + 291, + 569 + ], + "spans": [ + { + "bbox": [ + 69, + 421, + 291, + 569 + ], + "type": "text", + "content": "The authors thank the International Max Planck Research School for Intelligent Systems (IMPRS-IS) for supporting Diganta Misra. Jaisidh Singh is supported by the Konrad Zuse School of Excellence in Learning and Intelligent Systems (ELIZA) through the DAAD programme Konrad Zuse Schools of Excellence in Artificial Intelligence, sponsored by the Federal Ministry of Education and Research. 
This work was enabled by compute resources provided by Max Planck Institute for Intelligent Systems Tübingen & Amazon Science Hub." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19793" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 127, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 127, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 127, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 90, + 290, + 774 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 90, + 290, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 90, + 290, + 146 + ], + "spans": [ + { + "bbox": [ + 69, + 90, + 290, + 146 + ], + "type": "text", + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, and 1 others. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 155, + 290, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 155, + 290, + 210 + ], + "spans": [ + { + "bbox": [ + 69, + 155, + 290, + 210 + ], + "type": "text", + "content": "Victor Akinwande, Mohammad Sadegh Norouzzadeh, Devin Willmott, Anna Bair, Madan Ravi Ganesh, and J Zico Kolter. 2024. Hyperclip: Adapting vision-language models with hypernetworks. arXiv preprint arXiv:2412.16777." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 221, + 290, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 221, + 290, + 331 + ], + "spans": [ + { + "bbox": [ + 69, + 221, + 290, + 331 + ], + "type": "text", + "content": "Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob L Menick, Sebastian Borgeaud, and 8 others. 2022. Flamingo: a visual language model for few-shot learning. In Advances in Neural Information Processing Systems, volume 35, pages 23716-23736. Curran Associates, Inc." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 340, + 290, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 340, + 290, + 362 + ], + "spans": [ + { + "bbox": [ + 69, + 340, + 290, + 362 + ], + "type": "text", + "content": "Anthropic. Claude 4 sonnet. https://www.anthropic.com/claude." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 372, + 290, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 372, + 290, + 417 + ], + "spans": [ + { + "bbox": [ + 69, + 372, + 290, + 417 + ], + "type": "text", + "content": "Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, and 1 others. 2023. Qwen technical report. arXiv preprint arXiv:2309.16609." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 426, + 290, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 426, + 290, + 481 + ], + "spans": [ + { + "bbox": [ + 69, + 426, + 290, + 481 + ], + "type": "text", + "content": "Jinhe Bi, Yifan Wang, Danqi Yan, Xun Xiao, Artur Hecker, Volker Tresp, and Yunpu Ma. 2025. Prism: Self-pruning intrinsic selection method for training-free multimodal data selection. 
Preprint, arXiv:2502.12119." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 491, + 290, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 491, + 290, + 578 + ], + "spans": [ + { + "bbox": [ + 69, + 491, + 290, + 578 + ], + "type": "text", + "content": "Stella Biderman, Hailey Schoelkopf, Quentin Gregory Anthony, Herbie Bradley, Kyle O'Brien, Eric Hallahan, Mohammad Aflah Khan, Shivanshu Purohit, USVSN Sai Prashanth, Edward Raff, and 1 others. 2023. Pythia: A suite for analyzing large language models across training and scaling. In International Conference on Machine Learning, pages 2397-2430. PMLR." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 588, + 290, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 588, + 290, + 634 + ], + "spans": [ + { + "bbox": [ + 69, + 588, + 290, + 634 + ], + "type": "text", + "content": "Matias D. Cattaneo and Boris Shigida. 2025. Tuning adam(w): Default " + }, + { + "bbox": [ + 69, + 588, + 290, + 634 + ], + "type": "inline_equation", + "content": "\\beta 2" + }, + { + "bbox": [ + 69, + 588, + 290, + 634 + ], + "type": "text", + "content": " may be too large. https://mdcattaneo.github.io/papers/Cattaneo-Shigida_2025_TuningAdam.pdf." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 643, + 290, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 643, + 290, + 698 + ], + "spans": [ + { + "bbox": [ + 69, + 643, + 290, + 698 + ], + "type": "text", + "content": "Wenhao Chai, Xun Guo, Gaoang Wang, and Yan Lu. 2023. Stablevideo: Text-driven consistency-aware diffusion video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23040-23050." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 708, + 290, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 708, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 708, + 290, + 774 + ], + "type": "text", + "content": "Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. 2021. Conceptual " + }, + { + "bbox": [ + 69, + 708, + 290, + 774 + ], + "type": "inline_equation", + "content": "12\\mathrm{m}" + }, + { + "bbox": [ + 69, + 708, + 290, + 774 + ], + "type": "text", + "content": ": Pushing web-scale image-text pre-training to recognize long-tail visual concepts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3558-3568." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 774 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 305, + 72, + 525, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 525, + 116 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 525, + 116 + ], + "type": "text", + "content": "Vinod Kumar Chauhan, Jiandong Zhou, Ping Lu, Soheila Molaei, and David A Clifton. 2024. A brief review of hypernetworks in deep learning. Artificial Intelligence Review, 57(9):250." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 127, + 525, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 127, + 525, + 193 + ], + "spans": [ + { + "bbox": [ + 304, + 127, + 525, + 193 + ], + "type": "text", + "content": "Keyan Chen, Xiaolong Jiang, Yao Hu, Xu Tang, Yan Gao, Jianqi Chen, and Weidi Xie. 2023. Ovarnet: Towards open-vocabulary object attribute recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23518-23527." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 205, + 525, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 205, + 525, + 261 + ], + "spans": [ + { + "bbox": [ + 304, + 205, + 525, + 261 + ], + "type": "text", + "content": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. 2009. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 248-255. IEEE." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 272, + 525, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 272, + 525, + 317 + ], + "spans": [ + { + "bbox": [ + 304, + 272, + 525, + 317 + ], + "type": "text", + "content": "Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson. 2021. Redcaps: Web-curated image-text data created by the people, for the people. arXiv preprint arXiv:2111.11431." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 327, + 525, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 327, + 525, + 372 + ], + "spans": [ + { + "bbox": [ + 304, + 327, + 525, + 372 + ], + "type": "text", + "content": "Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. 2023. Data filtering networks. Preprint, arXiv:2309.17425." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 384, + 525, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 384, + 525, + 439 + ], + "spans": [ + { + "bbox": [ + 304, + 384, + 525, + 439 + ], + "type": "text", + "content": "Andrey Guzhov, Federico Raue, Jorn Hees, and Andreas Dengel. 2022. Audioclip: Extending clip to image, text and audio. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 976-980. IEEE." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 450, + 525, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 525, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 525, + 473 + ], + "type": "text", + "content": "David Ha, Andrew Dai, and Quoc V Le. 2016. Hypernetworks. arXiv preprint arXiv:1609.09106." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 484, + 525, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 484, + 525, + 529 + ], + "spans": [ + { + "bbox": [ + 304, + 484, + 525, + 529 + ], + "type": "text", + "content": "Micah Hodosh, Peter Young, and Julia Hockenmaier. 2013. Framing image description as a ranking task: Data, models and evaluation metrics. Journal of Artificial Intelligence Research, 47:853-899." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 540, + 525, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 540, + 525, + 596 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 525, + 596 + ], + "type": "text", + "content": "Junlong Jia, Ying Hu, Xi Weng, Yiming Shi, Miao Li, Xingjian Zhang, Baichuan Zhou, Ziyu Liu, Jie Luo, Lei Huang, and 1 others. 2024. Tinyllava factory: A modularized codebase for small-scale large multi-modal models. arXiv preprint arXiv:2405.11788." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 607, + 525, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 607, + 525, + 661 + ], + "spans": [ + { + "bbox": [ + 304, + 607, + 525, + 661 + ], + "type": "text", + "content": "Boris Knyazev, Doha Hwang, and Simon Lacoste-Julien. 2023. Can we scale transformers to predict parameters of diverse imagenet models? In International Conference on Machine Learning, pages 17243-17259. PMLR." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 674, + 525, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 674, + 525, + 708 + ], + "spans": [ + { + "bbox": [ + 304, + 674, + 525, + 708 + ], + "type": "text", + "content": "Alex Krizhevsky, Geoffrey Hinton, and 1 others. 2009. Learning multiple layers of features from tiny images." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 719, + 525, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 719, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 719, + 525, + 774 + ], + "type": "text", + "content": "Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. 2023. Blip-2: Bootstrapping language-image pretraining with frozen image encoders and large language models. In International conference on machine learning, pages 19730-19742. PMLR." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "type": "text", + "content": "19794" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "type": "text", + "content": "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022. Blip: Bootstrapping language-image pretraining for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 137, + 290, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 137, + 290, + 193 + ], + "spans": [ + { + "bbox": [ + 69, + 137, + 290, + 193 + ], + "type": "text", + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In European Conference on Computer Vision, pages 740-755. Springer." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 201, + 289, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 201, + 289, + 235 + ], + "spans": [ + { + "bbox": [ + 69, + 201, + 289, + 235 + ], + "type": "text", + "content": "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. 2024. Visual instruction tuning. Advances in Neural Information Processing Systems, 36." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 244, + 290, + 299 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 244, + 290, + 299 + ], + "spans": [ + { + "bbox": [ + 69, + 244, + 290, + 299 + ], + "type": "text", + "content": "Rabeeh Karimi Mahabadi, Sebastian Ruder, Mostafa Dehghani, and James Henderson. 2021. Parameter-efficient multi-task fine-tuning for transformers via shared hypernetworks. arXiv preprint arXiv:2106.04489." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 309, + 290, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 309, + 290, + 364 + ], + "spans": [ + { + "bbox": [ + 69, + 309, + 290, + 364 + ], + "type": "text", + "content": "Anas Mahmoud, Mostafa Elhoushi, Amro Abbas, Yu Yang, Newsha Ardalani, Hugh Leather, and Ari Morcos. 2024. Sieve: Multimodal dataset pruning using image captioning models. Preprint, arXiv:2310.02110." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 374, + 290, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 374, + 290, + 439 + ], + "spans": [ + { + "bbox": [ + 69, + 374, + 290, + 439 + ], + "type": "text", + "content": "Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. 2019. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 3195-3204." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 449, + 289, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 449, + 289, + 483 + ], + "spans": [ + { + "bbox": [ + 69, + 449, + 289, + 483 + ], + "type": "text", + "content": "Jack Merullo, Louis Castricato, Carsten Eickhoff, and Ellie Pavlick. 2022. Linearly mapping from image to text space. arXiv preprint arXiv:2209.15162." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 492, + 290, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 492, + 290, + 558 + ], + "spans": [ + { + "bbox": [ + 69, + 492, + 290, + 558 + ], + "type": "text", + "content": "M Minderer, A Gritsenko, A Stone, M Neumann, D Weissenborn, A Dosovitskiy, A Mahendran, A Arnab, M Dehghani, Z Shen, and 1 others. Simple open-vocabulary object detection with vision transformers. arxiv 2022. arXiv preprint arXiv:2205.06230." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 568, + 290, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 568, + 290, + 602 + ], + "spans": [ + { + "bbox": [ + 69, + 568, + 290, + 602 + ], + "type": "text", + "content": "Aaron van den Oord, Yazhe Li, and Oriol Vinyals. 2018. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 611, + 290, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 611, + 290, + 655 + ], + "spans": [ + { + "bbox": [ + 69, + 611, + 290, + 655 + ], + "type": "text", + "content": "Jose Javier Gonzalez Ortiz, John Guttag, and Adrian Dalca. 2023. Magnitude invariant parametrizations improve hypernetwork learning. arXiv preprint arXiv:2304.07645." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 665, + 290, + 740 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 665, + 290, + 740 + ], + "spans": [ + { + "bbox": [ + 69, + 665, + 290, + 740 + ], + "type": "text", + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, and 1 others. 2019. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "type": "text", + "content": "Jason Phang, Yi Mao, Pengcheng He, and Weizhu Chen. 2023. Hypertuning: Toward adapting large" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 773 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 314, + 72, + 525, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 525, + 105 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 525, + 105 + ], + "type": "text", + "content": "language models without back-propagation. In International Conference on Machine Learning, pages 27854-27875. PMLR." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 115, + 525, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 115, + 525, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 115, + 525, + 191 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, and 1 others. 2021. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 201, + 525, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 201, + 525, + 246 + ], + "spans": [ + { + "bbox": [ + 304, + 201, + 525, + 246 + ], + "type": "text", + "content": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, and 1 others. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 255, + 525, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 255, + 525, + 300 + ], + "spans": [ + { + "bbox": [ + 304, + 255, + 525, + 300 + ], + "type": "text", + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. 2022. Hierarchical text-conditional image generation with clip latents, 2022. arXiv preprint arXiv:2204.06125." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 309, + 525, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 309, + 525, + 364 + ], + "spans": [ + { + "bbox": [ + 304, + 309, + 525, + 364 + ], + "type": "text", + "content": "Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. 
Association for Computational Linguistics." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 374, + 525, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 374, + 525, + 439 + ], + "spans": [ + { + "bbox": [ + 304, + 374, + 525, + 439 + ], + "type": "text", + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10684-10695." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 449, + 525, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 525, + 503 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 525, + 503 + ], + "type": "text", + "content": "Elan Rosenfeld, Preetum Nakkiran, Hadi Pouransari, Oncel Tuzel, and Fartash Faghri. 2022. Ape: Aligning pretrained encoders to quickly learn aligned multimodal representations. arXiv preprint arXiv:2210.03927." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 514, + 525, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 525, + 547 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 525, + 547 + ], + "type": "text", + "content": "Jürgen Schmidhuber. 1992. Learning to control fast-weight memories: An alternative to dynamic recurrent networks. Neural Computation, 4(1):131-139." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 556, + 525, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 556, + 525, + 634 + ], + "spans": [ + { + "bbox": [ + 304, + 556, + 525, + 634 + ], + "type": "text", + "content": "Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, and 1 others. 2022. 
Laion-5b: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 643, + 525, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 643, + 525, + 708 + ], + "spans": [ + { + "bbox": [ + 304, + 643, + 525, + 708 + ], + "type": "text", + "content": "Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. 2021. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 719, + 525, + 773 + ], + "type": "text", + "content": "Sheng Shen, Liunian Harold Li, Hao Tan, Mohit Bansal, Anna Rohrbach, Kai-Wei Chang, Zhewei Yao, and Kurt Keutzer. 2021. How much can clip benefit vision-and-language tasks? arXiv preprint arXiv:2107.06383." 
+ } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19795" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 773 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "text", + "content": "Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 2022. Flava: A foundational language and vision alignment model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15638-15650." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 147, + 290, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 147, + 290, + 202 + ], + "spans": [ + { + "bbox": [ + 69, + 147, + 290, + 202 + ], + "type": "text", + "content": "Amanpreet Singh, Vivek Natarjan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. 2019. Towards vqa models that can read. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8317-8326." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 210, + 289, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 210, + 289, + 254 + ], + "spans": [ + { + "bbox": [ + 69, + 210, + 289, + 254 + ], + "type": "text", + "content": "Jaisidh Singh, Ishaan Shrivastava, Mayank Vatsa, Richa Singh, and Aparna Bharati. 2024. 
Learn\" no\" to say\" yes\" better: Improving vision-language models via negations. arXiv preprint arXiv:2403.20312." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 263, + 290, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 263, + 290, + 306 + ], + "spans": [ + { + "bbox": [ + 69, + 263, + 290, + 306 + ], + "type": "text", + "content": "Haoyu Song, Li Dong, Wei-Nan Zhang, Ting Liu, and Furu Wei. 2022. Clip models are few-shot learners: Empirical studies on vqa and visual entailment. Preprint, arXiv:2203.07190." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 316, + 290, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 316, + 290, + 370 + ], + "spans": [ + { + "bbox": [ + 69, + 316, + 290, + 370 + ], + "type": "text", + "content": "Bart Thomee, David A Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. 2016. Yfcc100m: The new data in multimedia research. Communications of the ACM, (2):64-73." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 379, + 290, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 379, + 290, + 444 + ], + "spans": [ + { + "bbox": [ + 69, + 379, + 290, + 444 + ], + "type": "text", + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, and 1 others. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 454, + 290, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 454, + 290, + 509 + ], + "spans": [ + { + "bbox": [ + 69, + 454, + 290, + 509 + ], + "type": "text", + "content": "Zhengzhong Tu, Hossein Talebi, Han Zhang, Feng Yang, Peyman Milanfar, Alan Bovik, and Yinxiao Li. 2022. Maxvit: Multi-axis vision transformer. 
In European conference on computer vision, pages 459-479. Springer." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 517, + 290, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 517, + 290, + 593 + ], + "spans": [ + { + "bbox": [ + 69, + 517, + 290, + 593 + ], + "type": "text", + "content": "Junke Wang, Dongdong Chen, Zuxuan Wu, Chong Luo, Luowei Zhou, Yucheng Zhao, Yujia Xie, Ce Liu, YuGang Jiang, and Lu Yuan. 2022. Omnivl: One foundation model for image-language and video-language tasks. In Advances in Neural Information Processing Systems, volume 35, pages 5696-5710. Curran Associates, Inc." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 602, + 290, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 602, + 290, + 636 + ], + "spans": [ + { + "bbox": [ + 69, + 602, + 290, + 636 + ], + "type": "text", + "content": "Ross Wightman. 2019. Pytorch image models. https://github.com/rwrightman/pytorch-image-models." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 644, + 290, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 644, + 290, + 732 + ], + "spans": [ + { + "bbox": [ + 69, + 644, + 290, + 732 + ], + "type": "text", + "content": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, and 3 others. 2020. Huggingface's transformers: State-of-the-art natural language processing. Preprint, arXiv:1910.03771." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 740, + 290, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 740, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 740, + 290, + 773 + ], + "type": "text", + "content": "Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. 2023. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 297 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 314, + 72, + 524, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 524, + 94 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 524, + 94 + ], + "type": "text", + "content": "International Conference on Computer Vision, pages 11975-11986." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 103, + 525, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 103, + 525, + 168 + ], + "spans": [ + { + "bbox": [ + 304, + 103, + 525, + 168 + ], + "type": "text", + "content": "Wenqiao Zhang, Tianwei Lin, Jiang Liu, Fangxun Shu, Haoyuan Li, Lei Zhang, He Wanggui, Hao Zhou, Zheqi Lv, Hao Jiang, and 1 others. 2024. Hyperllava: Dynamic visual and language expert tuning for multimodal large language models. arXiv preprint arXiv:2403.13447." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 178, + 525, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 178, + 525, + 243 + ], + "spans": [ + { + "bbox": [ + 304, + 178, + 525, + 243 + ], + "type": "text", + "content": "Zhengkun Zhang, Wenya Guo, Xiaojun Meng, Yasheng Wang, Yadao Wang, Xin Jiang, Qun Liu, and Zhenglu Yang. 2022. Hyperpelt: Unified parameter-efficient language model tuning for both language and vision-and-language tasks. arXiv preprint arXiv:2203.03878." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 252, + 525, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 252, + 525, + 297 + ], + "spans": [ + { + "bbox": [ + 304, + 252, + 525, + 297 + ], + "type": "text", + "content": "Xun Zhu, Zheng Zhang, Xi Chen, Yiming Shi, Miao Li, and Ji Wu. 2025. Connector-s: A survey of connectors in multi-modal large language models. arXiv preprint arXiv:2502.11453." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "type": "text", + "content": "19796" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 143, + 69, + 243, + 88 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 69, + 243, + 88 + ], + "spans": [ + { + "bbox": [ + 143, + 69, + 243, + 88 + ], + "type": "text", + "content": "APPENDIX" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 99, + 246, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 99, + 246, + 127 + ], + "spans": [ + { + "bbox": [ + 68, + 99, + 246, + 127 + ], + "type": "text", + "content": "A HYMA for Multi-modal Large Language Models (MLLMs)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 136, + 291, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 136, + 291, + 298 + ], + "spans": [ + { + "bbox": [ + 67, + 136, + 291, + 298 + ], + "type": "text", + "content": "Another avenue for employing a predictive model for stitching can be MLLMs, which is significantly different from the VLMs case. 
Not only is the causal language modeling objective different from the contrastive scheme of VLMs, the connector stitches output image encoder representations to LLM input representations. In VLMs, the connector strictly stitches output representations, i.e., features produced by the text encoder are stitched to the space of image encoder features. We are interested in investigating how HYMA responds to this setting via the following experiments." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 308, + 192, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 192, + 322 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 192, + 322 + ], + "type": "text", + "content": "A.1 " + }, + { + "bbox": [ + 67, + 308, + 192, + 322 + ], + "type": "inline_equation", + "content": "\\mathbf{MLP}_1\\mid \\mathbf{N}\\times \\mathbf{M} = 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "spans": [ + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "text", + "content": "We stitch " + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "inline_equation", + "content": "N = 1" + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "text", + "content": " image encoder (ViT-S) with " + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "inline_equation", + "content": "M = 3" + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "text", + "content": " LLMs (GPT-2 (Radford et al., 2019), Pythia-160M (Biderman et al., 2023), Qwen-200M (Bai et al., 2023)) using a 2-layer MLP " + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "inline_equation", + "content": "(\\mathrm{MLP}_1)" + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "text", + "content": " as the connector. 
Figure 5 shows the performance of HYMA in comparison to the Grid Search and Best Guess baselines respectively. We report the performance of the best connectors identified by each search method, with the FLOPs incurred via training. We find that HYMA reduces the cost of searching of all combinations, bringing it lower than training only one connector for the " + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "inline_equation", + "content": "N\\times M = 3" + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "text", + "content": " case. The efficiency of HYMA over the two comparative baselines at the final state (rightmost point in each plot) is " + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "inline_equation", + "content": "3\\times" + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "text", + "content": " w.r.t. Grid Search and " + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "inline_equation", + "content": "1.3\\times" + }, + { + "bbox": [ + 67, + 327, + 291, + 612 + ], + "type": "text", + "content": " w.r.t. Best Guess. Further, all search methods yield comparable optimal perplexities, 51.1 for HYMA and 51.0 for Grid Search (or Best Guess) on MSCOCO. On Flickr-8K, the perplexities are found to be 72.4 and 70.4 for HYMA and Grid Search (or Best Guess) respectively." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 621, + 262, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 621, + 262, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 621, + 262, + 634 + ], + "type": "text", + "content": "A.2 Linear, " + }, + { + "bbox": [ + 67, + 621, + 262, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{MLP}_1,\\mathbf{MLP}_2\\mid \\mathbf{N}\\times \\mathbf{M} = 9" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "text", + "content": "We scale up our experimental setting to now use " + }, + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "inline_equation", + "content": "N = 3" + }, + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "text", + "content": " image encoders (Clip-ViT-B, DeiT3-B, ViT-S) and " + }, + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "inline_equation", + "content": "M = 3" + }, + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "text", + "content": " LLMs (GPT-2, Pythia-160M, Qwen-200M). Similar to the case for VLMs, we vary the complexity of the connector from a linear layer, to an MLP with 2 hidden layers. Evaluation is done similarly to the case with " + }, + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "inline_equation", + "content": "N \\times M = 3" + }, + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "text", + "content": " MLLM combinations, i.e., via image captioning on MSCOCO and Flickr-8K. As shown in Table 5. 
HYMA struggles to match true ranking of model pairs for the" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 527, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 527, + 288 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 527, + 288 + ], + "type": "text", + "content": "MLLM case. Specifically, it performs worse on connectors of lower complexity, and consistently under-performs in terms of validation perplexity. Careful observation shows that for " + }, + { + "bbox": [ + 302, + 71, + 527, + 288 + ], + "type": "inline_equation", + "content": "N \\times M = 9" + }, + { + "bbox": [ + 302, + 71, + 527, + 288 + ], + "type": "text", + "content": " MLLMs, the ranking of connectors predicted by HYMA follows a trend of uni-modal model performance (the best image encoder (Clip-ViT-B) and LLM (Qwen-200M) show the best performance. However, independent stitching does not show such behavior. Overall, connectors obtained via independent stitching outperform those obtained from HYMA by a significant margin, and the true ranking diverges notably from that predicted by HYMA. Investigations on disentangling the effects of the causal modeling loss and the change in stitched representation spaces is left as future work." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 298, + 420, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 298, + 420, + 312 + ], + "spans": [ + { + "bbox": [ + 303, + 298, + 420, + 312 + ], + "type": "text", + "content": "B Pretrained models" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 305, + 363, + 525, + 448 + ], + "blocks": [ + { + "bbox": [ + 302, + 321, + 525, + 346 + ], + "lines": [ + { + "bbox": [ + 302, + 321, + 525, + 346 + ], + "spans": [ + { + "bbox": [ + 302, + 321, + 525, + 346 + ], + "type": "text", + "content": "B.1 Image encoders (source: timm (Wightman, 2019))" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 305, + 363, + 525, + 448 + ], + "lines": [ + { + "bbox": [ + 305, + 363, + 525, + 448 + ], + "spans": [ + { + "bbox": [ + 305, + 363, + 525, + 448 + ], + "type": "table", + "html": "
Feature Dim.ModelShorthandParam. count (M)timm specifier
384ViT-SVS22.05vit_small_batch16_224.augreg_in21k_ft_in1k
DeiT-SDS22.05deit_small_batch16_224.fb_in1k
DeiT-3SD3S22.06deit3_small_batch16_224.fb_in1k
768ViT-BVB86.57vit_base_batch16_224.augreg_in21k_ft_in1k
DeiT-BDB86.57deit_base_batch16_224.fb_in1k
DeiT3-BD3B86.88deit3_base_batch16_224.fb_in22k_ft_in1k
Clip-viT-BCVB86.86vit_base_batch1_clip_224.laion2b_ft_in12k_in1k
1024ViT-LVL304.33vit_large_batch16_224.augreg_in21k_ft_in1k
Eva2-LE2L305.08eva02_large_batch14_448.mim_m38m_ft_in22k_in1k
DeiT3-LD3L304.37deit3_large_batch16_224.fb_in22k_ft_in1k
", + "image_path": "5cba6bfc859ef058d87329667ebd57b505269931c3ac17ae01308c32377255e1.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 305, + 544, + 525, + 606 + ], + "blocks": [ + { + "bbox": [ + 302, + 454, + 525, + 491 + ], + "lines": [ + { + "bbox": [ + 302, + 454, + 525, + 491 + ], + "spans": [ + { + "bbox": [ + 302, + 454, + 525, + 491 + ], + "type": "text", + "content": "Table 6: All pretrained image encoders used in our work are given above, along with their shorthand IDs that may be referred to in the main manuscript." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 302, + 502, + 481, + 530 + ], + "lines": [ + { + "bbox": [ + 302, + 502, + 481, + 530 + ], + "spans": [ + { + "bbox": [ + 302, + 502, + 481, + 530 + ], + "type": "text", + "content": "B.2 Text encoders & LLMs (source: huggingface (Wolf et al., 2020))" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 305, + 544, + 525, + 606 + ], + "lines": [ + { + "bbox": [ + 305, + 544, + 525, + 606 + ], + "spans": [ + { + "bbox": [ + 305, + 544, + 525, + 606 + ], + "type": "table", + "html": "
Feature Dim.ModelShorthandParam. count(M)huggingface specifier
384minilm-LmLL33.4sentence-transformers/all-MiniLM-L12-v2
768mpnet-BmpB109sentence-transformer/all-mynet-base-v2
1024roberta-LroL355Msentence-transformer/all-roberta-large-v1
768GPT-2g2137openai-community/gpt2
Pythia-160Mpy213EleutherAI/pythia-160m
Qwen-200Mqw203MiniLLM/MiniPLM-Qwen-200M
", + "image_path": "51e7ad63393193474db2144cb122534afd011d7f6c56124b44e3556e74f26166.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 614, + 525, + 650 + ], + "lines": [ + { + "bbox": [ + 302, + 614, + 525, + 650 + ], + "spans": [ + { + "bbox": [ + 302, + 614, + 525, + 650 + ], + "type": "text", + "content": "Table 7: All pretrained text encoders and LLMs used in our work are given above, along with their shorthand IDs that may be referred to in the main manuscript." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 671, + 455, + 686 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 671, + 455, + 686 + ], + "spans": [ + { + "bbox": [ + 302, + 671, + 455, + 686 + ], + "type": "text", + "content": "C Designing the Model Zoo" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "content": "While our empirical analysis suggests that models with larger parametric capacity or higher embedding dimensionality generally perform better after stitching, a natural question arises: why include smaller models in the model zoo at all? 
We justify their inclusion based on the following:" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19797" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 92, + 71, + 295, + 232 + ], + "blocks": [ + { + "bbox": [ + 92, + 71, + 295, + 232 + ], + "lines": [ + { + "bbox": [ + 92, + 71, + 295, + 232 + ], + "spans": [ + { + "bbox": [ + 92, + 71, + 295, + 232 + ], + "type": "image", + "image_path": "d5d13700476e418cbfdd431bb5f59da92b7c82226fcdf84dc9ad535c687a584c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "lines": [ + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "type": "text", + "content": "Figure 5: Evaluation of HYMA for MLLMs, on MSCOCO and Flickr-8K (" + }, + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "type": "inline_equation", + "content": "N = 1" + }, + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "type": "inline_equation", + "content": "M = 3" + }, + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "type": "inline_equation", + "content": "B_{m} = 1" + }, + { + "bbox": [ + 67, + 243, + 525, + 269 + ], + "type": "text", + "content": "). We report the model combination exhibiting the best final performance for each evaluation benchmark and search method." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 299, + 70, + 502, + 232 + ], + "blocks": [ + { + "bbox": [ + 299, + 70, + 502, + 232 + ], + "lines": [ + { + "bbox": [ + 299, + 70, + 502, + 232 + ], + "spans": [ + { + "bbox": [ + 299, + 70, + 502, + 232 + ], + "type": "image", + "image_path": "27b2fac4ecf2a2e335d945d982082b59b16c63e5ff7f4750f4101b809c4cf931.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 69, + 279, + 525, + 380 + ], + "blocks": [ + { + "bbox": [ + 69, + 279, + 525, + 380 + ], + "lines": [ + { + "bbox": [ + 69, + 279, + 525, + 380 + ], + "spans": [ + { + "bbox": [ + 69, + 279, + 525, + 380 + ], + "type": "table", + "html": "
DatasetConnectorNDCG @ k (↑)ρ (↑)N×M=9ΔPerplexity (↓)
k=5k=7k=9Rand. (n=5)UniT-1Ask-LLMOracle (GS)
MSCOCOLinear0.160.420.74-0.6+3.68+6.85+3.20+6.85
MLP10.650.790.890.35+0.65+2.2+2.20+3.5
MLP20.610.740.850.39+1.13+3.58+3.58+4.01
Flickr-8KLinear0.560.730.850.12+1.65+5.54-1.46+5.54
MLP10.770.820.900.45-0.1+1.30-1.30+4.5
MLP20.580.720.830.23-3.57-0.00-0.00-0.00
", + "image_path": "d7d924d3c68504556728b5168e5a71873a31dddeccabfb761334493da51388c7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "lines": [ + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "spans": [ + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "type": "text", + "content": "Table 5: HYMA MLLM Results: We report the ranking similarity between HYMA and the Oracle—Grid Search (GS)—using NDCG and Spearman's " + }, + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "type": "text", + "content": ". Across all three connector configurations, HYMA exhibits strong correlation with GS rankings. Additionally, we show the perplexity difference " + }, + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "type": "inline_equation", + "content": "(\\Delta)" + }, + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "type": "text", + "content": " of the best connector obtained post stitching via HYMA, compared to four baselines: (a) Random: random pairing and stitching (avg. over 5 runs); (b) UniT-1: stitching the best unimodal models; (c) Ask-LLM: model pairs picked by Claude 4 Sonnet; and (d) Oracle: Full grid search over all " + }, + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "type": "inline_equation", + "content": "N\\times M = 9" + }, + { + "bbox": [ + 66, + 389, + 525, + 461 + ], + "type": "text", + "content": " configurations." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 76, + 482, + 291, + 775 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 77, + 482, + 291, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 482, + 291, + 630 + ], + "spans": [ + { + "bbox": [ + 77, + 482, + 291, + 630 + ], + "type": "text", + "content": "1. 
First, including smaller models enables the construction of multi-modal models across a range of parametric capacities, which is crucial for deployment under varying computational or resource constraints. For example, an organization aiming to deploy multi-modal models at multiple scales would incur significantly higher training costs if relying on independent training for each configuration. In contrast, HYMA offers a substantially more cost-effective alternative." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 640, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 640, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 76, + 640, + 291, + 775 + ], + "type": "text", + "content": "2. Second, our empirical observations indicate that larger models are not always the best-performing choice when stitched into multimodal pairs. This motivates the inclusion of a diverse set of model configurations in our zoo to better explore the multi-modal design space. By covering a broader range of capacity combinations, HYMA facilitates a more comprehensive and efficient search, supported by observations from Figure 1 and Table 1." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 481, + 520, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 481, + 520, + 496 + ], + "spans": [ + { + "bbox": [ + 302, + 481, + 520, + 496 + ], + "type": "text", + "content": "D Training and hyper-parameter details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 507, + 526, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 507, + 526, + 669 + ], + "spans": [ + { + "bbox": [ + 302, + 507, + 526, + 669 + ], + "type": "text", + "content": "We tune hyperparameters for each trained model to maximize (i) validation performance, (ii) GPU utilization, and (iii) training stability. 
Our goal is to demonstrate that hypernetworks can efficiently approach the M-OPS problem that often requires a large amount of computational resources. Hence, we emphasize on the need to have maximum GPU utilization in order to present an efficiency-oriented method. We report the hyperparameters used for training connectors for VLMs along with the configuration for HYMA. We use 3 random seeds and report average performance in each experiment." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 680, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 680, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 680, + 526, + 775 + ], + "type": "text", + "content": "VLMs. Training individual connectors between VLMs uses hyperparameters that provides the best performance after 10 epochs of training. Our hyperparameter choice is similar to that of (Rosenfeld et al., 2022). Specifically, we use a batch size of " + }, + { + "bbox": [ + 302, + 680, + 526, + 775 + ], + "type": "inline_equation", + "content": "2^{14}" + }, + { + "bbox": [ + 302, + 680, + 526, + 775 + ], + "type": "text", + "content": ", the Adam optimizer, and a learning rate of " + }, + { + "bbox": [ + 302, + 680, + 526, + 775 + ], + "type": "inline_equation", + "content": "1e - 2" + }, + { + "bbox": [ + 302, + 680, + 526, + 775 + ], + "type": "text", + "content": " subject to a schedule that linearly warms" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19798" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 90, + 83, + 512, + 368 + ], + "blocks": [ + { + "bbox": [ + 90, + 83, + 512, + 368 + ], + 
"lines": [ + { + "bbox": [ + 90, + 83, + 512, + 368 + ], + "spans": [ + { + "bbox": [ + 90, + 83, + 512, + 368 + ], + "type": "text", + "content": "def train_hypernet(hypernet, data_iter, models_iter, optimizer, num_steps): hypernet.train() for step in range(num_steps): # first sample (image, caption) data with batch size B_d data_batch = next(data_iter) # then subsample the full NxM space of models with batch size B_m model_batch = next(model_batch) optimizer.zero_grad() # input to the hypernetwork are indices or ids of the respective pairs vlm_ids_in_full_zoo = get.ids_wrt_full_zoo(model_batch) # hypernet outputs parameters of the stitches between the pairs generated.params = hypernet(vlm_pair.ids) # mapped the data through the stitched model pairs # and compute multi-pair multi-modal loss loss = hypernet.forward_datasthrough( data_batch, generated.params, model_batch ) # back-propagate loss.backup() optimizer.step()" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 88, + 390, + 503, + 402 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 390, + 503, + 402 + ], + "spans": [ + { + "bbox": [ + 88, + 390, + 503, + 402 + ], + "type": "text", + "content": "Figure 6: PyTorch (Paszke et al., 2019) pseudocode for HYMA training procedure on " + }, + { + "bbox": [ + 88, + 390, + 503, + 402 + ], + "type": "inline_equation", + "content": "N \\times M" + }, + { + "bbox": [ + 88, + 390, + 503, + 402 + ], + "type": "text", + "content": " models." + } + ] + } + ], + "index": 1, + "type": "text" + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "spans": [ + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "content": "up the learning rate from 0 for 50 steps. After that, the learning rate is decayed to 0 following a cosine curve. 
Training HYMA for VLMs is quite sensitive to hyperparameters, as is to be expected from a complex network that outputs large spaces especially considering how it does so using indirectly (using layer-specific embeddings). The optimal batch size, i.e., that ensures the most stable training is " + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "inline_equation", + "content": "2^{9}" + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "content": ", and the learning rate is set to " + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "inline_equation", + "content": "1e - 2" + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "content": " for the Adam optimizer. As mentioned in the main manuscript, the value of the model batch size " + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "inline_equation", + "content": "B_{m}" + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "content": " affect the training strongly, hence we set it to 1 when " + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "inline_equation", + "content": "N\\times M = 3" + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "content": " and 9 when " + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "inline_equation", + "content": "N\\times M = 27" + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "content": ". For AutoPair, " + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "inline_equation", + "content": "N\\times = 12" + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "inline_equation", + "content": "B_{m} = 4" + }, + { + "bbox": [ + 67, + 423, + 291, + 613 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "spans": [ + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "type": "text", + "content": "MLLMs. For MLLMs, we follow recipes given in (Jia et al., 2024) for training only the connector (referred to as the feature alignment phase of pretraining). Particularly, we use Adam with batch size of 64 for training individual connectors and learning rate " + }, + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "type": "inline_equation", + "content": "1e - 3" + }, + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "type": "text", + "content": ". This is subject to a schedule of warmup ratio " + }, + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "type": "inline_equation", + "content": "3e - 2" + }, + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "type": "text", + "content": " following a cosine decay to 0. The batch size training HYMA for MLLMs is 32 and the learning rate is " + }, + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "type": "inline_equation", + "content": "1e - 3" + }, + { + "bbox": [ + 67, + 619, + 291, + 741 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "content": "Architectural experiments. For VLMs, we tried using a compression of the image encoder features" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 324, + 421, + 505, + 476 + ], + "blocks": [ + { + "bbox": [ + 324, + 421, + 505, + 476 + ], + "lines": [ + { + "bbox": [ + 324, + 421, + 505, + 476 + ], + "spans": [ + { + "bbox": [ + 324, + 421, + 505, + 476 + ], + "type": "table", + "html": "
ArchitectureIN-1K Top-1 accuracy
HYMA27.46
HYMAEC12.11
", + "image_path": "08810941962790947d25ebc5b112ed0dba964579e05596807fb39e69dc15e1a8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 484, + 526, + 509 + ], + "lines": [ + { + "bbox": [ + 302, + 484, + 526, + 509 + ], + "spans": [ + { + "bbox": [ + 302, + 484, + 526, + 509 + ], + "type": "text", + "content": "Table 8: HYMA performs significantly better downstream in comparison to " + }, + { + "bbox": [ + 302, + 484, + 526, + 509 + ], + "type": "inline_equation", + "content": "\\mathrm{HYMA_{EC}}" + }, + { + "bbox": [ + 302, + 484, + 526, + 509 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "spans": [ + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "type": "text", + "content": "as the conditional input to the hypernetwork, while keeping all other components the same. Only the learnable code-book is replaced by a learnt compression of batch-averaged image encoder features. This configuration, denoted as " + }, + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "type": "inline_equation", + "content": "\\mathrm{HYMA}_{\\mathrm{EC}}" + }, + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "type": "text", + "content": " yielded lower performance than our default methodology HYMA. 
Specifically for the " + }, + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "type": "inline_equation", + "content": "N\\times M = 3" + }, + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "type": "text", + "content": " case, for multi-modal image classification on ImageNet-1K, we find that the top-1 accuracy of the best model pair given by HYMA is superior to that given by " + }, + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "type": "inline_equation", + "content": "\\mathrm{HYMA}_{\\mathrm{EC}}" + }, + { + "bbox": [ + 302, + 533, + 526, + 696 + ], + "type": "text", + "content": " shown in Table 8. Figure 6 provides an example pseudo-code depicting our training setup." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 723, + 458, + 737 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 723, + 458, + 737 + ], + "spans": [ + { + "bbox": [ + 302, + 723, + 458, + 737 + ], + "type": "text", + "content": "E Factors impacting FLOPs" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 748, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 526, + 775 + ], + "type": "text", + "content": "While the numbers of parameters in the model being trained is no doubt a factor that is linearly pro" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19799" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "spans": [ + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "text", + 
"content": "portional to the total FLOPs incurred, we note that there are other factors like hyperparameters as well. For loss functions that relate linearly with the batch size, batch size has no effect on the total number of FLOPs incurred after the entire training run, as the model takes fewer update steps on a bigger batch size, but proportionately more on a smaller one. However, for loss functions that scale quadratically with the number of data samples observed, such as the InfoNCE loss (Oord et al., 2018), the value of batch size can significantly affect the FLOP count. This, after the primary design choice of iteratively loading models, which decreases the number of samples shown to a model by " + }, + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "inline_equation", + "content": "N \\times M / B_{m}" + }, + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "text", + "content": ", accounts for why HYMA, that training a large hypernetwork (of an average of " + }, + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "inline_equation", + "content": "500 \\times" + }, + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "text", + "content": " more parameters than the connector) is efficient, particularly for VLMs. For the case of MLLMs, the reasons become our design choice of iterative model batches, as well as the fact that certain LLMs are of a larger parametric capacity than others. Hence backpropagating the gradient through them into the connector for a total of " + }, + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "text", + "content": " steps is more expensive than doing so for " + }, + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "inline_equation", + "content": "\\mathcal{T} / (N \\times M / B_{m})" + }, + { + "bbox": [ + 66, + 71, + 293, + 396 + ], + "type": "text", + "content": " steps via HYMA." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 70, + 443, + 290, + 501 + ], + "blocks": [ + { + "bbox": [ + 68, + 408, + 235, + 423 + ], + "lines": [ + { + "bbox": [ + 68, + 408, + 235, + 423 + ], + "spans": [ + { + "bbox": [ + 68, + 408, + 235, + 423 + ], + "type": "text", + "content": "F Connection to Data Pruning" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 443, + 290, + 501 + ], + "lines": [ + { + "bbox": [ + 70, + 443, + 290, + 501 + ], + "spans": [ + { + "bbox": [ + 70, + 443, + 290, + 501 + ], + "type": "table", + "html": "
MethodBest Model configurationPerf.
C-GSDeiT-3S + miniLM-L24.07
HYMADeiT-3S + miniLM-L27.46
", + "image_path": "04f51fe6016be2aca8ce5b5faff151cdc2197a38b3dc6ade6cec1871f2b87cda.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 67, + 509, + 291, + 594 + ], + "lines": [ + { + "bbox": [ + 67, + 509, + 291, + 594 + ], + "spans": [ + { + "bbox": [ + 67, + 509, + 291, + 594 + ], + "type": "text", + "content": "Table 9: HYMA vs. Constrained Grid Search (C-GS). For the setting " + }, + { + "bbox": [ + 67, + 509, + 291, + 594 + ], + "type": "inline_equation", + "content": "N \\times M = 3" + }, + { + "bbox": [ + 67, + 509, + 291, + 594 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 509, + 291, + 594 + ], + "type": "inline_equation", + "content": "B_{m} = 1" + }, + { + "bbox": [ + 67, + 509, + 291, + 594 + ], + "type": "text", + "content": ", we constrain the total data available to Grid Search to one-third, aligning it with HYMA's data budget. While this constraint results in a comparable reduction in FLOPs relative to full Grid Search, it leads to a notable drop in performance. Perf. denotes MIC top-1 accuracy on ImageNet-1K." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 612, + 291, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 612, + 291, + 747 + ], + "spans": [ + { + "bbox": [ + 66, + 612, + 291, + 747 + ], + "type": "text", + "content": "While HYMA provides a unified and compute-efficient framework for addressing the M-OPS problem, the primary reduction in FLOPs arises from the dual mini-batching strategy employed during training. This dual mini-batching mechanism results in each model pair configuration being exposed to a smaller subset of data compared to independent stitching, effectively mimicking randomized data pruning in the process of constructing multimodal models from unimodal pairs." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "Data pruning and filtering strategies for multimodal training have been extensively explored in" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 301, + 71, + 526, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 71, + 526, + 274 + ], + "spans": [ + { + "bbox": [ + 301, + 71, + 526, + 274 + ], + "type": "text", + "content": "prior work (Fang et al., 2023; Bi et al., 2025; Mahmoud et al., 2024), typically focusing on restricting the training data via heuristic-based selection. In contrast, HYMA adopts a randomized approach: the mini-batching process dynamically selects data for each model configuration, and across multiple training steps, both the data and batch assignments are shuffled. This results in a more uniform and implicit allocation of the dataset across the space of possible model configurations, while still maintaining computational efficiency. It is important to note, however, that this data reduction applies only to each model configuration independently; the hypernetwork " + }, + { + "bbox": [ + 301, + 71, + 526, + 274 + ], + "type": "inline_equation", + "content": "H_{\\phi}" + }, + { + "bbox": [ + 301, + 71, + 526, + 274 + ], + "type": "text", + "content": ", which generates the connector weights, is still trained over the entire dataset." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 275, + 526, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 275, + 526, + 370 + ], + "spans": [ + { + "bbox": [ + 302, + 275, + 526, + 370 + ], + "type": "text", + "content": "This effect is further evident when comparing HYMA to a constrained version of Oracle (Grid Search) (C-GS). 
As shown in Table 9, when the total data available to Grid Search is limited to one-third—matching HYMA's data budget—the best-performing model identified by C-GS performs significantly worse than HYMA." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 385, + 428, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 385, + 428, + 399 + ], + "spans": [ + { + "bbox": [ + 302, + 385, + 428, + 399 + ], + "type": "text", + "content": "G VQA implementation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 301, + 410, + 526, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 410, + 526, + 518 + ], + "spans": [ + { + "bbox": [ + 301, + 410, + 526, + 518 + ], + "type": "text", + "content": "We follow a methodology similar to the method Question Irrelevant Prompt (QIP) (Shen et al., 2021; Song et al., 2022) that creates a prompt of “QUESTION: {question} ANSWER: {answer}\" for a given image. This prompt is embedded via the text encoder and the task is to match the image to the correct prompt, as an image-text matching objective." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 533, + 495, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 533, + 495, + 547 + ], + "spans": [ + { + "bbox": [ + 302, + 533, + 495, + 547 + ], + "type": "text", + "content": "H Baselines: Ask-LLM (for VLMs)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 301, + 558, + 526, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 558, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 301, + 558, + 526, + 774 + ], + "type": "text", + "content": "We prompt Claude 4 Sonnet (Anthropic) to identify the best model pair where we specify the image encoder metadata from timm (details of the image encoder from the ImageNet-1K results database such as accuracy, parameters, image size for pretraining). 
The metadata of the text encoder is obtained via huggingface (details of the pretrained text encoder like embedding dimension, parameters). The \"task\" is one among multi-modal image classification, image-text matching, and visual question answering, whereas \"dataset\" is simply the name of the dataset, and \"dataset_meta\" contains the number of samples, classes, questions, and answers, as needed for the dataset. We specify the type of connector (Linear, " + }, + { + "bbox": [ + 301, + 558, + 526, + 774 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_1" + }, + { + "bbox": [ + 301, + 558, + 526, + 774 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 301, + 558, + 526, + 774 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_2" + }, + { + "bbox": [ + 301, + 558, + 526, + 774 + ], + "type": "text", + "content": ") via \"depth\"." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "19800" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 82, + 77, + 276, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 77, + 276, + 253 + ], + "spans": [ + { + "bbox": [ + 82, + 77, + 276, + 253 + ], + "type": "text", + "content": "\"You are an oracle which will predict which combination of image and text encoders will perform best on a given task. The task is to predict which (image encoder, text encoder) pair will yield the best CLIP-like VLM from a list of image encoders and text encoders. 
More details about this: each pair of encoders will be connected via an MLP of number of hidden layers {depth} (0 means a linear layer), which will be trained to map text embeddings to the image embedding space such that the InfoNCE loss is minimized." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 267, + 276, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 267, + 276, + 335 + ], + "spans": [ + { + "bbox": [ + 82, + 267, + 276, + 335 + ], + "type": "text", + "content": "Your job is NOT TO provide any code or run the experiment. JUST TO PREDICT WHICH PAIR WILL YIELD THE BEST {task} {task_metric} on {dataset} ({dataset_meta data})." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 349, + 276, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 349, + 276, + 389 + ], + "spans": [ + { + "bbox": [ + 83, + 349, + 276, + 389 + ], + "type": "text", + "content": "Here are the image encoders, along with their metadata: {image_encoders_with_meta data}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 402, + 276, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 402, + 276, + 444 + ], + "spans": [ + { + "bbox": [ + 83, + 402, + 276, + 444 + ], + "type": "text", + "content": "Here are the text encoders, along with their metadata: {text_encoders_with_meta data}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 456, + 277, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 456, + 277, + 524 + ], + "spans": [ + { + "bbox": [ + 83, + 456, + 277, + 524 + ], + "type": "text", + "content": "Please provide your answer in (image Encoder, text Encoder) format ONLY. 
NO OTHER TEXT SHOULD BE PRODUCED BY YOU EXCEPT THE ANSWER IN THE REQUIRED FORMAT.\"" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 303, + 70, + 501, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 70, + 501, + 84 + ], + "spans": [ + { + "bbox": [ + 303, + 70, + 501, + 84 + ], + "type": "text", + "content": "I Baselines: Ask-LLM (for MLLMs)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 98, + 511, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 98, + 511, + 288 + ], + "spans": [ + { + "bbox": [ + 317, + 98, + 511, + 288 + ], + "type": "text", + "content": "\"You are an oracle which will predict which combination of image encoder and LLM will perform best on image captioning task. The task is to predict which (image encoder, LLM) pair will yield the best GPT4-like MLLM from a list of image encoders and LLMs. More details about this: each pair will be connected via an MLP of number of hidden layers {depth} (0 means a linear layer), which will be trained to map patch-wise image encoder outputs to the input embedding space of LLM such that the causal language modeling loss is minimized." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 301, + 511, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 301, + 511, + 370 + ], + "spans": [ + { + "bbox": [ + 317, + 301, + 511, + 370 + ], + "type": "text", + "content": "Your job is NOT TO provide any code or run the experiment. JUST TO PREDICT WHICH PAIR WILL YIELD THE BEST {task} {task_metric} on {dataset} ({dataset_meta data})." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 318, + 383, + 511, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 383, + 511, + 424 + ], + "spans": [ + { + "bbox": [ + 318, + 383, + 511, + 424 + ], + "type": "text", + "content": "Here are the image encoders, along with their metadata: {image_encoders_with_metadata}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 438, + 511, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 438, + 511, + 465 + ], + "spans": [ + { + "bbox": [ + 318, + 438, + 511, + 465 + ], + "type": "text", + "content": "Here are the LLMs, along with their metadata: {llms_with_metaData}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 318, + 478, + 511, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 478, + 511, + 544 + ], + "spans": [ + { + "bbox": [ + 318, + 478, + 511, + 544 + ], + "type": "text", + "content": "Please provide your answer in (image Encoder, llm) format ONLY. NO OTHER TEXT SHOULD BE PRODUCED BY YOU EXCEPT THE ANSWER IN THE REQUIRED FORMAT.\"" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 560, + 526, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 560, + 526, + 615 + ], + "spans": [ + { + "bbox": [ + 302, + 560, + 526, + 615 + ], + "type": "text", + "content": "We specify image encoder details as done for VLMs, but LLMs details are obtained from huggingface (parameters, embedding dimension, context length)." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 311, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 311, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 311, + 791 + ], + "type": "text", + "content": "19801" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file