diff --git a/.gitattributes b/.gitattributes index 806514b0eb7b9a8077b3f33f1569389fcc16d42d..dd1d180962a11e8a9d28952651b0773b6680ab04 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1276,3 +1276,11 @@ data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_origin.pdf data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_content_list.json b/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f54f686fd942273d587a569ccb7b5ad5f6c7aca8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_content_list.json @@ -0,0 +1,1748 @@ +[ + { + "type": "text", + "text": "ViTaMIn: Learning Contact-Rich Tasks Through Robot-Free Visuo-Tactile Manipulation Interface", + "text_level": 1, + "bbox": [ + 148, + 87, + 851, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fangchen Liu\\*,2, Chuanyu Li\\*,1, Yihua Qin\\*, Jing Xu\\*, Pieter Abbeel\\*, Rui Chen\\*,1", + "bbox": [ + 181, + 157, + 807, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Tsinghua University, $^{2}$ University of California, Berkeley", + "bbox": [ + 285, + 175, + 712, + 191 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "* Equal contribution, † Corresponding author", + "bbox": [ + 357, + 193, + 635, + 208 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://chuanyune.github.io/ViTaMIN_page", + "bbox": [ + 279, + 210, + 714, + 226 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg", + "image_caption": [ + "Demonstrations" + ], + "image_footnote": [], + "bbox": [ + 96, + 282, + 218, + 376 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 236, + 282, + 341, + 375 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 354, + 282, + 473, + 376 + ], + 
"page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg", + "image_caption": [ + "Real-World Tasks" + ], + "image_footnote": [], + "bbox": [ + 486, + 282, + 611, + 376 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 622, + 282, + 756, + 376 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 767, + 282, + 903, + 377 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg", + "image_caption": [ + "Fig. 1: ViTaMIn overview. Our system comprises a portable data collection device that integrates visual and tactile sensing, a multimodal representation learning framework for fusing visual and tactile information, and demonstrations of various contact-rich manipulation tasks. This system facilitates efficient collection of manipulation data without requiring complex robot setups. (*Backgrounds in the images are blurred.)" + ], + "image_footnote": [], + "bbox": [ + 88, + 380, + 452, + 545 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d3b473412a83550b31125af282bc865a7484b0a11b3fe4b684aa09dfa0912134.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 468, + 383, + 625, + 542 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 627, + 383, + 764, + 542 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 767, + 383, + 903, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—Tactile information plays a crucial role for humans and robots to interact effectively with their environment, particularly for tasks requiring the understanding of contact properties. Solving such dexterous manipulation tasks often relies on imitation learning from demonstration datasets, which are typically collected via teleoperation systems and often demand substantial time and effort. To address these challenges, we present ViTaMIn, an embodiment-free manipulation interface that integrates visual and tactile sensing into a hand-held gripper, enabling multi-modality data collection without the need for teleoperation. Our design employs a compliant Fin Ray gripper with tactile sensing, allowing operators to perceive force feedback during manipulation for more intuitive operation. Additionally, we propose a multi-modal representation learning strategy to obtain pre-trained tactile representations, improving data efficiency and policy robustness. Experiments on 5 contact-rich manipulation tasks demonstrate that our system is more scalable, efficient, and effective than baseline methods.", + "bbox": [ + 81, + 630, + 488, + 857 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. 
INTRODUCTION", + "text_level": 1, + "bbox": [ + 218, + 872, + 352, + 886 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Humans rely on both visual and tactile modalities to perform a diverse range of manipulation tasks in daily", + "bbox": [ + 81, + 896, + 488, + 926 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "life. For instance, when inserting a plug into a socket or tightening a screw, vision helps with identifying and aligning components, while tactile signals enable precise force control during contact. This seamless integration of vision and touch enhances human dexterity, particularly in tasks that require contact-rich control, handling visual occlusions, or performing in-hand manipulations.", + "bbox": [ + 504, + 628, + 913, + 734 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent progress in learning from demonstrations [1], [2], [3], [4] has shown significant potential for advancing general-purpose robots, enabling them to efficiently acquire complex skills from human demonstrations. Consequently, developing systems to collect high-quality demonstration data has been a recent key focus. Prior works have explored real-world data collection methods, including joint-mapped devices and exoskeletons [5], [6], [7], [8], and vision-based teleoperation frameworks [9], [10]. Nevertheless, these techniques require real-time teleoperation of a physical robot during data collection, which constrains efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14] present", + "bbox": [ + 504, + 744, + 913, + 926 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06156v2 [cs.RO] 1 Sep 2025", + "bbox": [ + 22, + 282, + 60, + 710 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "a more scalable and cost-effective alternative to collect demonstration without teleoperation. Moreover, they can be seamlessly integrated into various embodiments, providing a more flexible data collection approach. However, these portable devices primarily focus on capturing vision-only demonstration data, limiting their usage for contact-rich and dexterous manipulation tasks where tactile feedback plays a crucial role.", + "bbox": [ + 81, + 65, + 488, + 184 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we aim to address both the challenge of efficient data collection and the need for learning more dexterous tasks using visuo-tactile demonstrations. To this end, we introduce ViTaMIn, a novel and effective visuotactile manipulation interface designed to capture high-quality demonstrations with enhanced efficiency and flexibility. Unlike conventional approaches that rely on rigid tactile sensors, ViTaMIn leverages an omnidirectional compliant Fin Ray gripper with customized tactile sensing, which can detect contact from all directions as an expressive tactile signal for robot manipulation. We integrate the tactile-aware Fin Ray gripper [15] with UMI [14], enhancing the collected data with rich multimodal information and improving policy learning performance while maintaining the core advantages of portable devices. 
Additionally, our system enables operators to perceive force feedback during manipulation, facilitating more intuitive and seamless operation.", + "bbox": [ + 81, + 186, + 488, + 443 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Pre-trained visual representations have shown improved performance in robotic manipulation [16], [17], [18], [19], [20], benefiting from large-scale visual pre-training. To fully leverage the visuo-tactile datasets collected with ViTaMIn, we adopt a multimodal representation learning strategy to pre-train tactile representations, enhancing the robustness and generalizability of our sensor-based policies. Our pretraining objective integrates masked autoencoding [21] and contrastive learning for multimodal alignment [22], where future image observations are aligned with masked current images and tactile signals. Through extensive experiments on five challenging contact-rich manipulation tasks, our visuotactile policy, enhanced by multimodal pre-training, exhibits superior data and training efficiency while demonstrating strong generalization across diverse objects and environmental conditions.", + "bbox": [ + 81, + 443, + 488, + 684 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In conclusion, our contributions are:", + "bbox": [ + 99, + 686, + 346, + 699 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- ViTaMIn provides a portable and scalable visuo-tactile data collection system.", + "- ViTaMIn proposes an effective multimodal representation learning strategy, which significantly improves the data efficiency, robustness and generalization capabilities.", + "- ViTaMIn achieves superior performance over vision-only baselines across five manipulation tasks by leveraging visuo-tactile demonstrations." + ], + "bbox": [ + 99, + 703, + 486, + 838 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. RELATED WORK", + "text_level": 1, + "bbox": [ + 212, + 842, + 357, + 854 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A. Visuo-Tactile Manipulation", + "text_level": 1, + "bbox": [ + 81, + 862, + 290, + 876 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Tactile sensing is essential for robotic manipulation as it provides signals about physical contact in addition to visual observation. Early works [23], [24], [25] use RGB cameras", + "bbox": [ + 81, + 881, + 488, + 926 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and force/torque sensors to infer contact status for making decisions. However, the information from force/torque sensors is low-dimensional and insufficient for more dexterous manipulation tasks.", + "bbox": [ + 504, + 65, + 911, + 125 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "More recently, vision-based tactile sensors have gained attention for their ability to capture high-resolution contact information [26], [27], [28]. Despite these advances, the rigid design of these sensors restricts the compliance of the end effector, where alternative approaches like uncalibrated tactile skins [29] and plug-and-play sensing systems [30] have improved adaptability and flexibility. In our work, we use a Fin-Ray-shaped compliant and all-directional tactile sensor, which can detect contacts from all directions and also support safe and robust contact-rich manipulation.", + "bbox": [ + 504, + 127, + 911, + 279 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "B. 
Data Collection System for Robot Manipulation", + "text_level": 1, + "bbox": [ + 506, + 297, + 854, + 313 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent advancements in learning from demonstrations [1], [2], [3], [4] have shown promising results in developing general-purpose robots. Therefore, efficiently collecting high-quality demonstrations has become a key research focus.", + "bbox": [ + 504, + 321, + 911, + 395 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently works have focused on efficient real-world data collection systems, such as devices or exoskeletons with joint-mapping [5], [6], [7], exoskeletons [8], or vision-based systems [9], [10]. However, these approaches require a physical robot during data collection, which limits efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14], [31], [32] offer several advantages: they are low-cost, flexible, and do not depend on a specific physical robot. Additionally, they can be seamlessly integrated into various embodiments and provide a more user-friendly experience for data collection. We extend the UMI data collection system [14] by integrating tactile sensing, which enriches the demonstrations with multimodal information, improving policy learning performance while preserving the key benefits of portable devices.", + "bbox": [ + 504, + 398, + 913, + 625 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "C. Multimodal Pre-training for Robotics", + "text_level": 1, + "bbox": [ + 506, + 643, + 784, + 657 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Pre-trained visual representations have shown improved performance and generalization in robotic manipulation [16], [17], [18], [19], [20] with self-supervised learning techniques [21], [22]. This can be extended to multimodal representation learning [33], [34], [35] by integrating visual, tactile, and proprioceptive modalities, allowing robots to perceive object properties beyond visual appearance.", + "bbox": [ + 504, + 667, + 911, + 773 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Aligning heterogeneous sensory modalities is a key challenge in multimodal learning, as different sensors have varying data structures, sampling rates, and noise characteristics [36]. Inspired by CLIP [22], researchers have developed contrastive learning techniques to align tactile and visual representations for manipulation tasks [37], [38].", + "bbox": [ + 504, + 773, + 911, + 864 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our work extends these efforts by introducing masked contrastive pre-training, where the tactile encoder learns to reconstruct future occluded visual information, further enhancing multimodal understanding.", + "bbox": [ + 504, + 866, + 911, + 926 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/eaeb262e8e8de2cc9c2c5f6bd946acaa4ad560a3e6122d16fbd8e4f0a08cfc1a.jpg", + "image_caption": [ + "Fig. 2: ViTaMIn's hardware system overview. The handheld device integrates a GoPro camera, two tactile sensors and a synchronization camera to align visual and tactile information. During data collection, the two tactile sensors and the synchronization camera are connected to the Raspberry Pi in the backbox. The total weight of the gripper is approximately $1960\\mathrm{g}$ . Left: Side view of the ViTaMIn system. Right: Top view of the ViTaMIn system with the backbox cover removed." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 61, + 519, + 364 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 529, + 64, + 901, + 363 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "III. VISUO-TACTILE MANIPULATION INTERFACE", + "text_level": 1, + "bbox": [ + 112, + 445, + 460, + 460 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. System Overview", + "text_level": 1, + "bbox": [ + 81, + 467, + 225, + 482 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We design a handheld gripper to collect visuo-tactile demonstrations without requiring teleoperation on physical robots. Our gripper design is illustrated in Figure 2. The gripper consists of an RGB fisheye wrist camera (GoPro 10) for image observation, two AllTact finger [15], a synchronization camera for observation temporal alignment, and a Raspberry Pi 5 with a battery for data recording.", + "bbox": [ + 81, + 487, + 490, + 593 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image Observation To capture comprehensive visual information, we employ a GoPro 10 camera with a $155^{\\circ}$ field-of-view (FoV) fisheye lens. The camera operates at 60 FPS with a resolution of $2704 \\times 2028$ pixels and is mounted at the end-effector of our ViTaMIn to ensure consistent visual coverage of the manipulation workspace during demonstration collection and policy deployment.", + "bbox": [ + 81, + 594, + 488, + 698 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Tactile Observation In UMI [14], two TPU-printed Fin Ray grippers are used to provide compliance and enhance grasping stability. However, these grippers lack tactile sensing capabilities. In our ViTaMIn, we employ AllTact [15], a compliant Fin Ray gripper with omnidirectional tactile sensing ability. During manipulation, the embedded camera in AllTact captures both the global deformation of the entire finger and the local deformation of the contact surface as a single image. The tactile sensor operates at 30 FPS with a resolution of $640 \\times 480$ pixels.", + "bbox": [ + 81, + 699, + 488, + 849 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Other Observations To enhance the robustness and accuracy of SLAM, we utilize the IMU data provided by the GoPro, which is synchronized with the visual observations. Gripper width is also critical for precise manipulation. Following UMI [14], we attach two ArUco markers to the", + "bbox": [ + 81, + 851, + 490, + 926 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "gripper's fingers and compute the gripper width from the visual observations.", + "bbox": [ + 504, + 445, + 911, + 474 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. Data Processing", + "text_level": 1, + "bbox": [ + 506, + 484, + 643, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Sensor Synchronization To synchronize the tactile sensors and GoPro camera, we use an additional low-cost camera which is connected to the Raspberry Pi and is naturally synchronized with the tactile sensors. Before data collection, both the GoPro and the synchronization camera simultaneously capture a sequence of ArUco markers displayed on a computer screen. The ArUco IDs are detected in both video streams, and when an identical ID appears in both, the corresponding timestamps are used for synchronization. 
Since the framereates of the GoPro and the synchronization camera are $60\\mathrm{Hz}$ and $30\\mathrm{Hz}$ respectively, the temporal alignment error is below $1/60 + 1/30 = 0.05$ seconds, which is sufficient for our tasks. Once the two videos are synchronized, they are cropped by the starting and ending signals triggered by the control button.", + "bbox": [ + 504, + 503, + 913, + 728 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Collection and Filtering We adopt a similar data collection pipeline to UMI [14]. We also utilize Simultaneous Localization and Mapping (SLAM) to capture the end-effector trajectories. While SLAM may fail in low-texture environments, it achieves a success rate of approximately $80\\%$ in our tasks, allowing the majority of collected data to be used for imitation learning.", + "bbox": [ + 504, + 729, + 913, + 835 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "IV. VISUO-TACTILE POLICY LEARNING", + "text_level": 1, + "bbox": [ + 566, + 843, + 852, + 857 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. Visuo-Tactile Representation Learning", + "text_level": 1, + "bbox": [ + 504, + 862, + 790, + 877 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "UMI uses a pre-trained CLIP [22] encoder to extract visual representations. However, the tactile images in ViTaMIn are very different from the CLIP's training distribution, which", + "bbox": [ + 504, + 881, + 913, + 926 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "can lead to suboptimal representation. To tackle this, we pretrain an effective tactile encoder using the collected action-free datasets, which doesn't rely on the SLAM success.", + "bbox": [ + 81, + 65, + 488, + 111 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Taking the tactile image in Figure 3 as an example, we want the encoder to capture the essential contact properties, such as the object's in-hand pose and gripper's deformation. These signals are complementary information from pixel observations, and are crucial for making future decisions.", + "bbox": [ + 81, + 112, + 488, + 186 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To achieve this, we employ a multimodal contrastive learning approach as illustrated in Figure 3. Given the current masked image $\\tilde{I}_V^k$ and current full tactile observation $I_T^k$ of step $k$ , we want the combination of $\\tilde{I}_V^k$ and $I_T^k$ align with the future full image observation $I_V^{k + 1}$ in the CLIP embedding space. The intuition behind this is to make the tactile encoder focus on the contact information to predict future images based on the current corrupted image.", + "bbox": [ + 81, + 188, + 490, + 309 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 101, + 333, + 465, + 579 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/dfac30919f736ceede8d6fefd2d847d22d809cb8f4e923239b4620efb4776ebf.jpg", + "image_caption": [ + "Fig. 3: The illustration of the multimodal contrastive representation pre-training phase. The tactile encoder is trained to capture complementary information to predict the missing content for the future image." 
+ ], + "image_footnote": [], + "bbox": [ + 148, + 583, + 416, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To ensure stable training, we freeze the image CLIP encoder $\\phi_V(\\cdot)$ but only fine-tune the tactile encoder $\\phi_T(\\cdot)$ . We first obtain the tactile embedding $T_{k}$ from $\\phi_T(I_T^k)$ , and $V_{k}$ from $\\phi_V(\\tilde{I}_V^k)$ . These embeddings are concatenated and", + "bbox": [ + 81, + 864, + 490, + 926 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "passed through a fully connected projection layer, mapping them back to the original 512-dimensional CLIP embedding space as a fused feature $F_{k}$ . Finally, we train the tactile encoder using the standard CLIP loss on $F_{k}$ and $V_{k + 1}$ :", + "bbox": [ + 504, + 65, + 911, + 127 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {C L I P}} = \\frac {1}{2} \\left(\\mathcal {L} _ {\\mathrm {f - v}} + \\mathcal {L} _ {\\mathrm {v - f}}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 131, + 911, + 160 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where", + "bbox": [ + 506, + 164, + 552, + 176 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {v - f}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(V _ {i + 1} , F _ {i}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(V _ {i + 1} , F _ {j}\\right) / \\tau\\right)} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 544, + 181, + 911, + 224 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {f - v}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(F _ {i} , V _ {i + 1}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(F _ {i} , V _ {j + 1}\\right) / \\tau\\right)} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 544, + 237, + 911, + 279 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "here $\\tau$ is a learnable temperature parameter.", + "bbox": [ + 504, + 282, + 807, + 297 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Different from [39], where they directly apply the CLIP loss on the time-aligned visuo-tactile images, we instead fuse the tactile observation with a masked current image to predict the future image. We make this choice for two main reasons. First, in [39], the tactile representation is conditioned on proprioceptive states, which are unavailable in our dataset before the success of SLAM. Second, since different tasks may have varying images but similar tactile observations, fusing a masked current image helps the network learn a more expressive tactile representation. Without sufficient masking, the alignment becomes trivial.", + "bbox": [ + 504, + 297, + 913, + 464 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After pre-training, we train a Diffusion Policy [4] on the SLAM-filtered data. Following [4], we use a U-Net [40] as the noise prediction network and apply DDIM [41] to accelerate the inference for action prediction.", + "bbox": [ + 504, + 464, + 913, + 525 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg", + "image_caption": [ + "V. EXPERIMENTS", + "Fig. 4: Hardware setup for policy deployment." 
+ ], + "image_footnote": [], + "bbox": [ + 513, + 561, + 908, + 672 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A. Experimental Setup", + "text_level": 1, + "bbox": [ + 504, + 710, + 665, + 726 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Hardware Figure 4 shows the policy deployment setup. Our system consists of a Rokae xMate ER3PRO robotic arm equipped with a PGI-140-80-W-S parallel gripper. The 7-DOF robotic arm provides flexible manipulation capabilities, while the gripper features an 8cm stroke range from fully open to closed position. The system is implemented using ROS Noetic on Ubuntu 20.04. The control loop operates at $10\\mathrm{Hz}$ , with separate threads handling robot control, visual and tactile sensing. The system architecture is designed to minimize latency while maintaining reliable real-time performance.", + "bbox": [ + 504, + 729, + 913, + 895 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similar to UMI [14], our system compensates for various sources of latency in the perception-action loop through", + "bbox": [ + 504, + 896, + 913, + 926 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "predictive buffering and timestamp-based synchronization between visual and tactile feedback streams. The policy generates 16 consecutive trajectories at each inference step, with 10 trajectories being executed based on our temporal compensation strategy.", + "bbox": [ + 81, + 65, + 486, + 141 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Manipulation Tasks As shown in Figure 5, we propose diverse contact-rich manipulation tasks to evaluate the effectiveness of ViTaMIn. These tasks are specifically crafted to demonstrate the following key capabilities: (1) Robust pick-and-place of diverse objects, including fragile and small objects; (2) Dexterous manipulation, such as in-hand reorientation; (3) Task success determination, allowing the robot to repeat attempts until success; (4) Dynamic and precise manipulation.", + "bbox": [ + 81, + 141, + 486, + 277 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We design the following 5 manipulation tasks:", + "bbox": [ + 99, + 277, + 416, + 292 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Orange Placement: Put a fragile orange from a randomized position to a randomized plate.", + "- Dynamic Peg Insertion: Grasp a peg and approach a hole, which is moving at a constant speed of $10\\mathrm{mm / s}$ . And precisely insert the peg to the hole.", + "- Test Tube Reorientation: Grasp a transparent test tube from a shelf and adjust its pose through extrinsic dexterity based on tactile feedback.", + "- Scissor Hanging: Grasp a pair of scissors and hang them on a hook. Adjust the pose and keep attempting until it succeeds.", + "- Dual-Arm Knife Pulling: The left arm first grasps a knife from a cup, orients it horizontally. The right arm grasps and pulls it out with a constrained prismatic motion. This task requires tactile feedback to grasp the thin object and perform the correct pulling motion." + ], + "bbox": [ + 99, + 291, + 486, + 530 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/eae2bd4dd37d3b7a1249a136a2e4e36453a4e27aed461598963d0c97dc63fade.jpg", + "table_caption": [ + "TABLE I: Data Collection Statistics for Different Tasks" + ], + "table_footnote": [ + "*Valid data refers to demonstrations with successful SLAM tracking" + ], + "table_body": "
<table><tr><td>Task</td><td>Raw Data</td><td>Valid Data*</td><td>Avg. Length</td></tr>
<tr><td>Orange Placement</td><td>87</td><td>73</td><td>435</td></tr>
<tr><td>Dynamic Peg Insertion</td><td>201</td><td>141</td><td>321</td></tr>
<tr><td>Test Tube Reorientation</td><td>150</td><td>125</td><td>619</td></tr>
<tr><td>Scissor Hanging</td><td>172</td><td>137</td><td>642</td></tr>
<tr><td>Knife Pulling (Left)</td><td>188</td><td>131</td><td>403</td></tr>
<tr><td>Knife Pulling (Right)</td><td>180</td><td>134</td><td>254</td></tr></table>
", + "bbox": [ + 89, + 569, + 483, + 671 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table I shows the statistics of the demonstration data. We collect demonstrations for both single-arm and dual-arm manipulation tasks. For single-arm tasks, we gather between 87 and 172 raw demonstrations per task according to the task difficulty, with successful SLAM tracking achieved in approximately $80\\%$ of the trajectories. The dual-arm knife pulling task requires coordinated motion between both arms, with similar data collection volumes but slightly different average demonstration lengths for left and right arm movements.", + "bbox": [ + 81, + 699, + 486, + 848 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We compare our approach against the following methods: (1) Vision: the policy only takes visual observation from the GoPro camera, which is encoded by the pre-trained CLIP model (identical to the original UMI [14] paper); (2) Ours w/o Pre-training: This baseline simply concatenate visual and", + "bbox": [ + 81, + 849, + 486, + 926 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "tactile observations after separate CLIP ViT-B/16 encoders, and fine-tuned with behavior cloning.", + "bbox": [ + 504, + 65, + 911, + 95 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2a3bf65b454c923e9b5bb949a3116a5dfef20d4712dfc9369a08f9361b171127.jpg", + "table_caption": [], + "table_footnote": [ + "TABLE II: Comparisons on 5 tasks with baselines. Our approach improves the performance on 5 tasks through multimodal sensing and pre-training." + ], + "table_body": "
<table><tr><td>Task</td><td>Vision</td><td>w/o Pre-training</td><td>Ours</td></tr>
<tr><td colspan="4">Single-Arm Tasks</td></tr>
<tr><td>Orange placement</td><td>0.85</td><td>0.9</td><td>1.0</td></tr>
<tr><td>Test Tube Reorientation</td><td>0.4</td><td>0.7</td><td>0.9</td></tr>
<tr><td>Scissor Hanging</td><td>0.1</td><td>0.45</td><td>0.7</td></tr>
<tr><td>Dynamic Peg Insertion</td><td>0.45</td><td>0.8</td><td>0.9</td></tr>
<tr><td colspan="4">Dual-Arm Task</td></tr>
<tr><td>Knife Pulling</td><td>0.6</td><td>0.8</td><td>0.9</td></tr></table>
", + "bbox": [ + 531, + 111, + 888, + 239 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The results are presented in Table II. For each task, we conduct 20 trials with randomized initial conditions and report the average performance. The vision-only policy performs the worst across all five tasks, particularly in contact-rich tasks like test tube reorientation and scissor hanging, where tactile feedback is crucial for success. Across all tasks, pre-training enhances the performance, highlighting the importance of learning effective tactile representations.", + "bbox": [ + 504, + 299, + 911, + 419 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "B. Failure Analysis", + "text_level": 1, + "bbox": [ + 506, + 438, + 640, + 453 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the Orange placement task, the robot picks up an orange from a random position within a $50\\mathrm{cm} \\times 50\\mathrm{cm}$ workspace and places it on a plate. Failures stem from table collisions, unstable placement, or motion planning errors despite correct object detection. In Dynamic peg insertion, the robot inserts a grasped peg into a moving hole. Vision-only methods often fail due to imprecise localization and alignment.", + "bbox": [ + 504, + 460, + 911, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Test tube reorientation, the robot must pick up a tube from a random rack location and reorient it vertically, with success defined by less than $10^{\\circ}$ orientation error. Failures include rack collisions, over-lifting, and incorrect final orientation. Scissor hanging requires picking up scissors and hanging them on a narrow hook, where common issues include misdetection, misalignment, and failure to release. In Knife pulling, a dual-arm policy reorients the knife with one arm while the other pulls it out of a holder. Failures often result from poor coordination, weak grasps, or incomplete pulling. Overall, vision-only policies struggle with contact-rich tasks, highlighting the limitations of unimodal sensing.", + "bbox": [ + 504, + 568, + 911, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "C. Compliant Articulated Object Manipulation", + "text_level": 1, + "bbox": [ + 506, + 767, + 825, + 782 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To demonstrate the compliance capabilities of ViTaMIn, we designed a compliant-controlled articulated object manipulation task. The robotic arm needs to grasp a handle (connected to a force gauge) and rotate it 90 degrees to open a switch. During the rotation process, the arm must minimize axial forces to ensure smooth operation. We conduct 10 experiments for each condition and calculate the average forces. The results show that ViTaMIn achieves significantly lower average forces compared to using pure vision as input.", + "bbox": [ + 504, + 789, + 911, + 926 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/ff5d1182fc87c0d6043cdc51c2604c67d7dd26e1c42f06dddaec7cbdb5b6fff2.jpg", + "image_caption": [ + "Task 1. 
Orange Placement" + ], + "image_footnote": [], + "bbox": [ + 99, + 85, + 202, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 207, + 88, + 308, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 313, + 88, + 415, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg", + "image_caption": [ + "Task 2. Dynamic Peg Insertion" + ], + "image_footnote": [], + "bbox": [ + 419, + 87, + 542, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 542, + 87, + 653, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ed9be295452bb2b609707999c0d7ce53274abf084feefa571723224f2e442fef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 653, + 87, + 776, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 777, + 87, + 898, + 183 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg", + "image_caption": [ + "Task 3. Test Tube Reorientation", + "Stage I" + ], + "image_footnote": [], + "bbox": [ + 99, + 196, + 254, + 277 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d9d86998bcb7355813c2ec3771bc9be86562ca597b9726d312f20d51db3d0713.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 259, + 196, + 413, + 277 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 196, + 575, + 294 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 196, + 735, + 294 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 196, + 898, + 294 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg", + "image_caption": [ + "Task 4. 
Scissor Hanging" + ], + "image_footnote": [], + "bbox": [ + 99, + 309, + 254, + 388 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 259, + 309, + 413, + 405 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 309, + 575, + 405 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 309, + 735, + 405 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 309, + 897, + 405 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg", + "image_caption": [ + "Task 5. Knife Pulling (Bimanual)", + "Fig. 5: We test ViTaMIn on 5 contact-rich manipulation tasks, including precise and dynamic insertion, object hanging with multimodal feedback, and transparent in-hand object manipulation." + ], + "image_footnote": [], + "bbox": [ + 99, + 422, + 254, + 518 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 422, + 413, + 518 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 422, + 575, + 518 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 422, + 735, + 518 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 422, + 897, + 518 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg", + "image_caption": [ + "Fig. 6: The robot needs to flip open a switch (fixed to a force gauge) by rotating it 90 degrees. During the rotation, the robot must minimize axial forces to ensure smooth operation." + ], + "image_footnote": [], + "bbox": [ + 98, + 592, + 282, + 714 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg", + "image_caption": [ + "Maximum Force Comparison: Vision vs. Ours" + ], + "image_footnote": [], + "bbox": [ + 287, + 604, + 460, + 712 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg", + "image_caption": [ + "Novel Objects", + "Fig. 7: Showcase of novel objects and different lighting in the generalization tasks. The right columns demonstrate colored flashlight/high-power/normal lighting conditions." 
+ ], + "image_footnote": [], + "bbox": [ + 516, + 593, + 658, + 713 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg", + "image_caption": [ + "Different Lighting" + ], + "image_footnote": [], + "bbox": [ + 661, + 593, + 898, + 713 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "D. Ablation Studies", + "text_level": 1, + "bbox": [ + 83, + 810, + 220, + 824 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "a) Data Efficiency: We evaluate the performance of policies trained on different amounts (25%, 50%, and 100%) of demonstrations. All the models are evaluated in 20 real-world trials with different initializations. For a more in-depth analysis, we calculate the success rates of each stage separately, as illustrated in Figure 8. With the pre-trained", + "bbox": [ + 81, + 835, + 488, + 926 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "tactile representations, our method can achieve consistently higher success rates on all the tasks across different amounts of data, and can even master the task with limited data (25%) for test tube reorientation.", + "b) Training Efficiency: We further evaluate the policies trained with different numbers of epochs to understand its training efficiency under the same evaluation protocol. The" + ], + "bbox": [ + 504, + 816, + 913, + 926 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg", + "image_caption": [ + "Stage I" + ], + "image_footnote": [], + "bbox": [ + 86, + 90, + 289, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg", + "image_caption": [ + "Tube Reorientation" + ], + "image_footnote": [], + "bbox": [ + 295, + 78, + 495, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg", + "image_caption": [ + "Stage I", + "Fig. 8: Ablation study on the effect of pre-training on data efficiency. The performance of the policy improves as the quantity of data increases. After pre-training on the action-free, task-ignorant dataset, our method can achieve a high success rate even with limited data (25%)." + ], + "image_footnote": [], + "bbox": [ + 501, + 90, + 702, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg", + "image_caption": [ + "Scissor Hanging", + "Stage II" + ], + "image_footnote": [], + "bbox": [ + 707, + 88, + 908, + 252 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d8dcc28916f7268aa5ffb965d055ef3eb9daf033798758dca22c4625f78d2473.jpg", + "image_caption": [ + "Tube Reorientation", + "Stage I", + "Fig. 9: Ablation study on the effect of pre-training on training efficiency. Policies with pre-training are able to learn to complete the first-stage task at a remarkably early stage of training (within 10 epochs). Additionally, when the policy network is pre-trained, the overall success rates increase more rapidly." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 343, + 287, + 503 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg", + "image_caption": [ + "Stage II" + ], + "image_footnote": [], + "bbox": [ + 294, + 343, + 493, + 503 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg", + "image_caption": [ + "Scissor Hanging", + "Stage I" + ], + "image_footnote": [], + "bbox": [ + 500, + 342, + 700, + 503 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg", + "image_caption": [ + "Stage II" + ], + "image_footnote": [], + "bbox": [ + 707, + 342, + 908, + 503 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "results are illustrated in Figure 9. We also observe consistent task performance improvements with pre-training. The policy can complete the first stage of the task at a remarkably early training stage (within 10 epochs).", + "bbox": [ + 81, + 575, + 488, + 638 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/8105d743b48c767516e10ef93cc71f7fc5122df736e327dea3f051cc7bfb6c47.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Task</td><td>Method</td><td>Original</td><td>Novel Objects</td><td>Different Lighting</td></tr>
<tr><td rowspan="3">Orange Placement</td><td>Vision</td><td>0.85</td><td>0.7</td><td>0.55</td></tr>
<tr><td>Ours w/o Pre-training</td><td>0.9</td><td>0.8</td><td>0.6</td></tr>
<tr><td>Ours</td><td>1.0</td><td>1.0</td><td>0.85</td></tr>
<tr><td rowspan="3">Scissor Hanging</td><td>Vision</td><td>0.0</td><td>0.0</td><td>0.0</td></tr>
<tr><td>Ours w/o Pre-training</td><td>0.45</td><td>0.4</td><td>0.4</td></tr>
<tr><td>Ours</td><td>0.7</td><td>0.7</td><td>0.5</td></tr></table>
", + "bbox": [ + 84, + 651, + 488, + 765 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "TABLE III: Generalization under different objects and scenes. The results demonstrate that our multi-modal policy is more robust to novel objects and different lighting conditions.", + "bbox": [ + 81, + 770, + 488, + 830 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "E. Generalization Capability", + "text_level": 1, + "bbox": [ + 83, + 861, + 284, + 876 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also evaluate our policy's generalizability to unseen objects and environments. As shown in Figure 7, beyond the training orange and scissor, we introduce 6 unseen small", + "bbox": [ + 81, + 880, + 488, + 926 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "objects and 3 unseen scissors to assess object generalization. Additionally, we modify lighting conditions by increasing brightness and introducing colored disco ball lighting. Table III presents results on the tasks of orange placement and scissor hanging. Our method with pre-training achieves consistent better performance across various generalization settings.", + "bbox": [ + 504, + 575, + 911, + 681 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "VI. CONCLUSION", + "text_level": 1, + "bbox": [ + 643, + 694, + 774, + 707 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this paper, we present ViTaMIn, a portable visuo-tactile manipulation interface designed for efficiently collecting high-quality demonstrations by capturing both visual and tactile signals. Furthermore, ViTaMIn introduces an effective pre-training strategy that leverages all the collected action-free data to learn a robust and generalizable tactile representation through multimodal contrastive learning. Our approach significantly outperforms vision-only policies across 5 real-world contact-rich manipulation tasks and demonstrates improved data efficiency, robustness, and generalizability with pre-trained visuo-tactile representations.", + "bbox": [ + 504, + 714, + 913, + 880 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our method primarily focuses on fixed-base single-arm and dual-arm tasks with parallel-jaw grippers. While this setup is suitable for a wide range of manipulation tasks,", + "bbox": [ + 504, + 881, + 913, + 926 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "future work could extend our approach to dexterous hands, enabling richer and more versatile manipulation skills that better approximate human-level dexterity.", + "bbox": [ + 81, + 66, + 488, + 111 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 238, + 121, + 334, + 133 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] S. Levine, C. Finn, T. Darrell, and P. Abbeel, \"End-to-end training of deep visuomotor policies,\" Journal of Machine Learning Research, vol. 17, no. 39, pp. 1-40, 2016.", + "[2] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, J. Dabis, C. Finn, K. Gopalakrishnan, K. Hausman, A. Herzog, J. Hsu et al., \"Rt-1: Robotics transformer for real-world control at scale,\" arXiv preprint arXiv:2212.06817, 2022.", + "[3] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, X. Chen, K. Choromanski, T. Ding, D. Driess, A. Dubey, C. Finn et al., \"Rt-2: Vision-language-action models transfer web knowledge to robotic control,\" arXiv preprint arXiv:2307.15818, 2023.", + "[4] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. 
Burchfiel, and S. Song, \"Diffusion policy: Visuomotor policy learning via action diffusion,\" arXiv preprint arXiv:2303.04137, 2023.", + "[5] J. Aldaco, T. Armstrong, R. Baruch, J. Bingham, S. Chan, K. Draper, D. Dwibedi, C. Finn, P. Florence, S. Goodrich et al., \"Aloha 2: An enhanced low-cost hardware for bimanual teleoperation,\" arXiv preprint arXiv:2405.02292, 2024.", + "[6] Z. Fu, T. Z. Zhao, and C. Finn, \"Mobile aloha: Learning bimanual mobile manipulation with low-cost whole-body teleoperation,\" arXiv preprint arXiv:2401.02117, 2024.", + "[7] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn, “Learning fine-grained bimanual manipulation with low-cost hardware,” arXiv preprint arXiv:2304.13705, 2023.", + "[8] H. Fang, H.-S. Fang, Y. Wang, J. Ren, J. Chen, R. Zhang, W. Wang, and C. Lu, \"Airexo: Low-cost exoskeletons for learning whole-arm manipulation in the wild,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 15031-15038.", + "[9] X. Cheng, J. Li, S. Yang, G. Yang, and X. Wang, “Open-television: Teleoperation with immersive active visual feedback,” arXiv preprint arXiv:2407.01512, 2024.", + "[10] Y. Qin, W. Yang, B. Huang, K. Van Wyk, H. Su, X. Wang, Y.-W. Chao, and D. Fox, \"Anyteleop: A general vision-based dexterous robot arm-hand teleoperation system,\" arXiv preprint arXiv:2307.04577, 2023.", + "[11] F. Sanches, G. Gao, N. Elangovan, R. V. Godoy, J. Chapman, K. Wang, P. Jarvis, and M. Liarokapis, \"Scalable. intuitive human to robot skill transfer with wearable human machine interfaces: On complex, dexterous tasks,\" in 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023, pp. 6318-6325.", + "[12] K. Doshi, Y. Huang, and S. Coros, \"On hand-held grippers and the morphological gap in human manipulation demonstration,\" arXiv preprint arXiv:2311.01832, 2023.", + "[13] N. M. M. Shafiullah, A. Rai, H. Etukuru, Y. Liu, I. Misra, S. Chintala, and L. Pinto, \"On bringing robots home,\" arXiv preprint arXiv:2311.16098, 2023.", + "[14] C. Chi, Z. Xu, C. Pan, E. Cousineau, B. Burchfiel, S. Feng, R. Tedrake, and S. Song, \"Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots,\" arXiv preprint arXiv:2402.10329, 2024.", + "[15] S. Liang, Y. Guan, J. Xu, H. Qian, X. Zhang, D. Wu, W. Ding, and R. Chen, \"Alltact fin ray: A compliant robot gripper with omnidirectional tactile sensing,\" arXiv preprint arXiv:2504.18064, 2025.", + "[16] S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta, “R3m: A universal visual representation for robot manipulation,” in Proceedings of The 6th Conference on Robot Learning (CoRL), vol. 205. PMLR, 2022, pp. 892–909.", + "[17] Y. J. Ma, S. Sodhani, D. Jayaraman, O. Bastani, V. Kumar, and A. Zhang, “VIP: Towards universal visual reward and representation via value-implicit pre-training,” in The Eleventh International Conference on Learning Representations, 2023.", + "[18] T. Xiao, I. Radosavovic, T. Darrell, and J. Malik, “Masked visual pretraining for motor control,” arXiv:2203.06173, 2022.", + "[19] I. Radosavovic, T. Xiao, S. James, P. Abbeel, J. Malik, and T. Darrell, “Real-world robot learning with masked visual pre-training,” in Conference on Robot Learning. PMLR, 2023, pp. 416–426.", + "[20] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V.-P. Berges, T. Wu, J. Vakil et al., \"Where are we in the search for an artificial visual cortex for embodied intelligence?\" Advances in Neural Information Processing Systems, vol. 
36, pp. 655-677, 2023." + ], + "bbox": [ + 86, + 142, + 488, + 926 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[21] K. He, X. Chen, S. Xie, Y. Li, P. Dollar, and R. Girshick, “Masked autoencoders are scalable vision learners,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2022, pp. 16000-16009.", + "[22] A. Radford, J. W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark et al., \"Learning transferable visual models from natural language supervision,\" in International conference on machine learning. PMLR, 2021, pp. 8748-8763.", + "[23] K. Hosoda, K. Igarashi, and M. Asada, \"Adaptive hybrid visual servoing/force control in unknown environment,\" in Proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems. IROS'96, vol. 3. IEEE, 1996, pp. 1097-1103.", + "[24] H. Nakagaki, K. Kitagaki, T. Ogasawara, and H. Tsukune, \"Study of deformation and insertion tasks of a flexible wire,\" in Proceedings of International Conference on Robotics and Automation, vol. 3. IEEE, 1997, pp. 2397-2402.", + "[25] P. Miller and P. Leibowitz, \"Integration of vision, force and tactile sensing for grasping,\" Int. J. Intell. Mach, vol. 4, pp. 129-149, 1999.", + "[26] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. Malik, \"General in-hand object rotation with vision and touch,\" in Conference on Robot Learning. PMLR, 2023, pp. 2549-2564.", + "[27] S. Li, H. Yu, W. Ding, H. Liu, L. Ye, C. Xia, X. Wang, and X.-P. Zhang, “Visual-tactile fusion for transparent object grasping in complex backgrounds,” IEEE Transactions on Robotics, 2023.", + "[28] Y. Han, K. Yu, R. Batra, N. Boyd, C. Mehta, T. Zhao, Y. She, S. Hutchinson, and Y. Zhao, “Learning generalizable vision-tactile robotic grasping strategy for deformable objects via transformer,” IEEE/ASME Transactions on Mechatronics, 2024.", + "[29] R. Bhirangi, V. Pattabiraman, E. Erciyes, Y. Cao, T. Hellebrekers, and L. Pinto, “Anyskin: Plug-and-play skin sensing for robotic touch,” arXiv preprint arXiv:2409.08276, 2024.", + "[30] V. Pattabiraman, Y. Cao, S. Haldar, L. Pinto, and R. Bhirangi, “Learning precise, contact-rich manipulation through uncalibrated tactile skins,” arXiv preprint arXiv:2410.17246, 2024.", + "[31] Liu, Guan, Jia, Wu, Liu, Wang, Liang, Chen, Zhang, Song et al., \"Fastumi: A scalable and hardware-independent universal manipulation interface with dataset,\" arXiv e-prints, pp. arXiv-2409, 2024.", + "[32] Liu, Chi, Cousineau, Kuppuswamy, Burchfiel, and Song, \"Maniwav: Learning robot manipulation from in-the-wild audio-visual data,\" in CoRL, 2024.", + "[33] C. Sferrazza, Y. Seo, H. Liu, Y. Lee, and P. Abbeel, \"The power of the senses: Generalizable manipulation from vision and touch through masked multimodal learning,\" in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 9698-9705.", + "[34] Z. Xu, R. Uppuluri, X. Zhang, C. Fitch, P. G. Crandall, W. Shou, D. Wang, and Y. She, \"UniT: Unified tactile representation for robot learning,\" 2024. [Online]. Available: https://arxiv.org/abs/2408.06481", + "[35] X. Zhang and et al., “Fusing multimodal sensory data for robotic perception,” IEEE Transactions on Robotics, 2022.", + "[36] A. Nagabandi, G. Kahn, S. Levine, and C. Finn, \"Deep reinforcement learning for vision-based robotic control with multimodal inputs,\" in Conference on Robot Learning (CoRL), 2020.", + "[37] L. Fu, G. 
Datta, H. Huang, W. C.-H. Panitch, J. Drake, J. Ortiz, M. Mukadam, M. Lambeta, R. Calandra, and K. Goldberg, \"A touch, vision, and language dataset for multimodal alignment,\" in Forty-first International Conference on Machine Learning, 2024. [Online]. Available: https://openreview.net/forum?id=tFEOOH9eH0", + "[38] F. Yang, C. Feng, Z. Chen, H. Park, D. Wang, Y. Dou, Z. Zeng, X. Chen, R. Gangopadhyay, A. Owens, and A. Wong, \"Binding touch to everything: Learning unified multimodal tactile representations,\" arXiv:2401.18084, 2024.", + "[39] A. George, S. Gano, P. Katragadda, and A. Farimani, “Vital pretraining: Visuo-tactile pretraining for tactile and non-tactile manipulation policies,” arXiv preprint arXiv:2403.11898, 2024.", + "[40] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18. Springer, 2015, pp. 234-241.", + "[41] J. Song, C. Meng, and S. Ermon, “Denoising diffusion implicit models,” arXiv preprint arXiv:2010.02502, 2020." + ], + "bbox": [ + 509, + 66, + 911, + 883 + ], + "page_idx": 7 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_model.json b/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e2e90bd1f2f2d6cb9adfec3e4e95b0224955d29b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_model.json @@ -0,0 +1,2460 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.149, + 0.088, + 0.852, + 0.138 + ], + "angle": 0, + "content": "ViTaMIn: Learning Contact-Rich Tasks Through Robot-Free Visuo-Tactile Manipulation Interface" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.158, + 0.808, + 0.175 + ], + "angle": 0, + "content": "Fangchen Liu\\*,2, Chuanyu Li\\*,1, Yihua Qin\\*, Jing Xu\\*, Pieter Abbeel\\*, Rui Chen\\*,1" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.176, + 0.714, + 0.193 + ], + "angle": 0, + "content": "\\(^{1}\\)Tsinghua University, \\(^{2}\\)University of California, Berkeley" + }, + { + "type": "text", + "bbox": [ + 0.358, + 0.194, + 0.636, + 0.209 + ], + "angle": 0, + "content": "* Equal contribution, † Corresponding author" + }, + { + "type": "text", + "bbox": [ + 0.281, + 0.211, + 0.715, + 0.227 + ], + "angle": 0, + "content": "https://chuanyune.github.io/ViTaMIN_page" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.283, + 0.061, + 0.712 + ], + "angle": 270, + "content": "arXiv:2504.06156v2 [cs.RO] 1 Sep 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.094, + 0.271, + 0.221, + 0.283 + ], + "angle": 0, + "content": "Demonstrations" + }, + { + "type": "image", + "bbox": [ + 0.098, + 0.284, + 0.22, + 0.377 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.238, + 0.283, + 0.343, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.355, + 0.283, + 0.474, + 0.377 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.48, + 0.269, + 0.617, + 0.282 + ], + "angle": 0, + "content": "Real-World Tasks" + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.283, + 0.612, + 0.377 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.623, + 0.283, + 0.758, + 0.377 + ], + "angle": 0, + "content": null + }, + 
{ + "type": "image", + "bbox": [ + 0.769, + 0.283, + 0.905, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.089, + 0.381, + 0.454, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.469, + 0.384, + 0.627, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.384, + 0.766, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.768, + 0.384, + 0.904, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.557, + 0.913, + 0.618 + ], + "angle": 0, + "content": "Fig. 1: ViTaMIn overview. Our system comprises a portable data collection device that integrates visual and tactile sensing, a multimodal representation learning framework for fusing visual and tactile information, and demonstrations of various contact-rich manipulation tasks. This system facilitates efficient collection of manipulation data without requiring complex robot setups. (*Backgrounds in the images are blurred.)" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.631, + 0.49, + 0.858 + ], + "angle": 0, + "content": "Abstract—Tactile information plays a crucial role for humans and robots to interact effectively with their environment, particularly for tasks requiring the understanding of contact properties. Solving such dexterous manipulation tasks often relies on imitation learning from demonstration datasets, which are typically collected via teleoperation systems and often demand substantial time and effort. To address these challenges, we present ViTaMIn, an embodiment-free manipulation interface that integrates visual and tactile sensing into a hand-held gripper, enabling multi-modality data collection without the need for teleoperation. Our design employs a compliant Fin Ray gripper with tactile sensing, allowing operators to perceive force feedback during manipulation for more intuitive operation. Additionally, we propose a multi-modal representation learning strategy to obtain pre-trained tactile representations, improving data efficiency and policy robustness. Experiments on 5 contact-rich manipulation tasks demonstrate that our system is more scalable, efficient, and effective than baseline methods." + }, + { + "type": "title", + "bbox": [ + 0.219, + 0.873, + 0.353, + 0.887 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.897, + 0.49, + 0.927 + ], + "angle": 0, + "content": "Humans rely on both visual and tactile modalities to perform a diverse range of manipulation tasks in daily" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.63, + 0.914, + 0.736 + ], + "angle": 0, + "content": "life. For instance, when inserting a plug into a socket or tightening a screw, vision helps with identifying and aligning components, while tactile signals enable precise force control during contact. This seamless integration of vision and touch enhances human dexterity, particularly in tasks that require contact-rich control, handling visual occlusions, or performing in-hand manipulations." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.746, + 0.914, + 0.927 + ], + "angle": 0, + "content": "Recent progress in learning from demonstrations [1], [2], [3], [4] has shown significant potential for advancing general-purpose robots, enabling them to efficiently acquire complex skills from human demonstrations. 
Consequently, developing systems to collect high-quality demonstration data has been a recent key focus. Prior works have explored real-world data collection methods, including joint-mapped devices and exoskeletons [5], [6], [7], [8], and vision-based teleoperation frameworks [9], [10]. Nevertheless, these techniques require real-time teleoperation of a physical robot during data collection, which constrains efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14] present" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.066, + 0.49, + 0.185 + ], + "angle": 0, + "content": "a more scalable and cost-effective alternative to collect demonstration without teleoperation. Moreover, they can be seamlessly integrated into various embodiments, providing a more flexible data collection approach. However, these portable devices primarily focus on capturing vision-only demonstration data, limiting their usage for contact-rich and dexterous manipulation tasks where tactile feedback plays a crucial role." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.187, + 0.49, + 0.444 + ], + "angle": 0, + "content": "In this work, we aim to address both the challenge of efficient data collection and the need for learning more dexterous tasks using visuo-tactile demonstrations. To this end, we introduce ViTaMIn, a novel and effective visuotactile manipulation interface designed to capture high-quality demonstrations with enhanced efficiency and flexibility. Unlike conventional approaches that rely on rigid tactile sensors, ViTaMIn leverages an omnidirectional compliant Fin Ray gripper with customized tactile sensing, which can detect contact from all directions as an expressive tactile signal for robot manipulation. We integrate the tactile-aware Fin Ray gripper [15] with UMI [14], enhancing the collected data with rich multimodal information and improving policy learning performance while maintaining the core advantages of portable devices. Additionally, our system enables operators to perceive force feedback during manipulation, facilitating more intuitive and seamless operation." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.444, + 0.49, + 0.685 + ], + "angle": 0, + "content": "Pre-trained visual representations have shown improved performance in robotic manipulation [16], [17], [18], [19], [20], benefiting from large-scale visual pre-training. To fully leverage the visuo-tactile datasets collected with ViTaMIn, we adopt a multimodal representation learning strategy to pre-train tactile representations, enhancing the robustness and generalizability of our sensor-based policies. Our pretraining objective integrates masked autoencoding [21] and contrastive learning for multimodal alignment [22], where future image observations are aligned with masked current images and tactile signals. Through extensive experiments on five challenging contact-rich manipulation tasks, our visuotactile policy, enhanced by multimodal pre-training, exhibits superior data and training efficiency while demonstrating strong generalization across diverse objects and environmental conditions." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.687, + 0.348, + 0.7 + ], + "angle": 0, + "content": "In conclusion, our contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.704, + 0.488, + 0.733 + ], + "angle": 0, + "content": "- ViTaMIn provides a portable and scalable visuo-tactile data collection system." 
+ }, + { + "type": "text", + "bbox": [ + 0.1, + 0.735, + 0.488, + 0.792 + ], + "angle": 0, + "content": "- ViTaMIn proposes an effective multimodal representation learning strategy, which significantly improves the data efficiency, robustness and generalization capabilities." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.795, + 0.488, + 0.839 + ], + "angle": 0, + "content": "- ViTaMIn achieves superior performance over vision-only baselines across five manipulation tasks by leveraging visuo-tactile demonstrations." + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.704, + 0.488, + 0.839 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.214, + 0.843, + 0.359, + 0.856 + ], + "angle": 0, + "content": "II. RELATED WORK" + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.863, + 0.292, + 0.877 + ], + "angle": 0, + "content": "A. Visuo-Tactile Manipulation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.882, + 0.49, + 0.927 + ], + "angle": 0, + "content": "Tactile sensing is essential for robotic manipulation as it provides signals about physical contact in addition to visual observation. Early works [23], [24], [25] use RGB cameras" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.066, + 0.913, + 0.126 + ], + "angle": 0, + "content": "and force/torque sensors to infer contact status for making decisions. However, the information from force/torque sensors is low-dimensional and insufficient for more dexterous manipulation tasks." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.128, + 0.913, + 0.28 + ], + "angle": 0, + "content": "More recently, vision-based tactile sensors have gained attention for their ability to capture high-resolution contact information [26], [27], [28]. Despite these advances, the rigid design of these sensors restricts the compliance of the end effector, where alternative approaches like uncalibrated tactile skins [29] and plug-and-play sensing systems [30] have improved adaptability and flexibility. In our work, we use a Fin-Ray-shaped compliant and all-directional tactile sensor, which can detect contacts from all directions and also support safe and robust contact-rich manipulation." + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.298, + 0.855, + 0.314 + ], + "angle": 0, + "content": "B. Data Collection System for Robot Manipulation" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.322, + 0.913, + 0.396 + ], + "angle": 0, + "content": "Recent advancements in learning from demonstrations [1], [2], [3], [4] have shown promising results in developing general-purpose robots. Therefore, efficiently collecting high-quality demonstrations has become a key research focus." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.399, + 0.914, + 0.626 + ], + "angle": 0, + "content": "Recently works have focused on efficient real-world data collection systems, such as devices or exoskeletons with joint-mapping [5], [6], [7], exoskeletons [8], or vision-based systems [9], [10]. However, these approaches require a physical robot during data collection, which limits efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14], [31], [32] offer several advantages: they are low-cost, flexible, and do not depend on a specific physical robot. Additionally, they can be seamlessly integrated into various embodiments and provide a more user-friendly experience for data collection. 
We extend the UMI data collection system [14] by integrating tactile sensing, which enriches the demonstrations with multimodal information, improving policy learning performance while preserving the key benefits of portable devices." + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.644, + 0.785, + 0.659 + ], + "angle": 0, + "content": "C. Multimodal Pre-training for Robotics" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.668, + 0.913, + 0.774 + ], + "angle": 0, + "content": "Pre-trained visual representations have shown improved performance and generalization in robotic manipulation [16], [17], [18], [19], [20] with self-supervised learning techniques [21], [22]. This can be extended to multimodal representation learning [33], [34], [35] by integrating visual, tactile, and proprioceptive modalities, allowing robots to perceive object properties beyond visual appearance." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.775, + 0.913, + 0.866 + ], + "angle": 0, + "content": "Aligning heterogeneous sensory modalities is a key challenge in multimodal learning, as different sensors have varying data structures, sampling rates, and noise characteristics [36]. Inspired by CLIP [22], researchers have developed contrastive learning techniques to align tactile and visual representations for manipulation tasks [37], [38]." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.867, + 0.913, + 0.927 + ], + "angle": 0, + "content": "Our work extends these efforts by introducing masked contrastive pre-training, where the tactile encoder learns to reconstruct future occluded visual information, further enhancing multimodal understanding." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.063, + 0.52, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.53, + 0.065, + 0.903, + 0.364 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.373, + 0.916, + 0.437 + ], + "angle": 0, + "content": "Fig. 2: ViTaMIn's hardware system overview. The handheld device integrates a GoPro camera, two tactile sensors and a synchronization camera to align visual and tactile information. During data collection, the two tactile sensors and the synchronization camera are connected to the Raspberry Pi in the backbox. The total weight of the gripper is approximately \\(1960\\mathrm{g}\\). Left: Side view of the ViTaMIn system. Right: Top view of the ViTaMIn system with the backbox cover removed." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.446, + 0.462, + 0.461 + ], + "angle": 0, + "content": "III. VISUO-TACTILE MANIPULATION INTERFACE" + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.468, + 0.226, + 0.483 + ], + "angle": 0, + "content": "A. System Overview" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.488, + 0.491, + 0.594 + ], + "angle": 0, + "content": "We design a handheld gripper to collect visuo-tactile demonstrations without requiring teleoperation on physical robots. Our gripper design is illustrated in Figure 2. The gripper consists of an RGB fisheye wrist camera (GoPro 10) for image observation, two AllTact finger [15], a synchronization camera for observation temporal alignment, and a Raspberry Pi 5 with a battery for data recording." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.595, + 0.49, + 0.699 + ], + "angle": 0, + "content": "Image Observation To capture comprehensive visual information, we employ a GoPro 10 camera with a \\(155^{\\circ}\\) field-of-view (FoV) fisheye lens. 
The camera operates at 60 FPS with a resolution of \\(2704 \\times 2028\\) pixels and is mounted at the end-effector of our ViTaMIn to ensure consistent visual coverage of the manipulation workspace during demonstration collection and policy deployment." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.7, + 0.49, + 0.851 + ], + "angle": 0, + "content": "Tactile Observation In UMI [14], two TPU-printed Fin Ray grippers are used to provide compliance and enhance grasping stability. However, these grippers lack tactile sensing capabilities. In our ViTaMIn, we employ AllTact [15], a compliant Fin Ray gripper with omnidirectional tactile sensing ability. During manipulation, the embedded camera in AllTact captures both the global deformation of the entire finger and the local deformation of the contact surface as a single image. The tactile sensor operates at 30 FPS with a resolution of \\(640 \\times 480\\) pixels." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.852, + 0.491, + 0.928 + ], + "angle": 0, + "content": "Other Observations To enhance the robustness and accuracy of SLAM, we utilize the IMU data provided by the GoPro, which is synchronized with the visual observations. Gripper width is also critical for precise manipulation. Following UMI [14], we attach two ArUco markers to the" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.446, + 0.913, + 0.476 + ], + "angle": 0, + "content": "gripper's fingers and compute the gripper width from the visual observations." + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.485, + 0.645, + 0.499 + ], + "angle": 0, + "content": "B. Data Processing" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.504, + 0.914, + 0.729 + ], + "angle": 0, + "content": "Sensor Synchronization To synchronize the tactile sensors and GoPro camera, we use an additional low-cost camera which is connected to the Raspberry Pi and is naturally synchronized with the tactile sensors. Before data collection, both the GoPro and the synchronization camera simultaneously capture a sequence of ArUco markers displayed on a computer screen. The ArUco IDs are detected in both video streams, and when an identical ID appears in both, the corresponding timestamps are used for synchronization. Since the framereates of the GoPro and the synchronization camera are \\(60\\mathrm{Hz}\\) and \\(30\\mathrm{Hz}\\) respectively, the temporal alignment error is below \\(1/60 + 1/30 = 0.05\\) seconds, which is sufficient for our tasks. Once the two videos are synchronized, they are cropped by the starting and ending signals triggered by the control button." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.73, + 0.914, + 0.837 + ], + "angle": 0, + "content": "Data Collection and Filtering We adopt a similar data collection pipeline to UMI [14]. We also utilize Simultaneous Localization and Mapping (SLAM) to capture the end-effector trajectories. While SLAM may fail in low-texture environments, it achieves a success rate of approximately \\(80\\%\\) in our tasks, allowing the majority of collected data to be used for imitation learning." + }, + { + "type": "title", + "bbox": [ + 0.567, + 0.844, + 0.853, + 0.858 + ], + "angle": 0, + "content": "IV. VISUO-TACTILE POLICY LEARNING" + }, + { + "type": "title", + "bbox": [ + 0.506, + 0.863, + 0.791, + 0.878 + ], + "angle": 0, + "content": "A. 
Visuo-Tactile Representation Learning" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.882, + 0.914, + 0.928 + ], + "angle": 0, + "content": "UMI uses a pre-trained CLIP [22] encoder to extract visual representations. However, the tactile images in ViTaMIn are very different from the CLIP's training distribution, which" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.066, + 0.49, + 0.112 + ], + "angle": 0, + "content": "can lead to suboptimal representation. To tackle this, we pretrain an effective tactile encoder using the collected action-free datasets, which doesn't rely on the SLAM success." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.113, + 0.49, + 0.188 + ], + "angle": 0, + "content": "Taking the tactile image in Figure 3 as an example, we want the encoder to capture the essential contact properties, such as the object's in-hand pose and gripper's deformation. These signals are complementary information from pixel observations, and are crucial for making future decisions." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.189, + 0.491, + 0.31 + ], + "angle": 0, + "content": "To achieve this, we employ a multimodal contrastive learning approach as illustrated in Figure 3. Given the current masked image \\(\\tilde{I}_V^k\\) and current full tactile observation \\(I_T^k\\) of step \\(k\\), we want the combination of \\(\\tilde{I}_V^k\\) and \\(I_T^k\\) align with the future full image observation \\(I_V^{k + 1}\\) in the CLIP embedding space. The intuition behind this is to make the tactile encoder focus on the contact information to predict future images based on the current corrupted image." + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.334, + 0.466, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.584, + 0.418, + 0.786 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.797, + 0.49, + 0.858 + ], + "angle": 0, + "content": "Fig. 3: The illustration of the multimodal contrastive representation pre-training phase. The tactile encoder is trained to capture complementary information to predict the missing content for the future image." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.866, + 0.491, + 0.928 + ], + "angle": 0, + "content": "To ensure stable training, we freeze the image CLIP encoder \\(\\phi_V(\\cdot)\\) but only fine-tune the tactile encoder \\(\\phi_T(\\cdot)\\). We first obtain the tactile embedding \\(T_{k}\\) from \\(\\phi_T(I_T^k)\\), and \\(V_{k}\\) from \\(\\phi_V(\\tilde{I}_V^k)\\). These embeddings are concatenated and" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.066, + 0.913, + 0.128 + ], + "angle": 0, + "content": "passed through a fully connected projection layer, mapping them back to the original 512-dimensional CLIP embedding space as a fused feature \\( F_{k} \\). 
Finally, we train the tactile encoder using the standard CLIP loss on \\( F_{k} \\) and \\( V_{k + 1} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.629, + 0.132, + 0.913, + 0.161 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {C L I P}} = \\frac {1}{2} \\left(\\mathcal {L} _ {\\mathrm {f - v}} + \\mathcal {L} _ {\\mathrm {v - f}}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.165, + 0.553, + 0.178 + ], + "angle": 0, + "content": "where" + }, + { + "type": "equation", + "bbox": [ + 0.545, + 0.183, + 0.913, + 0.226 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {v - f}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(V _ {i + 1} , F _ {i}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(V _ {i + 1} , F _ {j}\\right) / \\tau\\right)} \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.545, + 0.238, + 0.913, + 0.28 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {f - v}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(F _ {i} , V _ {i + 1}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(F _ {i} , V _ {j + 1}\\right) / \\tau\\right)} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.284, + 0.808, + 0.299 + ], + "angle": 0, + "content": "here \\(\\tau\\) is a learnable temperature parameter." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.299, + 0.914, + 0.465 + ], + "angle": 0, + "content": "Different from [39], where they directly apply the CLIP loss on the time-aligned visuo-tactile images, we instead fuse the tactile observation with a masked current image to predict the future image. We make this choice for two main reasons. First, in [39], the tactile representation is conditioned on proprioceptive states, which are unavailable in our dataset before the success of SLAM. Second, since different tasks may have varying images but similar tactile observations, fusing a masked current image helps the network learn a more expressive tactile representation. Without sufficient masking, the alignment becomes trivial." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.465, + 0.914, + 0.526 + ], + "angle": 0, + "content": "After pre-training, we train a Diffusion Policy [4] on the SLAM-filtered data. Following [4], we use a U-Net [40] as the noise prediction network and apply DDIM [41] to accelerate the inference for action prediction." + }, + { + "type": "image_caption", + "bbox": [ + 0.647, + 0.533, + 0.774, + 0.547 + ], + "angle": 0, + "content": "V. EXPERIMENTS" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.562, + 0.91, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.681, + 0.868, + 0.697 + ], + "angle": 0, + "content": "Fig. 4: Hardware setup for policy deployment." + }, + { + "type": "title", + "bbox": [ + 0.506, + 0.712, + 0.666, + 0.727 + ], + "angle": 0, + "content": "A. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.731, + 0.914, + 0.896 + ], + "angle": 0, + "content": "Hardware Figure 4 shows the policy deployment setup. Our system consists of a Rokae xMate ER3PRO robotic arm equipped with a PGI-140-80-W-S parallel gripper. The 7-DOF robotic arm provides flexible manipulation capabilities, while the gripper features an 8cm stroke range from fully open to closed position. The system is implemented using ROS Noetic on Ubuntu 20.04. 
The control loop operates at \\(10\\mathrm{Hz}\\), with separate threads handling robot control, visual and tactile sensing. The system architecture is designed to minimize latency while maintaining reliable real-time performance." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.897, + 0.914, + 0.928 + ], + "angle": 0, + "content": "Similar to UMI [14], our system compensates for various sources of latency in the perception-action loop through" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.066, + 0.488, + 0.142 + ], + "angle": 0, + "content": "predictive buffering and timestamp-based synchronization between visual and tactile feedback streams. The policy generates 16 consecutive trajectories at each inference step, with 10 trajectories being executed based on our temporal compensation strategy." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.142, + 0.488, + 0.278 + ], + "angle": 0, + "content": "Manipulation Tasks As shown in Figure 5, we propose diverse contact-rich manipulation tasks to evaluate the effectiveness of ViTaMIn. These tasks are specifically crafted to demonstrate the following key capabilities: (1) Robust pick-and-place of diverse objects, including fragile and small objects; (2) Dexterous manipulation, such as in-hand reorientation; (3) Task success determination, allowing the robot to repeat attempts until success; (4) Dynamic and precise manipulation." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.279, + 0.417, + 0.293 + ], + "angle": 0, + "content": "We design the following 5 manipulation tasks:" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.292, + 0.488, + 0.319 + ], + "angle": 0, + "content": "- Orange Placement: Put a fragile orange from a randomized position to a randomized plate." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.321, + 0.488, + 0.365 + ], + "angle": 0, + "content": "- Dynamic Peg Insertion: Grasp a peg and approach a hole, which is moving at a constant speed of \\(10\\mathrm{mm / s}\\). And precisely insert the peg to the hole." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.366, + 0.488, + 0.41 + ], + "angle": 0, + "content": "- Test Tube Reorientation: Grasp a transparent test tube from a shelf and adjust its pose through extrinsic dexterity based on tactile feedback." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.411, + 0.488, + 0.453 + ], + "angle": 0, + "content": "- Scissor Hanging: Grasp a pair of scissors and hang them on a hook. Adjust the pose and keep attempting until it succeeds." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.456, + 0.488, + 0.531 + ], + "angle": 0, + "content": "- Dual-Arm Knife Pulling: The left arm first grasps a knife from a cup, orients it horizontally. The right arm grasps and pulls it out with a constrained prismatic motion. This task requires tactile feedback to grasp the thin object and perform the correct pulling motion." + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.292, + 0.488, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.098, + 0.548, + 0.476, + 0.563 + ], + "angle": 0, + "content": "TABLE I: Data Collection Statistics for Different Tasks" + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.57, + 0.484, + 0.672 + ], + "angle": 0, + "content": "
Task | Raw Data | Valid Data* | Avg. Length
Orange Placement | 87 | 73 | 435
Dynamic Peg Insertion | 201 | 141 | 321
Test Tube Reorientation | 150 | 125 | 619
Scissor Hanging | 172 | 137 | 642
Knife Pulling (Left) | 188 | 131 | 403
Knife Pulling (Right) | 180 | 134 | 254
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.099, + 0.672, + 0.472, + 0.683 + ], + "angle": 0, + "content": "*Valid data refers to demonstrations with successful SLAM tracking" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.7, + 0.488, + 0.849 + ], + "angle": 0, + "content": "Table I shows the statistics of the demonstration data. We collect demonstrations for both single-arm and dual-arm manipulation tasks. For single-arm tasks, we gather between 87 and 172 raw demonstrations per task according to the task difficulty, with successful SLAM tracking achieved in approximately \\(80\\%\\) of the trajectories. The dual-arm knife pulling task requires coordinated motion between both arms, with similar data collection volumes but slightly different average demonstration lengths for left and right arm movements." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.851, + 0.488, + 0.927 + ], + "angle": 0, + "content": "We compare our approach against the following methods: (1) Vision: the policy only takes visual observation from the GoPro camera, which is encoded by the pre-trained CLIP model (identical to the original UMI [14] paper); (2) Ours w/o Pre-training: This baseline simply concatenate visual and" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.066, + 0.912, + 0.097 + ], + "angle": 0, + "content": "tactile observations after separate CLIP ViT-B/16 encoders, and fine-tuned with behavior cloning." + }, + { + "type": "table", + "bbox": [ + 0.532, + 0.112, + 0.89, + 0.24 + ], + "angle": 0, + "content": "
Task | Vision | w/o Pre-training | Ours
Single-Arm Tasks
Orange placement | 0.85 | 0.9 | 1.0
Test Tube Reorientation | 0.4 | 0.7 | 0.9
Scissor Hanging | 0.1 | 0.45 | 0.7
Dynamic Peg Insertion | 0.45 | 0.8 | 0.9
Dual-Arm Task
Knife Pulling | 0.6 | 0.8 | 0.9
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.506, + 0.245, + 0.912, + 0.291 + ], + "angle": 0, + "content": "TABLE II: Comparisons on 5 tasks with baselines. Our approach improves the performance on 5 tasks through multimodal sensing and pre-training." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.3, + 0.912, + 0.42 + ], + "angle": 0, + "content": "The results are presented in Table II. For each task, we conduct 20 trials with randomized initial conditions and report the average performance. The vision-only policy performs the worst across all five tasks, particularly in contact-rich tasks like test tube reorientation and scissor hanging, where tactile feedback is crucial for success. Across all tasks, pre-training enhances the performance, highlighting the importance of learning effective tactile representations." + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.439, + 0.642, + 0.454 + ], + "angle": 0, + "content": "B. Failure Analysis" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.462, + 0.912, + 0.568 + ], + "angle": 0, + "content": "In the Orange placement task, the robot picks up an orange from a random position within a \\(50\\mathrm{cm} \\times 50\\mathrm{cm}\\) workspace and places it on a plate. Failures stem from table collisions, unstable placement, or motion planning errors despite correct object detection. In Dynamic peg insertion, the robot inserts a grasped peg into a moving hole. Vision-only methods often fail due to imprecise localization and alignment." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.569, + 0.912, + 0.75 + ], + "angle": 0, + "content": "In Test tube reorientation, the robot must pick up a tube from a random rack location and reorient it vertically, with success defined by less than \\(10^{\\circ}\\) orientation error. Failures include rack collisions, over-lifting, and incorrect final orientation. Scissor hanging requires picking up scissors and hanging them on a narrow hook, where common issues include misdetection, misalignment, and failure to release. In Knife pulling, a dual-arm policy reorients the knife with one arm while the other pulls it out of a holder. Failures often result from poor coordination, weak grasps, or incomplete pulling. Overall, vision-only policies struggle with contact-rich tasks, highlighting the limitations of unimodal sensing." + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.768, + 0.826, + 0.784 + ], + "angle": 0, + "content": "C. Compliant Articulated Object Manipulation" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.79, + 0.912, + 0.927 + ], + "angle": 0, + "content": "To demonstrate the compliance capabilities of ViTaMIn, we designed a compliant-controlled articulated object manipulation task. The robotic arm needs to grasp a handle (connected to a force gauge) and rotate it 90 degrees to open a switch. During the rotation process, the arm must minimize axial forces to ensure smooth operation. We conduct 10 experiments for each condition and calculate the average forces. The results show that ViTaMIn achieves significantly lower average forces compared to using pure vision as input." + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.07, + 0.308, + 0.086 + ], + "angle": 0, + "content": "Task 1. 
Orange Placement" + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.087, + 0.203, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.208, + 0.089, + 0.309, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.089, + 0.416, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.419, + 0.07, + 0.658, + 0.086 + ], + "angle": 0, + "content": "Task 2. Dynamic Peg Insertion" + }, + { + "type": "image", + "bbox": [ + 0.42, + 0.088, + 0.543, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.543, + 0.088, + 0.655, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.655, + 0.088, + 0.777, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.778, + 0.088, + 0.9, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.185, + 0.35, + 0.198 + ], + "angle": 0, + "content": "Task 3. Test Tube Reorientation" + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.198, + 0.255, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.261, + 0.198, + 0.415, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.198, + 0.576, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.198, + 0.736, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.198, + 0.899, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.28, + 0.285, + 0.293 + ], + "angle": 0, + "content": "Stage I" + }, + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.295, + 0.293, + 0.31 + ], + "angle": 0, + "content": "Task 4. Scissor Hanging" + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.31, + 0.256, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.261, + 0.31, + 0.415, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.31, + 0.576, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.31, + 0.736, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.31, + 0.898, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.409, + 0.353, + 0.424 + ], + "angle": 0, + "content": "Task 5. Knife Pulling (Bimanual)" + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.424, + 0.256, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.424, + 0.415, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.424, + 0.576, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.424, + 0.736, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.424, + 0.898, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.54, + 0.913, + 0.571 + ], + "angle": 0, + "content": "Fig. 5: We test ViTaMIn on 5 contact-rich manipulation tasks, including precise and dynamic insertion, object hanging with multimodal feedback, and transparent in-hand object manipulation." 
+ }, + { + "type": "image", + "bbox": [ + 0.099, + 0.593, + 0.283, + 0.715 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.289, + 0.595, + 0.485, + 0.605 + ], + "angle": 0, + "content": "Maximum Force Comparison: Vision vs. Ours" + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.606, + 0.461, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.594, + 0.66, + 0.714 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.579, + 0.715, + 0.652, + 0.726 + ], + "angle": 0, + "content": "Novel Objects" + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.594, + 0.899, + 0.714 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.739, + 0.715, + 0.822, + 0.727 + ], + "angle": 0, + "content": "Different Lighting" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.731, + 0.49, + 0.777 + ], + "angle": 0, + "content": "Fig. 6: The robot needs to flip open a switch (fixed to a force gauge) by rotating it 90 degrees. During the rotation, the robot must minimize axial forces to ensure smooth operation." + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.737, + 0.913, + 0.782 + ], + "angle": 0, + "content": "Fig. 7: Showcase of novel objects and different lighting in the generalization tasks. The right columns demonstrate colored flashlight/high-power/normal lighting conditions." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.811, + 0.222, + 0.825 + ], + "angle": 0, + "content": "D. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.836, + 0.49, + 0.928 + ], + "angle": 0, + "content": "a) Data Efficiency: We evaluate the performance of policies trained on different amounts (25%, 50%, and 100%) of demonstrations. All the models are evaluated in 20 real-world trials with different initializations. For a more in-depth analysis, we calculate the success rates of each stage separately, as illustrated in Figure 8. With the pre-trained" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.818, + 0.913, + 0.877 + ], + "angle": 0, + "content": "tactile representations, our method can achieve consistently higher success rates on all the tasks across different amounts of data, and can even master the task with limited data (25%) for test tube reorientation." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.882, + 0.914, + 0.928 + ], + "angle": 0, + "content": "b) Training Efficiency: We further evaluate the policies trained with different numbers of epochs to understand its training efficiency under the same evaluation protocol. 
The" + }, + { + "type": "list", + "bbox": [ + 0.506, + 0.818, + 0.914, + 0.928 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.064, + 0.364, + 0.076 + ], + "angle": 0, + "content": "Tube Reorientation" + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.064, + 0.771, + 0.077 + ], + "angle": 0, + "content": "Scissor Hanging" + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.078, + 0.221, + 0.09 + ], + "angle": 0, + "content": "Stage I" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.092, + 0.29, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.079, + 0.496, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.593, + 0.079, + 0.635, + 0.089 + ], + "angle": 0, + "content": "Stage I" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.092, + 0.703, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.8, + 0.079, + 0.845, + 0.089 + ], + "angle": 0, + "content": "Stage II" + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.089, + 0.91, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.259, + 0.915, + 0.306 + ], + "angle": 0, + "content": "Fig. 8: Ablation study on the effect of pre-training on data efficiency. The performance of the policy improves as the quantity of data increases. After pre-training on the action-free, task-ignorant dataset, our method can achieve a high success rate even with limited data (25%)." + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.315, + 0.362, + 0.327 + ], + "angle": 0, + "content": "Tube Reorientation" + }, + { + "type": "image_caption", + "bbox": [ + 0.178, + 0.332, + 0.22, + 0.343 + ], + "angle": 0, + "content": "Stage I" + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.344, + 0.288, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.385, + 0.332, + 0.43, + 0.343 + ], + "angle": 0, + "content": "Stage II" + }, + { + "type": "image", + "bbox": [ + 0.295, + 0.344, + 0.495, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.315, + 0.771, + 0.328 + ], + "angle": 0, + "content": "Scissor Hanging" + }, + { + "type": "image_caption", + "bbox": [ + 0.591, + 0.332, + 0.635, + 0.342 + ], + "angle": 0, + "content": "Stage I" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.343, + 0.702, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.8, + 0.332, + 0.844, + 0.342 + ], + "angle": 0, + "content": "Stage II" + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.343, + 0.909, + 0.504 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.512, + 0.913, + 0.558 + ], + "angle": 0, + "content": "Fig. 9: Ablation study on the effect of pre-training on training efficiency. Policies with pre-training are able to learn to complete the first-stage task at a remarkably early stage of training (within 10 epochs). Additionally, when the policy network is pre-trained, the overall success rates increase more rapidly." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.577, + 0.49, + 0.639 + ], + "angle": 0, + "content": "results are illustrated in Figure 9. We also observe consistent task performance improvements with pre-training. 
The policy can complete the first stage of the task at a remarkably early training stage (within 10 epochs)." + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.652, + 0.49, + 0.766 + ], + "angle": 0, + "content": "
Task | Method | Original | Novel Objects | Different Lighting
Orange Placement | Vision | 0.85 | 0.7 | 0.55
Ours w/o Pre-training | 0.9 | 0.8 | 0.6
Ours | 1.0 | 1.0 | 0.85
Scissor Hanging | Vision | 0.0 | 0.0 | 0.0
Ours w/o Pre-training | 0.45 | 0.4 | 0.4
Ours | 0.7 | 0.7 | 0.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.771, + 0.489, + 0.831 + ], + "angle": 0, + "content": "TABLE III: Generalization under different objects and scenes. The results demonstrate that our multi-modal policy is more robust to novel objects and different lighting conditions." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.862, + 0.285, + 0.877 + ], + "angle": 0, + "content": "E. Generalization Capability" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.881, + 0.49, + 0.927 + ], + "angle": 0, + "content": "We also evaluate our policy's generalizability to unseen objects and environments. As shown in Figure 7, beyond the training orange and scissor, we introduce 6 unseen small" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.577, + 0.913, + 0.682 + ], + "angle": 0, + "content": "objects and 3 unseen scissors to assess object generalization. Additionally, we modify lighting conditions by increasing brightness and introducing colored disco ball lighting. Table III presents results on the tasks of orange placement and scissor hanging. Our method with pre-training achieves consistent better performance across various generalization settings." + }, + { + "type": "title", + "bbox": [ + 0.645, + 0.695, + 0.776, + 0.708 + ], + "angle": 0, + "content": "VI. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.715, + 0.914, + 0.881 + ], + "angle": 0, + "content": "In this paper, we present ViTaMIn, a portable visuo-tactile manipulation interface designed for efficiently collecting high-quality demonstrations by capturing both visual and tactile signals. Furthermore, ViTaMIn introduces an effective pre-training strategy that leverages all the collected action-free data to learn a robust and generalizable tactile representation through multimodal contrastive learning. Our approach significantly outperforms vision-only policies across 5 real-world contact-rich manipulation tasks and demonstrates improved data efficiency, robustness, and generalizability with pre-trained visuo-tactile representations." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.882, + 0.914, + 0.927 + ], + "angle": 0, + "content": "Our method primarily focuses on fixed-base single-arm and dual-arm tasks with parallel-jaw grippers. While this setup is suitable for a wide range of manipulation tasks," + } + ], + [ + { + "type": "text", + "bbox": [ + 0.083, + 0.067, + 0.49, + 0.112 + ], + "angle": 0, + "content": "future work could extend our approach to dexterous hands, enabling richer and more versatile manipulation skills that better approximate human-level dexterity." + }, + { + "type": "title", + "bbox": [ + 0.239, + 0.122, + 0.335, + 0.134 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.143, + 0.49, + 0.178 + ], + "angle": 0, + "content": "[1] S. Levine, C. Finn, T. Darrell, and P. Abbeel, \"End-to-end training of deep visuomotor policies,\" Journal of Machine Learning Research, vol. 17, no. 39, pp. 1-40, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.178, + 0.49, + 0.222 + ], + "angle": 0, + "content": "[2] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, J. Dabis, C. Finn, K. Gopalakrishnan, K. Hausman, A. Herzog, J. Hsu et al., \"Rt-1: Robotics transformer for real-world control at scale,\" arXiv preprint arXiv:2212.06817, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.223, + 0.49, + 0.268 + ], + "angle": 0, + "content": "[3] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, X. Chen, K. 
Choromanski, T. Ding, D. Driess, A. Dubey, C. Finn et al., \"Rt-2: Vision-language-action models transfer web knowledge to robotic control,\" arXiv preprint arXiv:2307.15818, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.268, + 0.49, + 0.302 + ], + "angle": 0, + "content": "[4] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. Burchfiel, and S. Song, \"Diffusion policy: Visuomotor policy learning via action diffusion,\" arXiv preprint arXiv:2303.04137, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.302, + 0.49, + 0.348 + ], + "angle": 0, + "content": "[5] J. Aldaco, T. Armstrong, R. Baruch, J. Bingham, S. Chan, K. Draper, D. Dwibedi, C. Finn, P. Florence, S. Goodrich et al., \"Aloha 2: An enhanced low-cost hardware for bimanual teleoperation,\" arXiv preprint arXiv:2405.02292, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.348, + 0.49, + 0.381 + ], + "angle": 0, + "content": "[6] Z. Fu, T. Z. Zhao, and C. Finn, \"Mobile aloha: Learning bimanual mobile manipulation with low-cost whole-body teleoperation,\" arXiv preprint arXiv:2401.02117, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.381, + 0.49, + 0.415 + ], + "angle": 0, + "content": "[7] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn, “Learning fine-grained bimanual manipulation with low-cost hardware,” arXiv preprint arXiv:2304.13705, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.416, + 0.49, + 0.461 + ], + "angle": 0, + "content": "[8] H. Fang, H.-S. Fang, Y. Wang, J. Ren, J. Chen, R. Zhang, W. Wang, and C. Lu, \"Airexo: Low-cost exoskeletons for learning whole-arm manipulation in the wild,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 15031-15038." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.461, + 0.49, + 0.495 + ], + "angle": 0, + "content": "[9] X. Cheng, J. Li, S. Yang, G. Yang, and X. Wang, “Open-television: Teleoperation with immersive active visual feedback,” arXiv preprint arXiv:2407.01512, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.495, + 0.49, + 0.529 + ], + "angle": 0, + "content": "[10] Y. Qin, W. Yang, B. Huang, K. Van Wyk, H. Su, X. Wang, Y.-W. Chao, and D. Fox, \"Anyteleop: A general vision-based dexterous robot arm-hand teleoperation system,\" arXiv preprint arXiv:2307.04577, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.529, + 0.49, + 0.585 + ], + "angle": 0, + "content": "[11] F. Sanches, G. Gao, N. Elangovan, R. V. Godoy, J. Chapman, K. Wang, P. Jarvis, and M. Liarokapis, \"Scalable. intuitive human to robot skill transfer with wearable human machine interfaces: On complex, dexterous tasks,\" in 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023, pp. 6318-6325." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.586, + 0.49, + 0.62 + ], + "angle": 0, + "content": "[12] K. Doshi, Y. Huang, and S. Coros, \"On hand-held grippers and the morphological gap in human manipulation demonstration,\" arXiv preprint arXiv:2311.01832, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.62, + 0.49, + 0.653 + ], + "angle": 0, + "content": "[13] N. M. M. Shafiullah, A. Rai, H. Etukuru, Y. Liu, I. Misra, S. Chintala, and L. Pinto, \"On bringing robots home,\" arXiv preprint arXiv:2311.16098, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.654, + 0.49, + 0.698 + ], + "angle": 0, + "content": "[14] C. Chi, Z. Xu, C. Pan, E. Cousineau, B. Burchfiel, S. Feng, R. Tedrake, and S. 
Song, \"Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots,\" arXiv preprint arXiv:2402.10329, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.699, + 0.49, + 0.734 + ], + "angle": 0, + "content": "[15] S. Liang, Y. Guan, J. Xu, H. Qian, X. Zhang, D. Wu, W. Ding, and R. Chen, \"Alltact fin ray: A compliant robot gripper with omnidirectional tactile sensing,\" arXiv preprint arXiv:2504.18064, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.734, + 0.49, + 0.779 + ], + "angle": 0, + "content": "[16] S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta, “R3m: A universal visual representation for robot manipulation,” in Proceedings of The 6th Conference on Robot Learning (CoRL), vol. 205. PMLR, 2022, pp. 892–909." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.779, + 0.49, + 0.824 + ], + "angle": 0, + "content": "[17] Y. J. Ma, S. Sodhani, D. Jayaraman, O. Bastani, V. Kumar, and A. Zhang, “VIP: Towards universal visual reward and representation via value-implicit pre-training,” in The Eleventh International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.824, + 0.49, + 0.846 + ], + "angle": 0, + "content": "[18] T. Xiao, I. Radosavovic, T. Darrell, and J. Malik, “Masked visual pretraining for motor control,” arXiv:2203.06173, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.846, + 0.49, + 0.881 + ], + "angle": 0, + "content": "[19] I. Radosavovic, T. Xiao, S. James, P. Abbeel, J. Malik, and T. Darrell, “Real-world robot learning with masked visual pre-training,” in Conference on Robot Learning. PMLR, 2023, pp. 416–426." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.881, + 0.49, + 0.927 + ], + "angle": 0, + "content": "[20] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V.-P. Berges, T. Wu, J. Vakil et al., \"Where are we in the search for an artificial visual cortex for embodied intelligence?\" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023." + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.143, + 0.49, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.068, + 0.913, + 0.114 + ], + "angle": 0, + "content": "[21] K. He, X. Chen, S. Xie, Y. Li, P. Dollar, and R. Girshick, “Masked autoencoders are scalable vision learners,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2022, pp. 16000-16009." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.115, + 0.913, + 0.16 + ], + "angle": 0, + "content": "[22] A. Radford, J. W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark et al., \"Learning transferable visual models from natural language supervision,\" in International conference on machine learning. PMLR, 2021, pp. 8748-8763." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.16, + 0.913, + 0.205 + ], + "angle": 0, + "content": "[23] K. Hosoda, K. Igarashi, and M. Asada, \"Adaptive hybrid visual servoing/force control in unknown environment,\" in Proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems. IROS'96, vol. 3. IEEE, 1996, pp. 1097-1103." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.205, + 0.913, + 0.25 + ], + "angle": 0, + "content": "[24] H. Nakagaki, K. Kitagaki, T. Ogasawara, and H. 
Tsukune, \"Study of deformation and insertion tasks of a flexible wire,\" in Proceedings of International Conference on Robotics and Automation, vol. 3. IEEE, 1997, pp. 2397-2402." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.25, + 0.913, + 0.274 + ], + "angle": 0, + "content": "[25] P. Miller and P. Leibowitz, \"Integration of vision, force and tactile sensing for grasping,\" Int. J. Intell. Mach, vol. 4, pp. 129-149, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.274, + 0.913, + 0.307 + ], + "angle": 0, + "content": "[26] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. Malik, \"General in-hand object rotation with vision and touch,\" in Conference on Robot Learning. PMLR, 2023, pp. 2549-2564." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.307, + 0.913, + 0.341 + ], + "angle": 0, + "content": "[27] S. Li, H. Yu, W. Ding, H. Liu, L. Ye, C. Xia, X. Wang, and X.-P. Zhang, “Visual-tactile fusion for transparent object grasping in complex backgrounds,” IEEE Transactions on Robotics, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.341, + 0.913, + 0.386 + ], + "angle": 0, + "content": "[28] Y. Han, K. Yu, R. Batra, N. Boyd, C. Mehta, T. Zhao, Y. She, S. Hutchinson, and Y. Zhao, “Learning generalizable vision-tactile robotic grasping strategy for deformable objects via transformer,” IEEE/ASME Transactions on Mechatronics, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.386, + 0.913, + 0.42 + ], + "angle": 0, + "content": "[29] R. Bhirangi, V. Pattabiraman, E. Erciyes, Y. Cao, T. Hellebrekers, and L. Pinto, “Anyskin: Plug-and-play skin sensing for robotic touch,” arXiv preprint arXiv:2409.08276, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.421, + 0.913, + 0.453 + ], + "angle": 0, + "content": "[30] V. Pattabiraman, Y. Cao, S. Haldar, L. Pinto, and R. Bhirangi, “Learning precise, contact-rich manipulation through uncalibrated tactile skins,” arXiv preprint arXiv:2410.17246, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.453, + 0.913, + 0.489 + ], + "angle": 0, + "content": "[31] Liu, Guan, Jia, Wu, Liu, Wang, Liang, Chen, Zhang, Song et al., \"Fastumi: A scalable and hardware-independent universal manipulation interface with dataset,\" arXiv e-prints, pp. arXiv-2409, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.489, + 0.913, + 0.522 + ], + "angle": 0, + "content": "[32] Liu, Chi, Cousineau, Kuppuswamy, Burchfiel, and Song, \"Maniwav: Learning robot manipulation from in-the-wild audio-visual data,\" in CoRL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.522, + 0.913, + 0.579 + ], + "angle": 0, + "content": "[33] C. Sferrazza, Y. Seo, H. Liu, Y. Lee, and P. Abbeel, \"The power of the senses: Generalizable manipulation from vision and touch through masked multimodal learning,\" in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 9698-9705." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.579, + 0.913, + 0.613 + ], + "angle": 0, + "content": "[34] Z. Xu, R. Uppuluri, X. Zhang, C. Fitch, P. G. Crandall, W. Shou, D. Wang, and Y. She, \"UniT: Unified tactile representation for robot learning,\" 2024. [Online]. Available: https://arxiv.org/abs/2408.06481" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.613, + 0.913, + 0.636 + ], + "angle": 0, + "content": "[35] X. Zhang and et al., “Fusing multimodal sensory data for robotic perception,” IEEE Transactions on Robotics, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.636, + 0.913, + 0.669 + ], + "angle": 0, + "content": "[36] A. Nagabandi, G. Kahn, S. Levine, and C. Finn, \"Deep reinforcement learning for vision-based robotic control with multimodal inputs,\" in Conference on Robot Learning (CoRL), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.669, + 0.913, + 0.727 + ], + "angle": 0, + "content": "[37] L. Fu, G. Datta, H. Huang, W. C.-H. Panitch, J. Drake, J. Ortiz, M. Mukadam, M. Lambeta, R. Calandra, and K. Goldberg, \"A touch, vision, and language dataset for multimodal alignment,\" in Forty-first International Conference on Machine Learning, 2024. [Online]. Available: https://openreview.net/forum?id=tFEOOH9eH0" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.727, + 0.913, + 0.771 + ], + "angle": 0, + "content": "[38] F. Yang, C. Feng, Z. Chen, H. Park, D. Wang, Y. Dou, Z. Zeng, X. Chen, R. Gangopadhyay, A. Owens, and A. Wong, \"Binding touch to everything: Learning unified multimodal tactile representations,\" arXiv:2401.18084, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.771, + 0.913, + 0.805 + ], + "angle": 0, + "content": "[39] A. George, S. Gano, P. Katragadda, and A. Farimani, “Vital pretraining: Visuo-tactile pretraining for tactile and non-tactile manipulation policies,” arXiv preprint arXiv:2403.11898, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.805, + 0.913, + 0.863 + ], + "angle": 0, + "content": "[40] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18. Springer, 2015, pp. 234-241." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.863, + 0.913, + 0.885 + ], + "angle": 0, + "content": "[41] J. Song, C. Meng, and S. Ermon, “Denoising diffusion implicit models,” arXiv preprint arXiv:2010.02502, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.51, + 0.068, + 0.913, + 0.885 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_origin.pdf b/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7bad29e4c52f5706b49411a69b97ca35106c745e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cf4ffb32b8c332a6005f607064abc7cedea7b129b5afa50f833807501d927fb +size 4293227 diff --git a/data/2025/2504_06xxx/2504.06156/full.md b/data/2025/2504_06xxx/2504.06156/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1b457de03153faace93ddb8f5a451508fe109b51 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/full.md @@ -0,0 +1,359 @@ +# ViTaMIn: Learning Contact-Rich Tasks Through Robot-Free Visuo-Tactile Manipulation Interface + +Fangchen Liu\*,2, Chuanyu Li\*,1, Yihua Qin\*, Jing Xu\*, Pieter Abbeel\*, Rui Chen\*,1 + +$^{1}$ Tsinghua University, $^{2}$ University of California, Berkeley + +* Equal contribution, † Corresponding author + +https://chuanyune.github.io/ViTaMIN_page + +![](images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg) +Demonstrations + +![](images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg) + +![](images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg) + +![](images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg) +Real-World Tasks + +![](images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg) + +![](images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg) + +![](images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg) +Fig. 1: ViTaMIn overview. Our system comprises a portable data collection device that integrates visual and tactile sensing, a multimodal representation learning framework for fusing visual and tactile information, and demonstrations of various contact-rich manipulation tasks. This system facilitates efficient collection of manipulation data without requiring complex robot setups. (*Backgrounds in the images are blurred.) + +![](images/d3b473412a83550b31125af282bc865a7484b0a11b3fe4b684aa09dfa0912134.jpg) + +![](images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg) + +![](images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg) + +Abstract—Tactile information plays a crucial role for humans and robots to interact effectively with their environment, particularly for tasks requiring the understanding of contact properties. Solving such dexterous manipulation tasks often relies on imitation learning from demonstration datasets, which are typically collected via teleoperation systems and often demand substantial time and effort. To address these challenges, we present ViTaMIn, an embodiment-free manipulation interface that integrates visual and tactile sensing into a hand-held gripper, enabling multi-modality data collection without the need for teleoperation. Our design employs a compliant Fin Ray gripper with tactile sensing, allowing operators to perceive force feedback during manipulation for more intuitive operation. 
Additionally, we propose a multi-modal representation learning strategy to obtain pre-trained tactile representations, improving data efficiency and policy robustness. Experiments on 5 contact-rich manipulation tasks demonstrate that our system is more scalable, efficient, and effective than baseline methods. + +# I. INTRODUCTION + +Humans rely on both visual and tactile modalities to perform a diverse range of manipulation tasks in daily + +life. For instance, when inserting a plug into a socket or tightening a screw, vision helps with identifying and aligning components, while tactile signals enable precise force control during contact. This seamless integration of vision and touch enhances human dexterity, particularly in tasks that require contact-rich control, handling visual occlusions, or performing in-hand manipulations. + +Recent progress in learning from demonstrations [1], [2], [3], [4] has shown significant potential for advancing general-purpose robots, enabling them to efficiently acquire complex skills from human demonstrations. Consequently, developing systems to collect high-quality demonstration data has been a recent key focus. Prior works have explored real-world data collection methods, including joint-mapped devices and exoskeletons [5], [6], [7], [8], and vision-based teleoperation frameworks [9], [10]. Nevertheless, these techniques require real-time teleoperation of a physical robot during data collection, which constrains efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14] present + +a more scalable and cost-effective alternative to collect demonstration without teleoperation. Moreover, they can be seamlessly integrated into various embodiments, providing a more flexible data collection approach. However, these portable devices primarily focus on capturing vision-only demonstration data, limiting their usage for contact-rich and dexterous manipulation tasks where tactile feedback plays a crucial role. + +In this work, we aim to address both the challenge of efficient data collection and the need for learning more dexterous tasks using visuo-tactile demonstrations. To this end, we introduce ViTaMIn, a novel and effective visuotactile manipulation interface designed to capture high-quality demonstrations with enhanced efficiency and flexibility. Unlike conventional approaches that rely on rigid tactile sensors, ViTaMIn leverages an omnidirectional compliant Fin Ray gripper with customized tactile sensing, which can detect contact from all directions as an expressive tactile signal for robot manipulation. We integrate the tactile-aware Fin Ray gripper [15] with UMI [14], enhancing the collected data with rich multimodal information and improving policy learning performance while maintaining the core advantages of portable devices. Additionally, our system enables operators to perceive force feedback during manipulation, facilitating more intuitive and seamless operation. + +Pre-trained visual representations have shown improved performance in robotic manipulation [16], [17], [18], [19], [20], benefiting from large-scale visual pre-training. To fully leverage the visuo-tactile datasets collected with ViTaMIn, we adopt a multimodal representation learning strategy to pre-train tactile representations, enhancing the robustness and generalizability of our sensor-based policies. 
Our pretraining objective integrates masked autoencoding [21] and contrastive learning for multimodal alignment [22], where future image observations are aligned with masked current images and tactile signals. Through extensive experiments on five challenging contact-rich manipulation tasks, our visuo-tactile policy, enhanced by multimodal pre-training, exhibits superior data and training efficiency while demonstrating strong generalization across diverse objects and environmental conditions.

In conclusion, our contributions are:

- ViTaMIn provides a portable and scalable visuo-tactile data collection system.
- ViTaMIn proposes an effective multimodal representation learning strategy, which significantly improves data efficiency, robustness, and generalization.
- ViTaMIn achieves superior performance over vision-only baselines across five manipulation tasks by leveraging visuo-tactile demonstrations.

# II. RELATED WORK

# A. Visuo-Tactile Manipulation

Tactile sensing is essential for robotic manipulation as it provides signals about physical contact in addition to visual observation. Early works [23], [24], [25] use RGB cameras and force/torque sensors to infer contact status for making decisions. However, the information from force/torque sensors is low-dimensional and insufficient for more dexterous manipulation tasks.

More recently, vision-based tactile sensors have gained attention for their ability to capture high-resolution contact information [26], [27], [28]. Despite these advances, the rigid design of these sensors restricts the compliance of the end effector; alternative approaches such as uncalibrated tactile skins [29] and plug-and-play sensing systems [30] improve adaptability and flexibility. In our work, we use a Fin-Ray-shaped compliant tactile sensor that can detect contacts from all directions and thus supports safe and robust contact-rich manipulation.

# B. Data Collection System for Robot Manipulation

Recent advancements in learning from demonstrations [1], [2], [3], [4] have shown promising results in developing general-purpose robots. Therefore, efficiently collecting high-quality demonstrations has become a key research focus.

Recent works have focused on efficient real-world data collection systems, such as joint-mapped devices [5], [6], [7], exoskeletons [8], or vision-based systems [9], [10]. However, these approaches require a physical robot during data collection, which limits efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14], [31], [32] offer several advantages: they are low-cost, flexible, and do not depend on a specific physical robot. Additionally, they can be seamlessly integrated into various embodiments and provide a more user-friendly experience for data collection. We extend the UMI data collection system [14] by integrating tactile sensing, which enriches the demonstrations with multimodal information, improving policy learning performance while preserving the key benefits of portable devices.

# C. Multimodal Pre-training for Robotics

Pre-trained visual representations have shown improved performance and generalization in robotic manipulation [16], [17], [18], [19], [20] with self-supervised learning techniques [21], [22].
This can be extended to multimodal representation learning [33], [34], [35] by integrating visual, tactile, and proprioceptive modalities, allowing robots to perceive object properties beyond visual appearance. + +Aligning heterogeneous sensory modalities is a key challenge in multimodal learning, as different sensors have varying data structures, sampling rates, and noise characteristics [36]. Inspired by CLIP [22], researchers have developed contrastive learning techniques to align tactile and visual representations for manipulation tasks [37], [38]. + +Our work extends these efforts by introducing masked contrastive pre-training, where the tactile encoder learns to reconstruct future occluded visual information, further enhancing multimodal understanding. + +![](images/eaeb262e8e8de2cc9c2c5f6bd946acaa4ad560a3e6122d16fbd8e4f0a08cfc1a.jpg) +Fig. 2: ViTaMIn's hardware system overview. The handheld device integrates a GoPro camera, two tactile sensors and a synchronization camera to align visual and tactile information. During data collection, the two tactile sensors and the synchronization camera are connected to the Raspberry Pi in the backbox. The total weight of the gripper is approximately $1960\mathrm{g}$ . Left: Side view of the ViTaMIn system. Right: Top view of the ViTaMIn system with the backbox cover removed. + +![](images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg) + +# III. VISUO-TACTILE MANIPULATION INTERFACE + +# A. System Overview + +We design a handheld gripper to collect visuo-tactile demonstrations without requiring teleoperation on physical robots. Our gripper design is illustrated in Figure 2. The gripper consists of an RGB fisheye wrist camera (GoPro 10) for image observation, two AllTact finger [15], a synchronization camera for observation temporal alignment, and a Raspberry Pi 5 with a battery for data recording. + +Image Observation To capture comprehensive visual information, we employ a GoPro 10 camera with a $155^{\circ}$ field-of-view (FoV) fisheye lens. The camera operates at 60 FPS with a resolution of $2704 \times 2028$ pixels and is mounted at the end-effector of our ViTaMIn to ensure consistent visual coverage of the manipulation workspace during demonstration collection and policy deployment. + +Tactile Observation In UMI [14], two TPU-printed Fin Ray grippers are used to provide compliance and enhance grasping stability. However, these grippers lack tactile sensing capabilities. In our ViTaMIn, we employ AllTact [15], a compliant Fin Ray gripper with omnidirectional tactile sensing ability. During manipulation, the embedded camera in AllTact captures both the global deformation of the entire finger and the local deformation of the contact surface as a single image. The tactile sensor operates at 30 FPS with a resolution of $640 \times 480$ pixels. + +Other Observations To enhance the robustness and accuracy of SLAM, we utilize the IMU data provided by the GoPro, which is synchronized with the visual observations. Gripper width is also critical for precise manipulation. Following UMI [14], we attach two ArUco markers to the + +gripper's fingers and compute the gripper width from the visual observations. + +# B. Data Processing + +Sensor Synchronization To synchronize the tactile sensors and GoPro camera, we use an additional low-cost camera which is connected to the Raspberry Pi and is naturally synchronized with the tactile sensors. 
Before data collection, both the GoPro and the synchronization camera simultaneously capture a sequence of ArUco markers displayed on a computer screen. The ArUco IDs are detected in both video streams, and when an identical ID appears in both, the corresponding timestamps are used for synchronization. Since the frame rates of the GoPro and the synchronization camera are $60\mathrm{Hz}$ and $30\mathrm{Hz}$, respectively, the temporal alignment error is below $1/60 + 1/30 = 0.05$ seconds, which is sufficient for our tasks. Once the two videos are synchronized, they are cropped by the starting and ending signals triggered by the control button.

Data Collection and Filtering We adopt a similar data collection pipeline to UMI [14]. We also utilize Simultaneous Localization and Mapping (SLAM) to capture the end-effector trajectories. While SLAM may fail in low-texture environments, it achieves a success rate of approximately $80\%$ in our tasks, allowing the majority of collected data to be used for imitation learning.

# IV. VISUO-TACTILE POLICY LEARNING

# A. Visuo-Tactile Representation Learning

UMI uses a pre-trained CLIP [22] encoder to extract visual representations. However, the tactile images in ViTaMIn are very different from CLIP's training distribution, which can lead to suboptimal representations. To tackle this, we pre-train an effective tactile encoder using the collected action-free datasets, which does not depend on successful SLAM tracking.

Taking the tactile image in Figure 3 as an example, we want the encoder to capture the essential contact properties, such as the object's in-hand pose and the gripper's deformation. These signals are complementary to the pixel observations and are crucial for making future decisions.

To achieve this, we employ a multimodal contrastive learning approach as illustrated in Figure 3. Given the current masked image $\tilde{I}_V^k$ and the current full tactile observation $I_T^k$ at step $k$, we want the combination of $\tilde{I}_V^k$ and $I_T^k$ to align with the future full image observation $I_V^{k + 1}$ in the CLIP embedding space. The intuition behind this is to make the tactile encoder focus on the contact information to predict future images based on the current corrupted image.

![](images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg)

![](images/dfac30919f736ceede8d6fefd2d847d22d809cb8f4e923239b4620efb4776ebf.jpg)
Fig. 3: Illustration of the multimodal contrastive representation pre-training phase. The tactile encoder is trained to capture complementary information to predict the missing content of the future image.

To ensure stable training, we freeze the image CLIP encoder $\phi_V(\cdot)$ and only fine-tune the tactile encoder $\phi_T(\cdot)$. We first obtain the tactile embedding $T_{k}$ from $\phi_T(I_T^k)$ and $V_{k}$ from $\phi_V(\tilde{I}_V^k)$. These embeddings are concatenated and passed through a fully connected projection layer, mapping them back to the original 512-dimensional CLIP embedding space as a fused feature $F_{k}$.
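To make this step concrete, the snippet below gives a minimal PyTorch-style sketch of the fusion and of the symmetric contrastive objective formalized in Eqs. (1)-(3) that follow. It assumes a frozen CLIP ViT-B/16 image tower and a trainable tactile encoder that both emit 512-dimensional embeddings; the class and function names are illustrative assumptions rather than an exact reproduction of our training code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class VisuoTactileFusion(nn.Module):
    """Fuse a masked current image and the current tactile image into F_k."""

    def __init__(self, clip_image_encoder: nn.Module, tactile_encoder: nn.Module, embed_dim: int = 512):
        super().__init__()
        self.phi_v = clip_image_encoder              # frozen phi_V (CLIP image tower)
        for p in self.phi_v.parameters():
            p.requires_grad_(False)
        self.phi_t = tactile_encoder                 # trainable phi_T
        self.proj = nn.Linear(2 * embed_dim, embed_dim)  # map [V_k; T_k] back to CLIP space

    def forward(self, masked_image_k: torch.Tensor, tactile_k: torch.Tensor) -> torch.Tensor:
        v_k = self.phi_v(masked_image_k)             # V_k from the masked current image
        t_k = self.phi_t(tactile_k)                  # T_k from the current tactile image
        f_k = self.proj(torch.cat([v_k, t_k], dim=-1))
        return F.normalize(f_k, dim=-1)              # fused feature F_k


def symmetric_clip_loss(f_k: torch.Tensor, v_next: torch.Tensor, logit_scale: torch.Tensor) -> torch.Tensor:
    """Symmetric InfoNCE between F_k and the future image embedding V_{k+1}.

    logit_scale plays the role of 1/tau and is a learnable scalar, as in CLIP.
    """
    v_next = F.normalize(v_next, dim=-1)
    logits = logit_scale * f_k @ v_next.t()          # pairwise cosine similarities / tau
    labels = torch.arange(f_k.shape[0], device=f_k.device)
    loss_f2v = F.cross_entropy(logits, labels)       # Eq. (3): fused feature -> future image
    loss_v2f = F.cross_entropy(logits.t(), labels)   # Eq. (2): future image -> fused feature
    return 0.5 * (loss_f2v + loss_v2f)               # Eq. (1)
```

During pre-training, each batch would consist of (masked current image, current tactile image, future image) triplets sampled from the action-free demonstrations, so that gradients flow only into the tactile encoder and the projection layer.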
Finally, we train the tactile encoder using the standard CLIP loss on $F_{k}$ and $V_{k + 1}$ : + +$$ +\mathcal {L} _ {\mathrm {C L I P}} = \frac {1}{2} \left(\mathcal {L} _ {\mathrm {f - v}} + \mathcal {L} _ {\mathrm {v - f}}\right) \tag {1} +$$ + +where + +$$ +\mathcal {L} _ {\mathrm {v - f}} = - \frac {1}{N} \sum_ {i = 1} ^ {N} \log \frac {\exp \left(\cos \left(V _ {i + 1} , F _ {i}\right) / \tau\right)}{\sum_ {j = 1} ^ {N} \exp \left(\cos \left(V _ {i + 1} , F _ {j}\right) / \tau\right)} \tag {2} +$$ + +$$ +\mathcal {L} _ {\mathrm {f - v}} = - \frac {1}{N} \sum_ {i = 1} ^ {N} \log \frac {\exp \left(\cos \left(F _ {i} , V _ {i + 1}\right) / \tau\right)}{\sum_ {j = 1} ^ {N} \exp \left(\cos \left(F _ {i} , V _ {j + 1}\right) / \tau\right)} \tag {3} +$$ + +here $\tau$ is a learnable temperature parameter. + +Different from [39], where they directly apply the CLIP loss on the time-aligned visuo-tactile images, we instead fuse the tactile observation with a masked current image to predict the future image. We make this choice for two main reasons. First, in [39], the tactile representation is conditioned on proprioceptive states, which are unavailable in our dataset before the success of SLAM. Second, since different tasks may have varying images but similar tactile observations, fusing a masked current image helps the network learn a more expressive tactile representation. Without sufficient masking, the alignment becomes trivial. + +After pre-training, we train a Diffusion Policy [4] on the SLAM-filtered data. Following [4], we use a U-Net [40] as the noise prediction network and apply DDIM [41] to accelerate the inference for action prediction. + +![](images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg) +V. EXPERIMENTS +Fig. 4: Hardware setup for policy deployment. + +# A. Experimental Setup + +Hardware Figure 4 shows the policy deployment setup. Our system consists of a Rokae xMate ER3PRO robotic arm equipped with a PGI-140-80-W-S parallel gripper. The 7-DOF robotic arm provides flexible manipulation capabilities, while the gripper features an 8cm stroke range from fully open to closed position. The system is implemented using ROS Noetic on Ubuntu 20.04. The control loop operates at $10\mathrm{Hz}$ , with separate threads handling robot control, visual and tactile sensing. The system architecture is designed to minimize latency while maintaining reliable real-time performance. + +Similar to UMI [14], our system compensates for various sources of latency in the perception-action loop through + +predictive buffering and timestamp-based synchronization between visual and tactile feedback streams. The policy generates 16 consecutive trajectories at each inference step, with 10 trajectories being executed based on our temporal compensation strategy. + +Manipulation Tasks As shown in Figure 5, we propose diverse contact-rich manipulation tasks to evaluate the effectiveness of ViTaMIn. These tasks are specifically crafted to demonstrate the following key capabilities: (1) Robust pick-and-place of diverse objects, including fragile and small objects; (2) Dexterous manipulation, such as in-hand reorientation; (3) Task success determination, allowing the robot to repeat attempts until success; (4) Dynamic and precise manipulation. + +We design the following 5 manipulation tasks: + +- Orange Placement: Put a fragile orange from a randomized position to a randomized plate. 
- Dynamic Peg Insertion: Grasp a peg and approach a hole that is moving at a constant speed of $10\mathrm{mm/s}$, then precisely insert the peg into the hole.
- Test Tube Reorientation: Grasp a transparent test tube from a shelf and adjust its pose through extrinsic dexterity based on tactile feedback.
- Scissor Hanging: Grasp a pair of scissors and hang them on a hook, adjusting the pose and re-attempting until the task succeeds.
- Dual-Arm Knife Pulling: The left arm first grasps a knife from a cup and orients it horizontally. The right arm then grasps the knife and pulls it out with a constrained prismatic motion. This task requires tactile feedback to grasp the thin object and perform the correct pulling motion.

TABLE I: Data Collection Statistics for Different Tasks
| Task | Raw Data | Valid Data* | Avg. Length |
| --- | --- | --- | --- |
| Orange Placement | 87 | 73 | 435 |
| Dynamic Peg Insertion | 201 | 141 | 321 |
| Test Tube Reorientation | 150 | 125 | 619 |
| Scissor Hanging | 172 | 137 | 642 |
| Knife Pulling (Left) | 188 | 131 | 403 |
| Knife Pulling (Right) | 180 | 134 | 254 |
*Valid data refers to demonstrations with successful SLAM tracking.

Table I shows the statistics of the demonstration data. We collect demonstrations for both single-arm and dual-arm manipulation tasks. For single-arm tasks, we gather between 87 and 172 raw demonstrations per task according to the task difficulty, with successful SLAM tracking achieved in approximately $80\%$ of the trajectories. The dual-arm knife pulling task requires coordinated motion between both arms, with similar data collection volumes but slightly different average demonstration lengths for left and right arm movements.

We compare our approach against the following methods: (1) Vision: the policy takes only the visual observation from the GoPro camera, which is encoded by the pre-trained CLIP model (identical to the original UMI [14] paper); (2) Ours w/o Pre-training: this baseline simply concatenates visual and tactile observations after separate CLIP ViT-B/16 encoders and is fine-tuned with behavior cloning.
| Task | Vision | w/o Pre-training | Ours |
| --- | --- | --- | --- |
| *Single-Arm Tasks* | | | |
| Orange placement | 0.85 | 0.9 | 1.0 |
| Test Tube Reorientation | 0.4 | 0.7 | 0.9 |
| Scissor Hanging | 0.1 | 0.45 | 0.7 |
| Dynamic Peg Insertion | 0.45 | 0.8 | 0.9 |
| *Dual-Arm Task* | | | |
| Knife Pulling | 0.6 | 0.8 | 0.9 |
+ +TABLE II: Comparisons on 5 tasks with baselines. Our approach improves the performance on 5 tasks through multimodal sensing and pre-training. + +The results are presented in Table II. For each task, we conduct 20 trials with randomized initial conditions and report the average performance. The vision-only policy performs the worst across all five tasks, particularly in contact-rich tasks like test tube reorientation and scissor hanging, where tactile feedback is crucial for success. Across all tasks, pre-training enhances the performance, highlighting the importance of learning effective tactile representations. + +# B. Failure Analysis + +In the Orange placement task, the robot picks up an orange from a random position within a $50\mathrm{cm} \times 50\mathrm{cm}$ workspace and places it on a plate. Failures stem from table collisions, unstable placement, or motion planning errors despite correct object detection. In Dynamic peg insertion, the robot inserts a grasped peg into a moving hole. Vision-only methods often fail due to imprecise localization and alignment. + +In Test tube reorientation, the robot must pick up a tube from a random rack location and reorient it vertically, with success defined by less than $10^{\circ}$ orientation error. Failures include rack collisions, over-lifting, and incorrect final orientation. Scissor hanging requires picking up scissors and hanging them on a narrow hook, where common issues include misdetection, misalignment, and failure to release. In Knife pulling, a dual-arm policy reorients the knife with one arm while the other pulls it out of a holder. Failures often result from poor coordination, weak grasps, or incomplete pulling. Overall, vision-only policies struggle with contact-rich tasks, highlighting the limitations of unimodal sensing. + +# C. Compliant Articulated Object Manipulation + +To demonstrate the compliance capabilities of ViTaMIn, we designed a compliant-controlled articulated object manipulation task. The robotic arm needs to grasp a handle (connected to a force gauge) and rotate it 90 degrees to open a switch. During the rotation process, the arm must minimize axial forces to ensure smooth operation. We conduct 10 experiments for each condition and calculate the average forces. The results show that ViTaMIn achieves significantly lower average forces compared to using pure vision as input. + +![](images/ff5d1182fc87c0d6043cdc51c2604c67d7dd26e1c42f06dddaec7cbdb5b6fff2.jpg) +Task 1. Orange Placement + +![](images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg) + +![](images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg) + +![](images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg) +Task 2. Dynamic Peg Insertion + +![](images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg) + +![](images/ed9be295452bb2b609707999c0d7ce53274abf084feefa571723224f2e442fef.jpg) + +![](images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg) + +![](images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg) +Task 3. 
Test Tube Reorientation +Stage I + +![](images/d9d86998bcb7355813c2ec3771bc9be86562ca597b9726d312f20d51db3d0713.jpg) + +![](images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg) + +![](images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg) + +![](images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg) + +![](images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg) +Task 4. Scissor Hanging + +![](images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg) + +![](images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg) + +![](images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg) + +![](images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg) + +![](images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg) +Task 5. Knife Pulling (Bimanual) +Fig. 5: We test ViTaMIn on 5 contact-rich manipulation tasks, including precise and dynamic insertion, object hanging with multimodal feedback, and transparent in-hand object manipulation. + +![](images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg) + +![](images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg) + +![](images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg) + +![](images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg) + +![](images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg) +Fig. 6: The robot needs to flip open a switch (fixed to a force gauge) by rotating it 90 degrees. During the rotation, the robot must minimize axial forces to ensure smooth operation. + +![](images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg) +Maximum Force Comparison: Vision vs. Ours + +![](images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg) +Novel Objects +Fig. 7: Showcase of novel objects and different lighting in the generalization tasks. The right columns demonstrate colored flashlight/high-power/normal lighting conditions. + +![](images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg) +Different Lighting + +# D. Ablation Studies + +a) Data Efficiency: We evaluate the performance of policies trained on different amounts (25%, 50%, and 100%) of demonstrations. All the models are evaluated in 20 real-world trials with different initializations. For a more in-depth analysis, we calculate the success rates of each stage separately, as illustrated in Figure 8. With the pre-trained + +tactile representations, our method can achieve consistently higher success rates on all the tasks across different amounts of data, and can even master the task with limited data (25%) for test tube reorientation. +b) Training Efficiency: We further evaluate the policies trained with different numbers of epochs to understand its training efficiency under the same evaluation protocol. The + +![](images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg) +Stage I + +![](images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg) +Tube Reorientation + +![](images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg) +Stage I +Fig. 8: Ablation study on the effect of pre-training on data efficiency. The performance of the policy improves as the quantity of data increases. 
After pre-training on the action-free, task-ignorant dataset, our method can achieve a high success rate even with limited data (25%). + +![](images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg) +Scissor Hanging +Stage II + +![](images/d8dcc28916f7268aa5ffb965d055ef3eb9daf033798758dca22c4625f78d2473.jpg) +Tube Reorientation +Stage I +Fig. 9: Ablation study on the effect of pre-training on training efficiency. Policies with pre-training are able to learn to complete the first-stage task at a remarkably early stage of training (within 10 epochs). Additionally, when the policy network is pre-trained, the overall success rates increase more rapidly. + +![](images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg) +Stage II + +![](images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg) +Scissor Hanging +Stage I + +![](images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg) +Stage II + +results are illustrated in Figure 9. We also observe consistent task performance improvements with pre-training. The policy can complete the first stage of the task at a remarkably early training stage (within 10 epochs). + +
| Task | Method | Original | Novel Objects | Different Lighting |
| --- | --- | --- | --- | --- |
| Orange Placement | Vision | 0.85 | 0.7 | 0.55 |
| | Ours w/o Pre-training | 0.9 | 0.8 | 0.6 |
| | Ours | 1.0 | 1.0 | 0.85 |
| Scissor Hanging | Vision | 0.0 | 0.0 | 0.0 |
| | Ours w/o Pre-training | 0.45 | 0.4 | 0.4 |
| | Ours | 0.7 | 0.7 | 0.5 |
+ +TABLE III: Generalization under different objects and scenes. The results demonstrate that our multi-modal policy is more robust to novel objects and different lighting conditions. + +# E. Generalization Capability + +We also evaluate our policy's generalizability to unseen objects and environments. As shown in Figure 7, beyond the training orange and scissor, we introduce 6 unseen small + +objects and 3 unseen scissors to assess object generalization. Additionally, we modify lighting conditions by increasing brightness and introducing colored disco ball lighting. Table III presents results on the tasks of orange placement and scissor hanging. Our method with pre-training achieves consistent better performance across various generalization settings. + +# VI. CONCLUSION + +In this paper, we present ViTaMIn, a portable visuo-tactile manipulation interface designed for efficiently collecting high-quality demonstrations by capturing both visual and tactile signals. Furthermore, ViTaMIn introduces an effective pre-training strategy that leverages all the collected action-free data to learn a robust and generalizable tactile representation through multimodal contrastive learning. Our approach significantly outperforms vision-only policies across 5 real-world contact-rich manipulation tasks and demonstrates improved data efficiency, robustness, and generalizability with pre-trained visuo-tactile representations. + +Our method primarily focuses on fixed-base single-arm and dual-arm tasks with parallel-jaw grippers. While this setup is suitable for a wide range of manipulation tasks, + +future work could extend our approach to dexterous hands, enabling richer and more versatile manipulation skills that better approximate human-level dexterity. + +# REFERENCES + +[1] S. Levine, C. Finn, T. Darrell, and P. Abbeel, "End-to-end training of deep visuomotor policies," Journal of Machine Learning Research, vol. 17, no. 39, pp. 1-40, 2016. +[2] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, J. Dabis, C. Finn, K. Gopalakrishnan, K. Hausman, A. Herzog, J. Hsu et al., "Rt-1: Robotics transformer for real-world control at scale," arXiv preprint arXiv:2212.06817, 2022. +[3] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, X. Chen, K. Choromanski, T. Ding, D. Driess, A. Dubey, C. Finn et al., "Rt-2: Vision-language-action models transfer web knowledge to robotic control," arXiv preprint arXiv:2307.15818, 2023. +[4] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. Burchfiel, and S. Song, "Diffusion policy: Visuomotor policy learning via action diffusion," arXiv preprint arXiv:2303.04137, 2023. +[5] J. Aldaco, T. Armstrong, R. Baruch, J. Bingham, S. Chan, K. Draper, D. Dwibedi, C. Finn, P. Florence, S. Goodrich et al., "Aloha 2: An enhanced low-cost hardware for bimanual teleoperation," arXiv preprint arXiv:2405.02292, 2024. +[6] Z. Fu, T. Z. Zhao, and C. Finn, "Mobile aloha: Learning bimanual mobile manipulation with low-cost whole-body teleoperation," arXiv preprint arXiv:2401.02117, 2024. +[7] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn, “Learning fine-grained bimanual manipulation with low-cost hardware,” arXiv preprint arXiv:2304.13705, 2023. +[8] H. Fang, H.-S. Fang, Y. Wang, J. Ren, J. Chen, R. Zhang, W. Wang, and C. Lu, "Airexo: Low-cost exoskeletons for learning whole-arm manipulation in the wild," in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 15031-15038. +[9] X. Cheng, J. Li, S. Yang, G. Yang, and X. 
Wang, “Open-television: Teleoperation with immersive active visual feedback,” arXiv preprint arXiv:2407.01512, 2024. +[10] Y. Qin, W. Yang, B. Huang, K. Van Wyk, H. Su, X. Wang, Y.-W. Chao, and D. Fox, "Anyteleop: A general vision-based dexterous robot arm-hand teleoperation system," arXiv preprint arXiv:2307.04577, 2023. +[11] F. Sanches, G. Gao, N. Elangovan, R. V. Godoy, J. Chapman, K. Wang, P. Jarvis, and M. Liarokapis, "Scalable. intuitive human to robot skill transfer with wearable human machine interfaces: On complex, dexterous tasks," in 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023, pp. 6318-6325. +[12] K. Doshi, Y. Huang, and S. Coros, "On hand-held grippers and the morphological gap in human manipulation demonstration," arXiv preprint arXiv:2311.01832, 2023. +[13] N. M. M. Shafiullah, A. Rai, H. Etukuru, Y. Liu, I. Misra, S. Chintala, and L. Pinto, "On bringing robots home," arXiv preprint arXiv:2311.16098, 2023. +[14] C. Chi, Z. Xu, C. Pan, E. Cousineau, B. Burchfiel, S. Feng, R. Tedrake, and S. Song, "Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots," arXiv preprint arXiv:2402.10329, 2024. +[15] S. Liang, Y. Guan, J. Xu, H. Qian, X. Zhang, D. Wu, W. Ding, and R. Chen, "Alltact fin ray: A compliant robot gripper with omnidirectional tactile sensing," arXiv preprint arXiv:2504.18064, 2025. +[16] S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta, “R3m: A universal visual representation for robot manipulation,” in Proceedings of The 6th Conference on Robot Learning (CoRL), vol. 205. PMLR, 2022, pp. 892–909. +[17] Y. J. Ma, S. Sodhani, D. Jayaraman, O. Bastani, V. Kumar, and A. Zhang, “VIP: Towards universal visual reward and representation via value-implicit pre-training,” in The Eleventh International Conference on Learning Representations, 2023. +[18] T. Xiao, I. Radosavovic, T. Darrell, and J. Malik, “Masked visual pretraining for motor control,” arXiv:2203.06173, 2022. +[19] I. Radosavovic, T. Xiao, S. James, P. Abbeel, J. Malik, and T. Darrell, “Real-world robot learning with masked visual pre-training,” in Conference on Robot Learning. PMLR, 2023, pp. 416–426. +[20] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V.-P. Berges, T. Wu, J. Vakil et al., "Where are we in the search for an artificial visual cortex for embodied intelligence?" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023. + +[21] K. He, X. Chen, S. Xie, Y. Li, P. Dollar, and R. Girshick, “Masked autoencoders are scalable vision learners,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2022, pp. 16000-16009. +[22] A. Radford, J. W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark et al., "Learning transferable visual models from natural language supervision," in International conference on machine learning. PMLR, 2021, pp. 8748-8763. +[23] K. Hosoda, K. Igarashi, and M. Asada, "Adaptive hybrid visual servoing/force control in unknown environment," in Proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems. IROS'96, vol. 3. IEEE, 1996, pp. 1097-1103. +[24] H. Nakagaki, K. Kitagaki, T. Ogasawara, and H. Tsukune, "Study of deformation and insertion tasks of a flexible wire," in Proceedings of International Conference on Robotics and Automation, vol. 3. IEEE, 1997, pp. 2397-2402. +[25] P. Miller and P. 
Leibowitz, "Integration of vision, force and tactile sensing for grasping," Int. J. Intell. Mach, vol. 4, pp. 129-149, 1999. +[26] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. Malik, "General in-hand object rotation with vision and touch," in Conference on Robot Learning. PMLR, 2023, pp. 2549-2564. +[27] S. Li, H. Yu, W. Ding, H. Liu, L. Ye, C. Xia, X. Wang, and X.-P. Zhang, “Visual-tactile fusion for transparent object grasping in complex backgrounds,” IEEE Transactions on Robotics, 2023. +[28] Y. Han, K. Yu, R. Batra, N. Boyd, C. Mehta, T. Zhao, Y. She, S. Hutchinson, and Y. Zhao, “Learning generalizable vision-tactile robotic grasping strategy for deformable objects via transformer,” IEEE/ASME Transactions on Mechatronics, 2024. +[29] R. Bhirangi, V. Pattabiraman, E. Erciyes, Y. Cao, T. Hellebrekers, and L. Pinto, “Anyskin: Plug-and-play skin sensing for robotic touch,” arXiv preprint arXiv:2409.08276, 2024. +[30] V. Pattabiraman, Y. Cao, S. Haldar, L. Pinto, and R. Bhirangi, “Learning precise, contact-rich manipulation through uncalibrated tactile skins,” arXiv preprint arXiv:2410.17246, 2024. +[31] Liu, Guan, Jia, Wu, Liu, Wang, Liang, Chen, Zhang, Song et al., "Fastumi: A scalable and hardware-independent universal manipulation interface with dataset," arXiv e-prints, pp. arXiv-2409, 2024. +[32] Liu, Chi, Cousineau, Kuppuswamy, Burchfiel, and Song, "Maniwav: Learning robot manipulation from in-the-wild audio-visual data," in CoRL, 2024. +[33] C. Sferrazza, Y. Seo, H. Liu, Y. Lee, and P. Abbeel, "The power of the senses: Generalizable manipulation from vision and touch through masked multimodal learning," in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 9698-9705. +[34] Z. Xu, R. Uppuluri, X. Zhang, C. Fitch, P. G. Crandall, W. Shou, D. Wang, and Y. She, "UniT: Unified tactile representation for robot learning," 2024. [Online]. Available: https://arxiv.org/abs/2408.06481 +[35] X. Zhang and et al., “Fusing multimodal sensory data for robotic perception,” IEEE Transactions on Robotics, 2022. +[36] A. Nagabandi, G. Kahn, S. Levine, and C. Finn, "Deep reinforcement learning for vision-based robotic control with multimodal inputs," in Conference on Robot Learning (CoRL), 2020. +[37] L. Fu, G. Datta, H. Huang, W. C.-H. Panitch, J. Drake, J. Ortiz, M. Mukadam, M. Lambeta, R. Calandra, and K. Goldberg, "A touch, vision, and language dataset for multimodal alignment," in Forty-first International Conference on Machine Learning, 2024. [Online]. Available: https://openreview.net/forum?id=tFEOOH9eH0 +[38] F. Yang, C. Feng, Z. Chen, H. Park, D. Wang, Y. Dou, Z. Zeng, X. Chen, R. Gangopadhyay, A. Owens, and A. Wong, "Binding touch to everything: Learning unified multimodal tactile representations," arXiv:2401.18084, 2024. +[39] A. George, S. Gano, P. Katragadda, and A. Farimani, “Vital pretraining: Visuo-tactile pretraining for tactile and non-tactile manipulation policies,” arXiv preprint arXiv:2403.11898, 2024. +[40] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18. Springer, 2015, pp. 234-241. +[41] J. Song, C. Meng, and S. Ermon, “Denoising diffusion implicit models,” arXiv preprint arXiv:2010.02502, 2020. 
\ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06156/images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg b/data/2025/2504_06xxx/2504.06156/images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8c3b9e95236aa5dedb70afd651750f0f39dd27b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b436f402adc0ab5774c0684738d89e4b583d9e89624bd108497db1ce395a195 +size 7422 diff --git a/data/2025/2504_06xxx/2504.06156/images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg b/data/2025/2504_06xxx/2504.06156/images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db3ece825de3337d04f6e55ad16eefcfbd2c7182 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31e536af065a7b46d05cd54b004c9d9041a7f3e26db91aa5ef4580982732b6e2 +size 17367 diff --git a/data/2025/2504_06xxx/2504.06156/images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg b/data/2025/2504_06xxx/2504.06156/images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c7cf862c0ff34f2139e7cc28488a0d77cc5b093 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34449953d2e6c67da933dc9992c831a30ac30ea6fab023f11b32c6d995bf9402 +size 6974 diff --git a/data/2025/2504_06xxx/2504.06156/images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg b/data/2025/2504_06xxx/2504.06156/images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df678aa592e25ca943ec428b32af999ed3c2c865 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a25357b4084acae27c36b21bbd6dd3550c5db25b45445d00ba102d95562577c9 +size 16710 diff --git a/data/2025/2504_06xxx/2504.06156/images/2a3bf65b454c923e9b5bb949a3116a5dfef20d4712dfc9369a08f9361b171127.jpg b/data/2025/2504_06xxx/2504.06156/images/2a3bf65b454c923e9b5bb949a3116a5dfef20d4712dfc9369a08f9361b171127.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39edf2bd9ff6c38060ade8f5e5e71d81d1719bf9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/2a3bf65b454c923e9b5bb949a3116a5dfef20d4712dfc9369a08f9361b171127.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ad181baef3b014a5cd1caf05f22d37c9c55f0094a2f5dd7512da3c2b7ee14a1 +size 33144 diff --git a/data/2025/2504_06xxx/2504.06156/images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg b/data/2025/2504_06xxx/2504.06156/images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75c7f67a7c7e6a9212f73fb53ed692644154881c --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06156/images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba589db43b6bef224dfa66bcee97fabda6586b7781f32ee805e53f853b587b0c +size 7730 diff --git a/data/2025/2504_06xxx/2504.06156/images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg b/data/2025/2504_06xxx/2504.06156/images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9903dc4c1c42a23144b7c814af04651908b6cd55 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e316c1f2935f0f39f642e41536b645fc986344a5ec4531bd83d26e2725c9163 +size 7488 diff --git a/data/2025/2504_06xxx/2504.06156/images/2ec98aac269313a4a3cc98c76d6cba7f37ecc7b2a02ed422fa6eb8b07c3cd183.jpg b/data/2025/2504_06xxx/2504.06156/images/2ec98aac269313a4a3cc98c76d6cba7f37ecc7b2a02ed422fa6eb8b07c3cd183.jpg new file mode 100644 index 0000000000000000000000000000000000000000..463dadcb98380108140963765ddf2babdf8bc227 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/2ec98aac269313a4a3cc98c76d6cba7f37ecc7b2a02ed422fa6eb8b07c3cd183.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6a07551dfaa3b22035e461486ff21e9dd9566ef2899dc6fe7a8d2bc485aec06 +size 10650 diff --git a/data/2025/2504_06xxx/2504.06156/images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg b/data/2025/2504_06xxx/2504.06156/images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36c2c8615b483fc23f65cc02c0be6dc3ae8bfa07 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8589473b3d02bf5867f02e9bd38b828bcadaece288c4493f066f66d8ab80a14 +size 9075 diff --git a/data/2025/2504_06xxx/2504.06156/images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg b/data/2025/2504_06xxx/2504.06156/images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ee6f1537728b9c3260a8cd2c9e6dddddcb3307a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3072f50e93846c3b2bf8ed1ca75d0f84d43dd8214622575a6efc18d153050c69 +size 8736 diff --git a/data/2025/2504_06xxx/2504.06156/images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg b/data/2025/2504_06xxx/2504.06156/images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8c07ac9f6157eb565f9b1fce6d0330a99a5250c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2edc72430532a5d2cd85af4be88834bb355cb77cf2491289243e2cd38e76c105 +size 15808 diff --git a/data/2025/2504_06xxx/2504.06156/images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg 
b/data/2025/2504_06xxx/2504.06156/images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afc9f9f4c8368da1e5d7c4ba28773f9b069ba1e9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:987ef9e0b72599473463220ccefbe20f8198dc7afd125c4462011a0cc44f64e7 +size 9190 diff --git a/data/2025/2504_06xxx/2504.06156/images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg b/data/2025/2504_06xxx/2504.06156/images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5dc379a1c776cb22584f8775d8ae85a1c062c1eb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa273679d1637158a49f8f28bc2008bf1645acbe73e2cf73d99da7d13aa3082d +size 9080 diff --git a/data/2025/2504_06xxx/2504.06156/images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg b/data/2025/2504_06xxx/2504.06156/images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f346446c64d8bfeadfb2eb669e1ca02ce0cb3fd --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f11faf46fb1fa2cb0ec205634b2b890b45793c43ba5e6dfa595b0d6c5882904 +size 15349 diff --git a/data/2025/2504_06xxx/2504.06156/images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg b/data/2025/2504_06xxx/2504.06156/images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c061ca822f3bb029240a1df1e0b76779d4abb624 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9db5514abc30c2d497bfe2bc5a439156db9bed0db55da137869459cb2ac23581 +size 11638 diff --git a/data/2025/2504_06xxx/2504.06156/images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg b/data/2025/2504_06xxx/2504.06156/images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0860dceb3fdb6fc54f72c250db41b4ec58333459 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d065bf984a506ad6a8e29e268b08d2380d639e3b729866b2fe7a6905860964b6 +size 8230 diff --git a/data/2025/2504_06xxx/2504.06156/images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg b/data/2025/2504_06xxx/2504.06156/images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..850fcd45fcbb3fc5e2c04ab3388f6a91ed418ad3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:da0b6128ccb90f4ef776005a41ad59efbd882862e40d147646228097d5ce0d77 +size 56891 diff --git a/data/2025/2504_06xxx/2504.06156/images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg b/data/2025/2504_06xxx/2504.06156/images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..405e8734abbc35cff5387b5ad4e23f8970034026 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef2f951c2f96cdcc180bf03606dd75ebb9e97b28afb7de083e84c1f37eb4d1b6 +size 7302 diff --git a/data/2025/2504_06xxx/2504.06156/images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg b/data/2025/2504_06xxx/2504.06156/images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5d29ccaecb4c45b54f6d9be0c7bd17a54c5a14e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8324ec92d19a50b0a2a48ace6a1942fb8ab8187bd6d78e27b7b1b2a3d851ae6 +size 25220 diff --git a/data/2025/2504_06xxx/2504.06156/images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg b/data/2025/2504_06xxx/2504.06156/images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg new file mode 100644 index 0000000000000000000000000000000000000000..722a82dd39116b1c06cb588ac787219927746e05 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9993f26466543d760921cf9b191b8b09d8f3235e246036aa95c625f47ca190d0 +size 7053 diff --git a/data/2025/2504_06xxx/2504.06156/images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg b/data/2025/2504_06xxx/2504.06156/images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd75c6dcfd378d4f46c8a6ac6e54b52f40856a27 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a28689252d7fa04baceff0d0ed4b7ea7c2837c4a36db81c70a90a411cdc2db2a +size 9184 diff --git a/data/2025/2504_06xxx/2504.06156/images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg b/data/2025/2504_06xxx/2504.06156/images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9f85fc0b005d566a88e1fae2c74a2ea2a2b8a26 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7244062666863e4244819c1b59854f74e3f5b33d26035bb96fff689c8d66b301 +size 24600 diff --git a/data/2025/2504_06xxx/2504.06156/images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg b/data/2025/2504_06xxx/2504.06156/images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6fd36b9a09733400bf043e6ccef2e0eae988db9 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06156/images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29aa3922186d5317618134a58e4a604ecd791d34f175dbc9393b2e0146dcf143 +size 9128 diff --git a/data/2025/2504_06xxx/2504.06156/images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg b/data/2025/2504_06xxx/2504.06156/images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cd9984032a3dc94cac14f8571d4e9376b6d4d11 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c085fb8a6f344947695f20b0b99de177fe62a015f3c5dc9bbf75cd245a6ac2b +size 9557 diff --git a/data/2025/2504_06xxx/2504.06156/images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg b/data/2025/2504_06xxx/2504.06156/images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4884e9d6aa2b3560d64543f9716b1d999aba17d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93d19361e14980380c2006849e4c45c2345cd416705710b1e4a4f1ed631ed9fe +size 9281 diff --git a/data/2025/2504_06xxx/2504.06156/images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg b/data/2025/2504_06xxx/2504.06156/images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04c90b7da97dd4c19205b553e125391256be3cc5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e78fc283bdf8f867068b1e353d29cdf4ec83acdae81bac06bfa05d1eb4da40e7 +size 9693 diff --git a/data/2025/2504_06xxx/2504.06156/images/8105d743b48c767516e10ef93cc71f7fc5122df736e327dea3f051cc7bfb6c47.jpg b/data/2025/2504_06xxx/2504.06156/images/8105d743b48c767516e10ef93cc71f7fc5122df736e327dea3f051cc7bfb6c47.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1f89db275f71b40ebb48e2e618fe1f85a73254d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/8105d743b48c767516e10ef93cc71f7fc5122df736e327dea3f051cc7bfb6c47.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06f5d13e714678b26f49ce32ac04d0760c7a82d49c8035847d88af77ba57814f +size 28049 diff --git a/data/2025/2504_06xxx/2504.06156/images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg b/data/2025/2504_06xxx/2504.06156/images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c0cecaf14b140ce98a153f5e8d1ca302b86119d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5396d613369681309f245453c60490dd12fb689ef66ffab20a2d2280abf7da32 +size 15140 diff --git a/data/2025/2504_06xxx/2504.06156/images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg 
b/data/2025/2504_06xxx/2504.06156/images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9dc073943a8c13765edd85bbe4df6ad19180a5e4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cae3f4e6866d890380bf0a0f3c1e51cef41f74a24ce764bec12f0651667db9a +size 6558 diff --git a/data/2025/2504_06xxx/2504.06156/images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg b/data/2025/2504_06xxx/2504.06156/images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1fb487ae5b43c923b572b2318ecbfccc9edb7a48 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:400dec9d08dd4e7717df91bd8bad2302c44e18ebb5a83ca1ea69d2a12197a5ac +size 12209 diff --git a/data/2025/2504_06xxx/2504.06156/images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg b/data/2025/2504_06xxx/2504.06156/images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d7462787554b7ae639341211eb31a94d58951fc --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21084ef370053b4b4db325e791e88225d6d0006e2345d8da1f9d691a0bed3337 +size 8877 diff --git a/data/2025/2504_06xxx/2504.06156/images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg b/data/2025/2504_06xxx/2504.06156/images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eefda1d7c9971e64c59f7d160e186a52d4c2c545 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b0a87e99927cd3a199f5ec4b4798e1d992a1e02a31ea7e8fcc398c0d1f91997 +size 14395 diff --git a/data/2025/2504_06xxx/2504.06156/images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg b/data/2025/2504_06xxx/2504.06156/images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eac40d31dd6e73298cc7af809cd294ebd2d78ab1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bd3e32de0acb08a5e2f775bd60272286b49a4c6cc65f5694c0f844b216d5834 +size 8515 diff --git a/data/2025/2504_06xxx/2504.06156/images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg b/data/2025/2504_06xxx/2504.06156/images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4301a55dfbd74a1d4945c4c73cb5180bb13f959 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ee44e5c22be73f627dc6e99dc2bdbd2f30e1cd6bc8b95db31344b9e97982cf4a +size 41084 diff --git a/data/2025/2504_06xxx/2504.06156/images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg b/data/2025/2504_06xxx/2504.06156/images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25679147b9991e13ff601c783fce1da6cfcfbd63 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af092cb8553acdbb92a3d946c02d33c21ae0a890f2daced05ff58c5becf7ddf8 +size 9050 diff --git a/data/2025/2504_06xxx/2504.06156/images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg b/data/2025/2504_06xxx/2504.06156/images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2b3e057d074d316f7b35bc743da629012d8dbe9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44daae7eee4dcadfdddfc24d43c374e58f651952fd8451abe07157b43f9ef890 +size 9025 diff --git a/data/2025/2504_06xxx/2504.06156/images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg b/data/2025/2504_06xxx/2504.06156/images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf175fb3885b8b5bafa8de7a6a9d555c1bd0e1fb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcb7adddbef4c83f90ba8d12f079e46ef046ebea1875f8e83118ef54ead56e4c +size 15040 diff --git a/data/2025/2504_06xxx/2504.06156/images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg b/data/2025/2504_06xxx/2504.06156/images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9cf5ef44fa69def02e503845f48da5b975c3c48 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b37257d96b21fd0476df6f9c83595534dddb8ff713bc6b7f5098d4246373751 +size 11351 diff --git a/data/2025/2504_06xxx/2504.06156/images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg b/data/2025/2504_06xxx/2504.06156/images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f807e1b8a20f954d837aa09277a7c4fcb009969 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1c1a7cb55ad01837ce26d88d0958818ba2dab2512b8c949ac26553e732976ad +size 6681 diff --git a/data/2025/2504_06xxx/2504.06156/images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg b/data/2025/2504_06xxx/2504.06156/images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea28a414e7ca37ab4081eee5c5ba7b324c5078a4 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06156/images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e167813473813b1dc164bd6a33753ba2340499ddadaa1131ec96166531bdedb5 +size 9601 diff --git a/data/2025/2504_06xxx/2504.06156/images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg b/data/2025/2504_06xxx/2504.06156/images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f38b35f7cd9556f7b2d58fa59bb9e143d06daec --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94908be63e466d8c0639ee03b4eb42d2d48bfae299f22f40bd3b7f440b8c1a34 +size 9957 diff --git a/data/2025/2504_06xxx/2504.06156/images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg b/data/2025/2504_06xxx/2504.06156/images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a62710dc0870a5ad2b4a017181143734df949515 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05b1aa42a27a30c9f5b4be2a60d430f413c4220793fb1145e4cbdd362bc97314 +size 7813 diff --git a/data/2025/2504_06xxx/2504.06156/images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg b/data/2025/2504_06xxx/2504.06156/images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5176d65dee8f500e7f16298853f0041eea4e2b09 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60b754ca9a382213fb1d741e4ada0d5c315d0897ce024e5d1741164fb866f285 +size 25605 diff --git a/data/2025/2504_06xxx/2504.06156/images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg b/data/2025/2504_06xxx/2504.06156/images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39914a2eb7084b77e704f3ae82a210176ca60aa6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc393c088d5d1a4c7c6e9df0dcdb8854f0bc14f5e12273d0dc22b766551ea7d5 +size 14419 diff --git a/data/2025/2504_06xxx/2504.06156/images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg b/data/2025/2504_06xxx/2504.06156/images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5194cfa68ad93ee61c1dd485b7eb21a0586f680 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b3eddd74626fe24996f4d9ab8d4d28e3589aac0757a15e2225d5744faa9a325 +size 9465 diff --git a/data/2025/2504_06xxx/2504.06156/images/d3b473412a83550b31125af282bc865a7484b0a11b3fe4b684aa09dfa0912134.jpg 
b/data/2025/2504_06xxx/2504.06156/images/d3b473412a83550b31125af282bc865a7484b0a11b3fe4b684aa09dfa0912134.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3af4dafbfb991bcfa3f40842414f3d855603c2e1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/d3b473412a83550b31125af282bc865a7484b0a11b3fe4b684aa09dfa0912134.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1404987059db9fdf1a7289f7914977f8feb14db8c798fc4a11c8c3503405568d +size 13379 diff --git a/data/2025/2504_06xxx/2504.06156/images/d8dcc28916f7268aa5ffb965d055ef3eb9daf033798758dca22c4625f78d2473.jpg b/data/2025/2504_06xxx/2504.06156/images/d8dcc28916f7268aa5ffb965d055ef3eb9daf033798758dca22c4625f78d2473.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b746427f7bbde6974417eaafc12cff6ab0de4ed --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/d8dcc28916f7268aa5ffb965d055ef3eb9daf033798758dca22c4625f78d2473.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:530ec726601ba982e09050325cd190c4cf35ec3a2d33ddcd945a738a4c4432d4 +size 14966 diff --git a/data/2025/2504_06xxx/2504.06156/images/d9d86998bcb7355813c2ec3771bc9be86562ca597b9726d312f20d51db3d0713.jpg b/data/2025/2504_06xxx/2504.06156/images/d9d86998bcb7355813c2ec3771bc9be86562ca597b9726d312f20d51db3d0713.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e87a3a91cead88da781b602ac6b2b2e292a974d7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/d9d86998bcb7355813c2ec3771bc9be86562ca597b9726d312f20d51db3d0713.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c27a9c67be7e56dd491ed07884e813ababd0b802af8757def7107b5757ac7855 +size 7997 diff --git a/data/2025/2504_06xxx/2504.06156/images/dfac30919f736ceede8d6fefd2d847d22d809cb8f4e923239b4620efb4776ebf.jpg b/data/2025/2504_06xxx/2504.06156/images/dfac30919f736ceede8d6fefd2d847d22d809cb8f4e923239b4620efb4776ebf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e33fbd107f7f0c675370816ea011f73ad6b3f15 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/dfac30919f736ceede8d6fefd2d847d22d809cb8f4e923239b4620efb4776ebf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6400de8d1b3c656b6d18141c61d2ea4d4f77b3a2bae4fa19de38ee290cd92bc +size 19228 diff --git a/data/2025/2504_06xxx/2504.06156/images/eae2bd4dd37d3b7a1249a136a2e4e36453a4e27aed461598963d0c97dc63fade.jpg b/data/2025/2504_06xxx/2504.06156/images/eae2bd4dd37d3b7a1249a136a2e4e36453a4e27aed461598963d0c97dc63fade.jpg new file mode 100644 index 0000000000000000000000000000000000000000..833482366c1d61bed5e4bd4c140fb684977b6688 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/eae2bd4dd37d3b7a1249a136a2e4e36453a4e27aed461598963d0c97dc63fade.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5915f2fdb28275b07c80f11f38e6d3ae2a8e15fb30ab6a25da0987e99d165eca +size 28220 diff --git a/data/2025/2504_06xxx/2504.06156/images/eaeb262e8e8de2cc9c2c5f6bd946acaa4ad560a3e6122d16fbd8e4f0a08cfc1a.jpg b/data/2025/2504_06xxx/2504.06156/images/eaeb262e8e8de2cc9c2c5f6bd946acaa4ad560a3e6122d16fbd8e4f0a08cfc1a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe9d6801f2abcdd05648a0eae3286127be4a53c8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/eaeb262e8e8de2cc9c2c5f6bd946acaa4ad560a3e6122d16fbd8e4f0a08cfc1a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:cfa1f299562f35ec244c65f7cf6e44593ea914fe35fd364ac16e1c8403bd3775 +size 60222 diff --git a/data/2025/2504_06xxx/2504.06156/images/ed9be295452bb2b609707999c0d7ce53274abf084feefa571723224f2e442fef.jpg b/data/2025/2504_06xxx/2504.06156/images/ed9be295452bb2b609707999c0d7ce53274abf084feefa571723224f2e442fef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b53f6ec6b34ee22813bdfc0b77b7cff39534b88b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/ed9be295452bb2b609707999c0d7ce53274abf084feefa571723224f2e442fef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:059f67732fe8a3e21ec215b6b4cef279fce5f40bdb65898848c7968cfbdb37a1 +size 7022 diff --git a/data/2025/2504_06xxx/2504.06156/images/fa9a910f7b891b9bde75d314d6b7942f49817996387d1a2c2d598104e4f32095.jpg b/data/2025/2504_06xxx/2504.06156/images/fa9a910f7b891b9bde75d314d6b7942f49817996387d1a2c2d598104e4f32095.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5182aff9d0595e6d662a59254bc4a01c15a02be --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/fa9a910f7b891b9bde75d314d6b7942f49817996387d1a2c2d598104e4f32095.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d183b13bd4ce99952fb168afa479624b8e2ee5dfed90287c3a4464e456340b24 +size 3758 diff --git a/data/2025/2504_06xxx/2504.06156/images/feba9d300a376c584fc6ce727355ddce2b0fc4efeb5965c1fcb3bee7cf808e7a.jpg b/data/2025/2504_06xxx/2504.06156/images/feba9d300a376c584fc6ce727355ddce2b0fc4efeb5965c1fcb3bee7cf808e7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5878b6b7dfd26af0644dd57fc4f116dcbc042a34 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/feba9d300a376c584fc6ce727355ddce2b0fc4efeb5965c1fcb3bee7cf808e7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30b0554c78e8a5e1ad71d5e73c5c13847db44f1d0424189c7bee6ab876dbb43e +size 10510 diff --git a/data/2025/2504_06xxx/2504.06156/images/ff5d1182fc87c0d6043cdc51c2604c67d7dd26e1c42f06dddaec7cbdb5b6fff2.jpg b/data/2025/2504_06xxx/2504.06156/images/ff5d1182fc87c0d6043cdc51c2604c67d7dd26e1c42f06dddaec7cbdb5b6fff2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ce5dcf933381c5225509b70dcbb631c93e54d7e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/images/ff5d1182fc87c0d6043cdc51c2604c67d7dd26e1c42f06dddaec7cbdb5b6fff2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b21d9c9710cc2e2be734299eae211f1d99b8a939a3a03260bb6d87b3cd71df33 +size 6382 diff --git a/data/2025/2504_06xxx/2504.06156/layout.json b/data/2025/2504_06xxx/2504.06156/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b43de4e1f2349c4f1babf8ff64e91162dd7e22c6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06156/layout.json @@ -0,0 +1,8558 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 91, + 69, + 521, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 69, + 521, + 109 + ], + "spans": [ + { + "bbox": [ + 91, + 69, + 521, + 109 + ], + "type": "text", + "content": "ViTaMIn: Learning Contact-Rich Tasks Through Robot-Free Visuo-Tactile Manipulation Interface" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 125, + 494, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 125, + 494, + 138 + ], + "spans": [ + { + "bbox": [ + 111, + 125, + 494, + 138 + ], + "type": "text", + "content": "Fangchen Liu\\*,2, Chuanyu Li\\*,1, Yihua Qin\\*, Jing Xu\\*, Pieter 
Abbeel\\*, Rui Chen\\*,1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 175, + 139, + 436, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 139, + 436, + 152 + ], + "spans": [ + { + "bbox": [ + 175, + 139, + 436, + 152 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 175, + 139, + 436, + 152 + ], + "type": "text", + "content": "Tsinghua University, " + }, + { + "bbox": [ + 175, + 139, + 436, + 152 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 175, + 139, + 436, + 152 + ], + "type": "text", + "content": "University of California, Berkeley" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 219, + 153, + 389, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 153, + 389, + 165 + ], + "spans": [ + { + "bbox": [ + 219, + 153, + 389, + 165 + ], + "type": "text", + "content": "* Equal contribution, † Corresponding author" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 171, + 167, + 437, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 167, + 437, + 179 + ], + "spans": [ + { + "bbox": [ + 171, + 167, + 437, + 179 + ], + "type": "text", + "content": "https://chuanyune.github.io/ViTaMIN_page" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 59, + 224, + 134, + 298 + ], + "blocks": [ + { + "bbox": [ + 57, + 214, + 135, + 224 + ], + "lines": [ + { + "bbox": [ + 57, + 214, + 135, + 224 + ], + "spans": [ + { + "bbox": [ + 57, + 214, + 135, + 224 + ], + "type": "text", + "content": "Demonstrations" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 59, + 224, + 134, + 298 + ], + "lines": [ + { + "bbox": [ + 59, + 224, + 134, + 298 + ], + "spans": [ + { + "bbox": [ + 59, + 224, + 134, + 298 + ], + "type": "image", + "image_path": "2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 145, + 224, + 209, + 297 + ], + "blocks": [ + { + "bbox": [ + 145, + 224, + 209, + 297 + ], + "lines": [ + { + "bbox": [ + 145, + 224, + 209, + 297 + ], + "spans": [ + { + "bbox": [ + 145, + 224, + 209, + 297 + ], + "type": "image", + "image_path": "c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 217, + 224, + 290, + 298 + ], + "blocks": [ + { + "bbox": [ + 217, + 224, + 290, + 298 + ], + "lines": [ + { + "bbox": [ + 217, + 224, + 290, + 298 + ], + "spans": [ + { + "bbox": [ + 217, + 224, + 290, + 298 + ], + "type": "image", + "image_path": "a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 298, + 224, + 374, + 298 + ], + "blocks": [ + { + "bbox": [ + 293, + 213, + 377, + 223 + ], + "lines": [ + { + "bbox": [ + 293, + 213, + 377, + 223 + ], + "spans": [ + { + "bbox": [ + 293, + 213, + 377, + 223 + ], + "type": "text", + "content": "Real-World Tasks" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 298, + 224, + 374, + 298 + ], + "lines": [ + { + "bbox": [ + 298, + 224, + 374, + 298 + ], + "spans": [ + { + "bbox": [ + 298, + 224, + 374, + 298 + ], + "type": "image", + "image_path": 
"4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 381, + 224, + 463, + 298 + ], + "blocks": [ + { + "bbox": [ + 381, + 224, + 463, + 298 + ], + "lines": [ + { + "bbox": [ + 381, + 224, + 463, + 298 + ], + "spans": [ + { + "bbox": [ + 381, + 224, + 463, + 298 + ], + "type": "image", + "image_path": "c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 470, + 224, + 553, + 299 + ], + "blocks": [ + { + "bbox": [ + 470, + 224, + 553, + 299 + ], + "lines": [ + { + "bbox": [ + 470, + 224, + 553, + 299 + ], + "spans": [ + { + "bbox": [ + 470, + 224, + 553, + 299 + ], + "type": "image", + "image_path": "3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 54, + 301, + 277, + 432 + ], + "blocks": [ + { + "bbox": [ + 54, + 301, + 277, + 432 + ], + "lines": [ + { + "bbox": [ + 54, + 301, + 277, + 432 + ], + "spans": [ + { + "bbox": [ + 54, + 301, + 277, + 432 + ], + "type": "image", + "image_path": "56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 441, + 558, + 489 + ], + "lines": [ + { + "bbox": [ + 50, + 441, + 558, + 489 + ], + "spans": [ + { + "bbox": [ + 50, + 441, + 558, + 489 + ], + "type": "text", + "content": "Fig. 1: ViTaMIn overview. Our system comprises a portable data collection device that integrates visual and tactile sensing, a multimodal representation learning framework for fusing visual and tactile information, and demonstrations of various contact-rich manipulation tasks. This system facilitates efficient collection of manipulation data without requiring complex robot setups. 
(*Backgrounds in the images are blurred.)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 287, + 304, + 383, + 430 + ], + "blocks": [ + { + "bbox": [ + 287, + 304, + 383, + 430 + ], + "lines": [ + { + "bbox": [ + 287, + 304, + 383, + 430 + ], + "spans": [ + { + "bbox": [ + 287, + 304, + 383, + 430 + ], + "type": "image", + "image_path": "d3b473412a83550b31125af282bc865a7484b0a11b3fe4b684aa09dfa0912134.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 384, + 304, + 468, + 430 + ], + "blocks": [ + { + "bbox": [ + 384, + 304, + 468, + 430 + ], + "lines": [ + { + "bbox": [ + 384, + 304, + 468, + 430 + ], + "spans": [ + { + "bbox": [ + 384, + 304, + 468, + 430 + ], + "type": "image", + "image_path": "bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 470, + 304, + 553, + 430 + ], + "blocks": [ + { + "bbox": [ + 470, + 304, + 553, + 430 + ], + "lines": [ + { + "bbox": [ + 470, + 304, + 553, + 430 + ], + "spans": [ + { + "bbox": [ + 470, + 304, + 553, + 430 + ], + "type": "image", + "image_path": "4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 50, + 499, + 299, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 499, + 299, + 679 + ], + "spans": [ + { + "bbox": [ + 50, + 499, + 299, + 679 + ], + "type": "text", + "content": "Abstract—Tactile information plays a crucial role for humans and robots to interact effectively with their environment, particularly for tasks requiring the understanding of contact properties. Solving such dexterous manipulation tasks often relies on imitation learning from demonstration datasets, which are typically collected via teleoperation systems and often demand substantial time and effort. To address these challenges, we present ViTaMIn, an embodiment-free manipulation interface that integrates visual and tactile sensing into a hand-held gripper, enabling multi-modality data collection without the need for teleoperation. Our design employs a compliant Fin Ray gripper with tactile sensing, allowing operators to perceive force feedback during manipulation for more intuitive operation. Additionally, we propose a multi-modal representation learning strategy to obtain pre-trained tactile representations, improving data efficiency and policy robustness. Experiments on 5 contact-rich manipulation tasks demonstrate that our system is more scalable, efficient, and effective than baseline methods." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 134, + 691, + 216, + 702 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 691, + 216, + 702 + ], + "spans": [ + { + "bbox": [ + 134, + 691, + 216, + 702 + ], + "type": "text", + "content": "I. 
INTRODUCTION" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 50, + 710, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 710, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 710, + 299, + 734 + ], + "type": "text", + "content": "Humans rely on both visual and tactile modalities to perform a diverse range of manipulation tasks in daily" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 309, + 498, + 559, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 498, + 559, + 582 + ], + "spans": [ + { + "bbox": [ + 309, + 498, + 559, + 582 + ], + "type": "text", + "content": "life. For instance, when inserting a plug into a socket or tightening a screw, vision helps with identifying and aligning components, while tactile signals enable precise force control during contact. This seamless integration of vision and touch enhances human dexterity, particularly in tasks that require contact-rich control, handling visual occlusions, or performing in-hand manipulations." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 309, + 590, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 590, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 590, + 559, + 734 + ], + "type": "text", + "content": "Recent progress in learning from demonstrations [1], [2], [3], [4] has shown significant potential for advancing general-purpose robots, enabling them to efficiently acquire complex skills from human demonstrations. Consequently, developing systems to collect high-quality demonstration data has been a recent key focus. Prior works have explored real-world data collection methods, including joint-mapped devices and exoskeletons [5], [6], [7], [8], and vision-based teleoperation frameworks [9], [10]. Nevertheless, these techniques require real-time teleoperation of a physical robot during data collection, which constrains efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14] present" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 224, + 37, + 563 + ], + "type": "text", + "content": "arXiv:2504.06156v2 [cs.RO] 1 Sep 2025" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 52, + 299, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 52, + 299, + 146 + ], + "spans": [ + { + "bbox": [ + 50, + 52, + 299, + 146 + ], + "type": "text", + "content": "a more scalable and cost-effective alternative to collect demonstration without teleoperation. Moreover, they can be seamlessly integrated into various embodiments, providing a more flexible data collection approach. However, these portable devices primarily focus on capturing vision-only demonstration data, limiting their usage for contact-rich and dexterous manipulation tasks where tactile feedback plays a crucial role." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 148, + 299, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 148, + 299, + 351 + ], + "spans": [ + { + "bbox": [ + 50, + 148, + 299, + 351 + ], + "type": "text", + "content": "In this work, we aim to address both the challenge of efficient data collection and the need for learning more dexterous tasks using visuo-tactile demonstrations. To this end, we introduce ViTaMIn, a novel and effective visuotactile manipulation interface designed to capture high-quality demonstrations with enhanced efficiency and flexibility. Unlike conventional approaches that rely on rigid tactile sensors, ViTaMIn leverages an omnidirectional compliant Fin Ray gripper with customized tactile sensing, which can detect contact from all directions as an expressive tactile signal for robot manipulation. We integrate the tactile-aware Fin Ray gripper [15] with UMI [14], enhancing the collected data with rich multimodal information and improving policy learning performance while maintaining the core advantages of portable devices. Additionally, our system enables operators to perceive force feedback during manipulation, facilitating more intuitive and seamless operation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 351, + 299, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 351, + 299, + 542 + ], + "spans": [ + { + "bbox": [ + 50, + 351, + 299, + 542 + ], + "type": "text", + "content": "Pre-trained visual representations have shown improved performance in robotic manipulation [16], [17], [18], [19], [20], benefiting from large-scale visual pre-training. To fully leverage the visuo-tactile datasets collected with ViTaMIn, we adopt a multimodal representation learning strategy to pre-train tactile representations, enhancing the robustness and generalizability of our sensor-based policies. Our pretraining objective integrates masked autoencoding [21] and contrastive learning for multimodal alignment [22], where future image observations are aligned with masked current images and tactile signals. Through extensive experiments on five challenging contact-rich manipulation tasks, our visuotactile policy, enhanced by multimodal pre-training, exhibits superior data and training efficiency while demonstrating strong generalization across diverse objects and environmental conditions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 544, + 212, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 544, + 212, + 554 + ], + "spans": [ + { + "bbox": [ + 61, + 544, + 212, + 554 + ], + "type": "text", + "content": "In conclusion, our contributions are:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 557, + 298, + 664 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 61, + 557, + 298, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 557, + 298, + 580 + ], + "spans": [ + { + "bbox": [ + 61, + 557, + 298, + 580 + ], + "type": "text", + "content": "- ViTaMIn provides a portable and scalable visuo-tactile data collection system." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 582, + 298, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 582, + 298, + 627 + ], + "spans": [ + { + "bbox": [ + 61, + 582, + 298, + 627 + ], + "type": "text", + "content": "- ViTaMIn proposes an effective multimodal representation learning strategy, which significantly improves the data efficiency, robustness and generalization capabilities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 629, + 298, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 629, + 298, + 664 + ], + "spans": [ + { + "bbox": [ + 61, + 629, + 298, + 664 + ], + "type": "text", + "content": "- ViTaMIn achieves superior performance over vision-only baselines across five manipulation tasks by leveraging visuo-tactile demonstrations." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 667, + 219, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 667, + 219, + 677 + ], + "spans": [ + { + "bbox": [ + 130, + 667, + 219, + 677 + ], + "type": "text", + "content": "II. RELATED WORK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 683, + 178, + 694 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 683, + 178, + 694 + ], + "spans": [ + { + "bbox": [ + 50, + 683, + 178, + 694 + ], + "type": "text", + "content": "A. Visuo-Tactile Manipulation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 698, + 299, + 734 + ], + "type": "text", + "content": "Tactile sensing is essential for robotic manipulation as it provides signals about physical contact in addition to visual observation. Early works [23], [24], [25] use RGB cameras" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 52, + 558, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 52, + 558, + 99 + ], + "spans": [ + { + "bbox": [ + 309, + 52, + 558, + 99 + ], + "type": "text", + "content": "and force/torque sensors to infer contact status for making decisions. However, the information from force/torque sensors is low-dimensional and insufficient for more dexterous manipulation tasks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 101, + 558, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 101, + 558, + 221 + ], + "spans": [ + { + "bbox": [ + 309, + 101, + 558, + 221 + ], + "type": "text", + "content": "More recently, vision-based tactile sensors have gained attention for their ability to capture high-resolution contact information [26], [27], [28]. Despite these advances, the rigid design of these sensors restricts the compliance of the end effector, where alternative approaches like uncalibrated tactile skins [29] and plug-and-play sensing systems [30] have improved adaptability and flexibility. In our work, we use a Fin-Ray-shaped compliant and all-directional tactile sensor, which can detect contacts from all directions and also support safe and robust contact-rich manipulation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 236, + 523, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 236, + 523, + 248 + ], + "spans": [ + { + "bbox": [ + 310, + 236, + 523, + 248 + ], + "type": "text", + "content": "B. 
Data Collection System for Robot Manipulation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 255, + 558, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 255, + 558, + 313 + ], + "spans": [ + { + "bbox": [ + 309, + 255, + 558, + 313 + ], + "type": "text", + "content": "Recent advancements in learning from demonstrations [1], [2], [3], [4] have shown promising results in developing general-purpose robots. Therefore, efficiently collecting high-quality demonstrations has become a key research focus." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 316, + 559, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 316, + 559, + 495 + ], + "spans": [ + { + "bbox": [ + 309, + 316, + 559, + 495 + ], + "type": "text", + "content": "Recently works have focused on efficient real-world data collection systems, such as devices or exoskeletons with joint-mapping [5], [6], [7], exoskeletons [8], or vision-based systems [9], [10]. However, these approaches require a physical robot during data collection, which limits efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14], [31], [32] offer several advantages: they are low-cost, flexible, and do not depend on a specific physical robot. Additionally, they can be seamlessly integrated into various embodiments and provide a more user-friendly experience for data collection. We extend the UMI data collection system [14] by integrating tactile sensing, which enriches the demonstrations with multimodal information, improving policy learning performance while preserving the key benefits of portable devices." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 510, + 480, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 510, + 480, + 521 + ], + "spans": [ + { + "bbox": [ + 310, + 510, + 480, + 521 + ], + "type": "text", + "content": "C. Multimodal Pre-training for Robotics" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 529, + 558, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 529, + 558, + 613 + ], + "spans": [ + { + "bbox": [ + 309, + 529, + 558, + 613 + ], + "type": "text", + "content": "Pre-trained visual representations have shown improved performance and generalization in robotic manipulation [16], [17], [18], [19], [20] with self-supervised learning techniques [21], [22]. This can be extended to multimodal representation learning [33], [34], [35] by integrating visual, tactile, and proprioceptive modalities, allowing robots to perceive object properties beyond visual appearance." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 309, + 613, + 558, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 613, + 558, + 685 + ], + "spans": [ + { + "bbox": [ + 309, + 613, + 558, + 685 + ], + "type": "text", + "content": "Aligning heterogeneous sensory modalities is a key challenge in multimodal learning, as different sensors have varying data structures, sampling rates, and noise characteristics [36]. Inspired by CLIP [22], researchers have developed contrastive learning techniques to align tactile and visual representations for manipulation tasks [37], [38]." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 686, + 558, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 686, + 558, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 686, + 558, + 734 + ], + "type": "text", + "content": "Our work extends these efforts by introducing masked contrastive pre-training, where the tactile encoder learns to reconstruct future occluded visual information, further enhancing multimodal understanding." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 49, + 318, + 289 + ], + "blocks": [ + { + "bbox": [ + 58, + 49, + 318, + 289 + ], + "lines": [ + { + "bbox": [ + 58, + 49, + 318, + 289 + ], + "spans": [ + { + "bbox": [ + 58, + 49, + 318, + 289 + ], + "type": "image", + "image_path": "eaeb262e8e8de2cc9c2c5f6bd946acaa4ad560a3e6122d16fbd8e4f0a08cfc1a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 295, + 560, + 346 + ], + "lines": [ + { + "bbox": [ + 50, + 295, + 560, + 346 + ], + "spans": [ + { + "bbox": [ + 50, + 295, + 560, + 346 + ], + "type": "text", + "content": "Fig. 2: ViTaMIn's hardware system overview. The handheld device integrates a GoPro camera, two tactile sensors and a synchronization camera to align visual and tactile information. During data collection, the two tactile sensors and the synchronization camera are connected to the Raspberry Pi in the backbox. The total weight of the gripper is approximately " + }, + { + "bbox": [ + 50, + 295, + 560, + 346 + ], + "type": "inline_equation", + "content": "1960\\mathrm{g}" + }, + { + "bbox": [ + 50, + 295, + 560, + 346 + ], + "type": "text", + "content": ". Left: Side view of the ViTaMIn system. Right: Top view of the ViTaMIn system with the backbox cover removed." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 324, + 51, + 552, + 288 + ], + "blocks": [ + { + "bbox": [ + 324, + 51, + 552, + 288 + ], + "lines": [ + { + "bbox": [ + 324, + 51, + 552, + 288 + ], + "spans": [ + { + "bbox": [ + 324, + 51, + 552, + 288 + ], + "type": "image", + "image_path": "4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 353, + 282, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 353, + 282, + 365 + ], + "spans": [ + { + "bbox": [ + 69, + 353, + 282, + 365 + ], + "type": "text", + "content": "III. VISUO-TACTILE MANIPULATION INTERFACE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 370, + 138, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 370, + 138, + 382 + ], + "spans": [ + { + "bbox": [ + 50, + 370, + 138, + 382 + ], + "type": "text", + "content": "A. System Overview" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 386, + 300, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 386, + 300, + 470 + ], + "spans": [ + { + "bbox": [ + 50, + 386, + 300, + 470 + ], + "type": "text", + "content": "We design a handheld gripper to collect visuo-tactile demonstrations without requiring teleoperation on physical robots. Our gripper design is illustrated in Figure 2. 
The gripper consists of an RGB fisheye wrist camera (GoPro 10) for image observation, two AllTact finger [15], a synchronization camera for observation temporal alignment, and a Raspberry Pi 5 with a battery for data recording." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 471, + 299, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 471, + 299, + 553 + ], + "spans": [ + { + "bbox": [ + 50, + 471, + 299, + 553 + ], + "type": "text", + "content": "Image Observation To capture comprehensive visual information, we employ a GoPro 10 camera with a " + }, + { + "bbox": [ + 50, + 471, + 299, + 553 + ], + "type": "inline_equation", + "content": "155^{\\circ}" + }, + { + "bbox": [ + 50, + 471, + 299, + 553 + ], + "type": "text", + "content": " field-of-view (FoV) fisheye lens. The camera operates at 60 FPS with a resolution of " + }, + { + "bbox": [ + 50, + 471, + 299, + 553 + ], + "type": "inline_equation", + "content": "2704 \\times 2028" + }, + { + "bbox": [ + 50, + 471, + 299, + 553 + ], + "type": "text", + "content": " pixels and is mounted at the end-effector of our ViTaMIn to ensure consistent visual coverage of the manipulation workspace during demonstration collection and policy deployment." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 554, + 299, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 554, + 299, + 673 + ], + "spans": [ + { + "bbox": [ + 50, + 554, + 299, + 673 + ], + "type": "text", + "content": "Tactile Observation In UMI [14], two TPU-printed Fin Ray grippers are used to provide compliance and enhance grasping stability. However, these grippers lack tactile sensing capabilities. In our ViTaMIn, we employ AllTact [15], a compliant Fin Ray gripper with omnidirectional tactile sensing ability. During manipulation, the embedded camera in AllTact captures both the global deformation of the entire finger and the local deformation of the contact surface as a single image. The tactile sensor operates at 30 FPS with a resolution of " + }, + { + "bbox": [ + 50, + 554, + 299, + 673 + ], + "type": "inline_equation", + "content": "640 \\times 480" + }, + { + "bbox": [ + 50, + 554, + 299, + 673 + ], + "type": "text", + "content": " pixels." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 674, + 300, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 674, + 300, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 674, + 300, + 734 + ], + "type": "text", + "content": "Other Observations To enhance the robustness and accuracy of SLAM, we utilize the IMU data provided by the GoPro, which is synchronized with the visual observations. Gripper width is also critical for precise manipulation. Following UMI [14], we attach two ArUco markers to the" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 353, + 558, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 353, + 558, + 376 + ], + "spans": [ + { + "bbox": [ + 309, + 353, + 558, + 376 + ], + "type": "text", + "content": "gripper's fingers and compute the gripper width from the visual observations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 310, + 384, + 394, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 384, + 394, + 395 + ], + "spans": [ + { + "bbox": [ + 310, + 384, + 394, + 395 + ], + "type": "text", + "content": "B. 
Data Processing" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "spans": [ + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "type": "text", + "content": "Sensor Synchronization To synchronize the tactile sensors and GoPro camera, we use an additional low-cost camera which is connected to the Raspberry Pi and is naturally synchronized with the tactile sensors. Before data collection, both the GoPro and the synchronization camera simultaneously capture a sequence of ArUco markers displayed on a computer screen. The ArUco IDs are detected in both video streams, and when an identical ID appears in both, the corresponding timestamps are used for synchronization. Since the framereates of the GoPro and the synchronization camera are " + }, + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "type": "inline_equation", + "content": "60\\mathrm{Hz}" + }, + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "type": "inline_equation", + "content": "30\\mathrm{Hz}" + }, + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "type": "text", + "content": " respectively, the temporal alignment error is below " + }, + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "type": "inline_equation", + "content": "1/60 + 1/30 = 0.05" + }, + { + "bbox": [ + 309, + 399, + 559, + 577 + ], + "type": "text", + "content": " seconds, which is sufficient for our tasks. Once the two videos are synchronized, they are cropped by the starting and ending signals triggered by the control button." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 578, + 559, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 578, + 559, + 662 + ], + "spans": [ + { + "bbox": [ + 309, + 578, + 559, + 662 + ], + "type": "text", + "content": "Data Collection and Filtering We adopt a similar data collection pipeline to UMI [14]. We also utilize Simultaneous Localization and Mapping (SLAM) to capture the end-effector trajectories. While SLAM may fail in low-texture environments, it achieves a success rate of approximately " + }, + { + "bbox": [ + 309, + 578, + 559, + 662 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 309, + 578, + 559, + 662 + ], + "type": "text", + "content": " in our tasks, allowing the majority of collected data to be used for imitation learning." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 347, + 668, + 522, + 679 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 668, + 522, + 679 + ], + "spans": [ + { + "bbox": [ + 347, + 668, + 522, + 679 + ], + "type": "text", + "content": "IV. VISUO-TACTILE POLICY LEARNING" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 683, + 484, + 695 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 683, + 484, + 695 + ], + "spans": [ + { + "bbox": [ + 309, + 683, + 484, + 695 + ], + "type": "text", + "content": "A. Visuo-Tactile Representation Learning" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "type": "text", + "content": "UMI uses a pre-trained CLIP [22] encoder to extract visual representations. 
However, the tactile images in ViTaMIn are very different from the CLIP's training distribution, which" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 52, + 299, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 52, + 299, + 88 + ], + "spans": [ + { + "bbox": [ + 50, + 52, + 299, + 88 + ], + "type": "text", + "content": "can lead to suboptimal representation. To tackle this, we pretrain an effective tactile encoder using the collected action-free datasets, which doesn't rely on the SLAM success." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 89, + 299, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 89, + 299, + 148 + ], + "spans": [ + { + "bbox": [ + 50, + 89, + 299, + 148 + ], + "type": "text", + "content": "Taking the tactile image in Figure 3 as an example, we want the encoder to capture the essential contact properties, such as the object's in-hand pose and gripper's deformation. These signals are complementary information from pixel observations, and are crucial for making future decisions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "spans": [ + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "text", + "content": "To achieve this, we employ a multimodal contrastive learning approach as illustrated in Figure 3. Given the current masked image " + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "inline_equation", + "content": "\\tilde{I}_V^k" + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "text", + "content": " and current full tactile observation " + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "inline_equation", + "content": "I_T^k" + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "text", + "content": " of step " + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "text", + "content": ", we want the combination of " + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "inline_equation", + "content": "\\tilde{I}_V^k" + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "inline_equation", + "content": "I_T^k" + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "text", + "content": " align with the future full image observation " + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "inline_equation", + "content": "I_V^{k + 1}" + }, + { + "bbox": [ + 50, + 149, + 300, + 245 + ], + "type": "text", + "content": " in the CLIP embedding space. The intuition behind this is to make the tactile encoder focus on the contact information to predict future images based on the current corrupted image." 
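Before the contrastive stage, the current wrist-camera frame is heavily masked so that alignment with the future frame cannot be solved from pixels alone and the tactile encoder is forced to contribute. A minimal patch-masking sketch is given below; the patch size (16) and mask ratio (0.75) are assumptions, since the excerpt does not state them.

```python
# Illustrative patch masking for the "corrupted" current image \tilde{I}_V^k.
# Patch size and mask ratio are assumptions, not values from the paper.
import torch


def random_patch_mask(images, patch=16, mask_ratio=0.75, generator=None):
    """Zero out a random subset of non-overlapping patches.

    images: (B, C, H, W) tensor with H and W divisible by `patch`.
    Returns the masked images and the boolean keep-mask over patches.
    """
    b, c, h, w = images.shape
    gh, gw = h // patch, w // patch
    n_patches = gh * gw
    n_keep = int(n_patches * (1.0 - mask_ratio))
    # Independent random permutation per sample; keep the first n_keep patches.
    noise = torch.rand(b, n_patches, generator=generator, device=images.device)
    keep = torch.zeros(b, n_patches, dtype=torch.bool, device=images.device)
    keep.scatter_(1, noise.argsort(dim=1)[:, :n_keep], True)
    # Broadcast the patch-level mask back to pixel resolution and apply it.
    pixel_mask = keep.view(b, 1, gh, gw).float()
    pixel_mask = pixel_mask.repeat_interleave(patch, 2).repeat_interleave(patch, 3)
    return images * pixel_mask, keep
```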
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 62, + 264, + 285, + 459 + ], + "blocks": [ + { + "bbox": [ + 62, + 264, + 285, + 459 + ], + "lines": [ + { + "bbox": [ + 62, + 264, + 285, + 459 + ], + "spans": [ + { + "bbox": [ + 62, + 264, + 285, + 459 + ], + "type": "image", + "image_path": "a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 91, + 462, + 255, + 622 + ], + "blocks": [ + { + "bbox": [ + 91, + 462, + 255, + 622 + ], + "lines": [ + { + "bbox": [ + 91, + 462, + 255, + 622 + ], + "spans": [ + { + "bbox": [ + 91, + 462, + 255, + 622 + ], + "type": "image", + "image_path": "dfac30919f736ceede8d6fefd2d847d22d809cb8f4e923239b4620efb4776ebf.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 631, + 299, + 679 + ], + "lines": [ + { + "bbox": [ + 50, + 631, + 299, + 679 + ], + "spans": [ + { + "bbox": [ + 50, + 631, + 299, + 679 + ], + "type": "text", + "content": "Fig. 3: The illustration of the multimodal contrastive representation pre-training phase. The tactile encoder is trained to capture complementary information to predict the missing content for the future image." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "text", + "content": "To ensure stable training, we freeze the image CLIP encoder " + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "inline_equation", + "content": "\\phi_V(\\cdot)" + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "text", + "content": " but only fine-tune the tactile encoder " + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "inline_equation", + "content": "\\phi_T(\\cdot)" + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "text", + "content": ". We first obtain the tactile embedding " + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "inline_equation", + "content": "T_{k}" + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "inline_equation", + "content": "\\phi_T(I_T^k)" + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "inline_equation", + "content": "V_{k}" + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "inline_equation", + "content": "\\phi_V(\\tilde{I}_V^k)" + }, + { + "bbox": [ + 50, + 685, + 300, + 734 + ], + "type": "text", + "content": ". 
These embeddings are concatenated and" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "spans": [ + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "type": "text", + "content": "passed through a fully connected projection layer, mapping them back to the original 512-dimensional CLIP embedding space as a fused feature " + }, + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "type": "inline_equation", + "content": "F_{k}" + }, + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "type": "text", + "content": ". Finally, we train the tactile encoder using the standard CLIP loss on " + }, + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "type": "inline_equation", + "content": "F_{k}" + }, + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "type": "inline_equation", + "content": "V_{k + 1}" + }, + { + "bbox": [ + 309, + 52, + 558, + 101 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 384, + 104, + 558, + 127 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 104, + 558, + 127 + ], + "spans": [ + { + "bbox": [ + 384, + 104, + 558, + 127 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {C L I P}} = \\frac {1}{2} \\left(\\mathcal {L} _ {\\mathrm {f - v}} + \\mathcal {L} _ {\\mathrm {v - f}}\\right) \\tag {1}", + "image_path": "fa9a910f7b891b9bde75d314d6b7942f49817996387d1a2c2d598104e4f32095.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 130, + 338, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 130, + 338, + 140 + ], + "spans": [ + { + "bbox": [ + 310, + 130, + 338, + 140 + ], + "type": "text", + "content": "where" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 333, + 144, + 558, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 144, + 558, + 178 + ], + "spans": [ + { + "bbox": [ + 333, + 144, + 558, + 178 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {v - f}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(V _ {i + 1} , F _ {i}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(V _ {i + 1} , F _ {j}\\right) / \\tau\\right)} \\tag {2}", + "image_path": "2ec98aac269313a4a3cc98c76d6cba7f37ecc7b2a02ed422fa6eb8b07c3cd183.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 333, + 188, + 558, + 221 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 188, + 558, + 221 + ], + "spans": [ + { + "bbox": [ + 333, + 188, + 558, + 221 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {f - v}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(F _ {i} , V _ {i + 1}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(F _ {i} , V _ {j + 1}\\right) / \\tau\\right)} \\tag {3}", + "image_path": "feba9d300a376c584fc6ce727355ddce2b0fc4efeb5965c1fcb3bee7cf808e7a.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 224, + 494, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 224, + 494, + 236 + ], + "spans": [ + { + "bbox": [ + 309, + 224, + 494, + 236 + ], + "type": "text", + "content": "here " + }, + { + "bbox": [ + 309, + 224, + 494, + 236 + ], + "type": 
"inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 309, + 224, + 494, + 236 + ], + "type": "text", + "content": " is a learnable temperature parameter." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 236, + 559, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 236, + 559, + 368 + ], + "spans": [ + { + "bbox": [ + 309, + 236, + 559, + 368 + ], + "type": "text", + "content": "Different from [39], where they directly apply the CLIP loss on the time-aligned visuo-tactile images, we instead fuse the tactile observation with a masked current image to predict the future image. We make this choice for two main reasons. First, in [39], the tactile representation is conditioned on proprioceptive states, which are unavailable in our dataset before the success of SLAM. Second, since different tasks may have varying images but similar tactile observations, fusing a masked current image helps the network learn a more expressive tactile representation. Without sufficient masking, the alignment becomes trivial." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 368, + 559, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 368, + 559, + 416 + ], + "spans": [ + { + "bbox": [ + 309, + 368, + 559, + 416 + ], + "type": "text", + "content": "After pre-training, we train a Diffusion Policy [4] on the SLAM-filtered data. Following [4], we use a U-Net [40] as the noise prediction network and apply DDIM [41] to accelerate the inference for action prediction." + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 314, + 445, + 556, + 533 + ], + "blocks": [ + { + "bbox": [ + 395, + 422, + 473, + 433 + ], + "lines": [ + { + "bbox": [ + 395, + 422, + 473, + 433 + ], + "spans": [ + { + "bbox": [ + 395, + 422, + 473, + 433 + ], + "type": "text", + "content": "V. EXPERIMENTS" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 314, + 445, + 556, + 533 + ], + "lines": [ + { + "bbox": [ + 314, + 445, + 556, + 533 + ], + "spans": [ + { + "bbox": [ + 314, + 445, + 556, + 533 + ], + "type": "image", + "image_path": "66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 539, + 531, + 552 + ], + "lines": [ + { + "bbox": [ + 336, + 539, + 531, + 552 + ], + "spans": [ + { + "bbox": [ + 336, + 539, + 531, + 552 + ], + "type": "text", + "content": "Fig. 4: Hardware setup for policy deployment." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 563, + 407, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 563, + 407, + 575 + ], + "spans": [ + { + "bbox": [ + 309, + 563, + 407, + 575 + ], + "type": "text", + "content": "A. Experimental Setup" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 578, + 559, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 578, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 309, + 578, + 559, + 709 + ], + "type": "text", + "content": "Hardware Figure 4 shows the policy deployment setup. Our system consists of a Rokae xMate ER3PRO robotic arm equipped with a PGI-140-80-W-S parallel gripper. The 7-DOF robotic arm provides flexible manipulation capabilities, while the gripper features an 8cm stroke range from fully open to closed position. 
The system is implemented using ROS Noetic on Ubuntu 20.04. The control loop operates at " + }, + { + "bbox": [ + 309, + 578, + 559, + 709 + ], + "type": "inline_equation", + "content": "10\\mathrm{Hz}" + }, + { + "bbox": [ + 309, + 578, + 559, + 709 + ], + "type": "text", + "content": ", with separate threads handling robot control, visual and tactile sensing. The system architecture is designed to minimize latency while maintaining reliable real-time performance." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "type": "text", + "content": "Similar to UMI [14], our system compensates for various sources of latency in the perception-action loop through" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 52, + 298, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 52, + 298, + 112 + ], + "spans": [ + { + "bbox": [ + 50, + 52, + 298, + 112 + ], + "type": "text", + "content": "predictive buffering and timestamp-based synchronization between visual and tactile feedback streams. The policy generates 16 consecutive trajectories at each inference step, with 10 trajectories being executed based on our temporal compensation strategy." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 112, + 298, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 112, + 298, + 220 + ], + "spans": [ + { + "bbox": [ + 50, + 112, + 298, + 220 + ], + "type": "text", + "content": "Manipulation Tasks As shown in Figure 5, we propose diverse contact-rich manipulation tasks to evaluate the effectiveness of ViTaMIn. These tasks are specifically crafted to demonstrate the following key capabilities: (1) Robust pick-and-place of diverse objects, including fragile and small objects; (2) Dexterous manipulation, such as in-hand reorientation; (3) Task success determination, allowing the robot to repeat attempts until success; (4) Dynamic and precise manipulation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 220, + 255, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 220, + 255, + 232 + ], + "spans": [ + { + "bbox": [ + 61, + 220, + 255, + 232 + ], + "type": "text", + "content": "We design the following 5 manipulation tasks:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 231, + 298, + 420 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 61, + 231, + 298, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 231, + 298, + 252 + ], + "spans": [ + { + "bbox": [ + 61, + 231, + 298, + 252 + ], + "type": "text", + "content": "- Orange Placement: Put a fragile orange from a randomized position to a randomized plate." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 254, + 298, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 254, + 298, + 289 + ], + "spans": [ + { + "bbox": [ + 61, + 254, + 298, + 289 + ], + "type": "text", + "content": "- Dynamic Peg Insertion: Grasp a peg and approach a hole, which is moving at a constant speed of " + }, + { + "bbox": [ + 61, + 254, + 298, + 289 + ], + "type": "inline_equation", + "content": "10\\mathrm{mm / s}" + }, + { + "bbox": [ + 61, + 254, + 298, + 289 + ], + "type": "text", + "content": ". And precisely insert the peg to the hole." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 289, + 298, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 289, + 298, + 324 + ], + "spans": [ + { + "bbox": [ + 61, + 289, + 298, + 324 + ], + "type": "text", + "content": "- Test Tube Reorientation: Grasp a transparent test tube from a shelf and adjust its pose through extrinsic dexterity based on tactile feedback." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 325, + 298, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 325, + 298, + 358 + ], + "spans": [ + { + "bbox": [ + 61, + 325, + 298, + 358 + ], + "type": "text", + "content": "- Scissor Hanging: Grasp a pair of scissors and hang them on a hook. Adjust the pose and keep attempting until it succeeds." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 361, + 298, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 361, + 298, + 420 + ], + "spans": [ + { + "bbox": [ + 61, + 361, + 298, + 420 + ], + "type": "text", + "content": "- Dual-Arm Knife Pulling: The left arm first grasps a knife from a cup, orients it horizontally. The right arm grasps and pulls it out with a constrained prismatic motion. This task requires tactile feedback to grasp the thin object and perform the correct pulling motion." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "type": "table", + "bbox": [ + 55, + 451, + 296, + 532 + ], + "blocks": [ + { + "bbox": [ + 59, + 434, + 291, + 445 + ], + "lines": [ + { + "bbox": [ + 59, + 434, + 291, + 445 + ], + "spans": [ + { + "bbox": [ + 59, + 434, + 291, + 445 + ], + "type": "text", + "content": "TABLE I: Data Collection Statistics for Different Tasks" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 55, + 451, + 296, + 532 + ], + "lines": [ + { + "bbox": [ + 55, + 451, + 296, + 532 + ], + "spans": [ + { + "bbox": [ + 55, + 451, + 296, + 532 + ], + "type": "table", + "html": "
<table><thead><tr><th>Task</th><th>Raw Data</th><th>Valid Data*</th><th>Avg. Length</th></tr></thead><tbody>
<tr><td>Orange Placement</td><td>87</td><td>73</td><td>435</td></tr>
<tr><td>Dynamic Peg Insertion</td><td>201</td><td>141</td><td>321</td></tr>
<tr><td>Test Tube Reorientation</td><td>150</td><td>125</td><td>619</td></tr>
<tr><td>Scissor Hanging</td><td>172</td><td>137</td><td>642</td></tr>
<tr><td>Knife Pulling (Left)</td><td>188</td><td>131</td><td>403</td></tr>
<tr><td>Knife Pulling (Right)</td><td>180</td><td>134</td><td>254</td></tr>
</tbody></table>
", + "image_path": "eae2bd4dd37d3b7a1249a136a2e4e36453a4e27aed461598963d0c97dc63fade.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 60, + 532, + 288, + 540 + ], + "lines": [ + { + "bbox": [ + 60, + 532, + 288, + 540 + ], + "spans": [ + { + "bbox": [ + 60, + 532, + 288, + 540 + ], + "type": "text", + "content": "*Valid data refers to demonstrations with successful SLAM tracking" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 554, + 298, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 554, + 298, + 672 + ], + "spans": [ + { + "bbox": [ + 50, + 554, + 298, + 672 + ], + "type": "text", + "content": "Table I shows the statistics of the demonstration data. We collect demonstrations for both single-arm and dual-arm manipulation tasks. For single-arm tasks, we gather between 87 and 172 raw demonstrations per task according to the task difficulty, with successful SLAM tracking achieved in approximately " + }, + { + "bbox": [ + 50, + 554, + 298, + 672 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 50, + 554, + 298, + 672 + ], + "type": "text", + "content": " of the trajectories. The dual-arm knife pulling task requires coordinated motion between both arms, with similar data collection volumes but slightly different average demonstration lengths for left and right arm movements." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 673, + 298, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 673, + 298, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 673, + 298, + 734 + ], + "type": "text", + "content": "We compare our approach against the following methods: (1) Vision: the policy only takes visual observation from the GoPro camera, which is encoded by the pre-trained CLIP model (identical to the original UMI [14] paper); (2) Ours w/o Pre-training: This baseline simply concatenate visual and" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 52, + 558, + 76 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 52, + 558, + 76 + ], + "spans": [ + { + "bbox": [ + 309, + 52, + 558, + 76 + ], + "type": "text", + "content": "tactile observations after separate CLIP ViT-B/16 encoders, and fine-tuned with behavior cloning." + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 325, + 88, + 544, + 190 + ], + "blocks": [ + { + "bbox": [ + 325, + 88, + 544, + 190 + ], + "lines": [ + { + "bbox": [ + 325, + 88, + 544, + 190 + ], + "spans": [ + { + "bbox": [ + 325, + 88, + 544, + 190 + ], + "type": "table", + "html": "
<table><thead><tr><th>Task</th><th>Vision</th><th>w/o Pre-training</th><th>Ours</th></tr></thead><tbody>
<tr><td colspan="4">Single-Arm Tasks</td></tr>
<tr><td>Orange placement</td><td>0.85</td><td>0.9</td><td>1.0</td></tr>
<tr><td>Test Tube Reorientation</td><td>0.4</td><td>0.7</td><td>0.9</td></tr>
<tr><td>Scissor Hanging</td><td>0.1</td><td>0.45</td><td>0.7</td></tr>
<tr><td>Dynamic Peg Insertion</td><td>0.45</td><td>0.8</td><td>0.9</td></tr>
<tr><td colspan="4">Dual-Arm Task</td></tr>
<tr><td>Knife Pulling</td><td>0.6</td><td>0.8</td><td>0.9</td></tr>
</tbody></table>
", + "image_path": "2a3bf65b454c923e9b5bb949a3116a5dfef20d4712dfc9369a08f9361b171127.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 309, + 194, + 558, + 230 + ], + "lines": [ + { + "bbox": [ + 309, + 194, + 558, + 230 + ], + "spans": [ + { + "bbox": [ + 309, + 194, + 558, + 230 + ], + "type": "text", + "content": "TABLE II: Comparisons on 5 tasks with baselines. Our approach improves the performance on 5 tasks through multimodal sensing and pre-training." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 237, + 558, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 237, + 558, + 332 + ], + "spans": [ + { + "bbox": [ + 309, + 237, + 558, + 332 + ], + "type": "text", + "content": "The results are presented in Table II. For each task, we conduct 20 trials with randomized initial conditions and report the average performance. The vision-only policy performs the worst across all five tasks, particularly in contact-rich tasks like test tube reorientation and scissor hanging, where tactile feedback is crucial for success. Across all tasks, pre-training enhances the performance, highlighting the importance of learning effective tactile representations." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 347, + 392, + 359 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 347, + 392, + 359 + ], + "spans": [ + { + "bbox": [ + 310, + 347, + 392, + 359 + ], + "type": "text", + "content": "B. Failure Analysis" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 365, + 558, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 365, + 558, + 449 + ], + "spans": [ + { + "bbox": [ + 309, + 365, + 558, + 449 + ], + "type": "text", + "content": "In the Orange placement task, the robot picks up an orange from a random position within a " + }, + { + "bbox": [ + 309, + 365, + 558, + 449 + ], + "type": "inline_equation", + "content": "50\\mathrm{cm} \\times 50\\mathrm{cm}" + }, + { + "bbox": [ + 309, + 365, + 558, + 449 + ], + "type": "text", + "content": " workspace and places it on a plate. Failures stem from table collisions, unstable placement, or motion planning errors despite correct object detection. In Dynamic peg insertion, the robot inserts a grasped peg into a moving hole. Vision-only methods often fail due to imprecise localization and alignment." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 450, + 558, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 450, + 558, + 594 + ], + "spans": [ + { + "bbox": [ + 309, + 450, + 558, + 594 + ], + "type": "text", + "content": "In Test tube reorientation, the robot must pick up a tube from a random rack location and reorient it vertically, with success defined by less than " + }, + { + "bbox": [ + 309, + 450, + 558, + 594 + ], + "type": "inline_equation", + "content": "10^{\\circ}" + }, + { + "bbox": [ + 309, + 450, + 558, + 594 + ], + "type": "text", + "content": " orientation error. Failures include rack collisions, over-lifting, and incorrect final orientation. Scissor hanging requires picking up scissors and hanging them on a narrow hook, where common issues include misdetection, misalignment, and failure to release. In Knife pulling, a dual-arm policy reorients the knife with one arm while the other pulls it out of a holder. 
Failures often result from poor coordination, weak grasps, or incomplete pulling. Overall, vision-only policies struggle with contact-rich tasks, highlighting the limitations of unimodal sensing." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 608, + 505, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 608, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 310, + 608, + 505, + 620 + ], + "type": "text", + "content": "C. Compliant Articulated Object Manipulation" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 309, + 625, + 558, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 625, + 558, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 625, + 558, + 734 + ], + "type": "text", + "content": "To demonstrate the compliance capabilities of ViTaMIn, we designed a compliant-controlled articulated object manipulation task. The robotic arm needs to grasp a handle (connected to a force gauge) and rotate it 90 degrees to open a switch. During the rotation process, the arm must minimize axial forces to ensure smooth operation. We conduct 10 experiments for each condition and calculate the average forces. The results show that ViTaMIn achieves significantly lower average forces compared to using pure vision as input." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 68, + 124, + 145 + ], + "blocks": [ + { + "bbox": [ + 62, + 55, + 188, + 68 + ], + "lines": [ + { + "bbox": [ + 62, + 55, + 188, + 68 + ], + "spans": [ + { + "bbox": [ + 62, + 55, + 188, + 68 + ], + "type": "text", + "content": "Task 1. Orange Placement" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 68, + 124, + 145 + ], + "lines": [ + { + "bbox": [ + 61, + 68, + 124, + 145 + ], + "spans": [ + { + "bbox": [ + 61, + 68, + 124, + 145 + ], + "type": "image", + "image_path": "ff5d1182fc87c0d6043cdc51c2604c67d7dd26e1c42f06dddaec7cbdb5b6fff2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 127, + 70, + 189, + 145 + ], + "blocks": [ + { + "bbox": [ + 127, + 70, + 189, + 145 + ], + "lines": [ + { + "bbox": [ + 127, + 70, + 189, + 145 + ], + "spans": [ + { + "bbox": [ + 127, + 70, + 189, + 145 + ], + "type": "image", + "image_path": "512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 192, + 70, + 254, + 145 + ], + "blocks": [ + { + "bbox": [ + 192, + 70, + 254, + 145 + ], + "lines": [ + { + "bbox": [ + 192, + 70, + 254, + 145 + ], + "spans": [ + { + "bbox": [ + 192, + 70, + 254, + 145 + ], + "type": "image", + "image_path": "8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 257, + 69, + 332, + 145 + ], + "blocks": [ + { + "bbox": [ + 256, + 55, + 402, + 68 + ], + "lines": [ + { + "bbox": [ + 256, + 55, + 402, + 68 + ], + "spans": [ + { + "bbox": [ + 256, + 55, + 402, + 68 + ], + "type": "text", + "content": "Task 2. 
Dynamic Peg Insertion" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 257, + 69, + 332, + 145 + ], + "lines": [ + { + "bbox": [ + 257, + 69, + 332, + 145 + ], + "spans": [ + { + "bbox": [ + 257, + 69, + 332, + 145 + ], + "type": "image", + "image_path": "145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 332, + 69, + 400, + 145 + ], + "blocks": [ + { + "bbox": [ + 332, + 69, + 400, + 145 + ], + "lines": [ + { + "bbox": [ + 332, + 69, + 400, + 145 + ], + "spans": [ + { + "bbox": [ + 332, + 69, + 400, + 145 + ], + "type": "image", + "image_path": "c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 400, + 69, + 475, + 145 + ], + "blocks": [ + { + "bbox": [ + 400, + 69, + 475, + 145 + ], + "lines": [ + { + "bbox": [ + 400, + 69, + 475, + 145 + ], + "spans": [ + { + "bbox": [ + 400, + 69, + 475, + 145 + ], + "type": "image", + "image_path": "ed9be295452bb2b609707999c0d7ce53274abf084feefa571723224f2e442fef.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 476, + 69, + 550, + 145 + ], + "blocks": [ + { + "bbox": [ + 476, + 69, + 550, + 145 + ], + "lines": [ + { + "bbox": [ + 476, + 69, + 550, + 145 + ], + "spans": [ + { + "bbox": [ + 476, + 69, + 550, + 145 + ], + "type": "image", + "image_path": "5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 61, + 156, + 156, + 220 + ], + "blocks": [ + { + "bbox": [ + 62, + 146, + 214, + 156 + ], + "lines": [ + { + "bbox": [ + 62, + 146, + 214, + 156 + ], + "spans": [ + { + "bbox": [ + 62, + 146, + 214, + 156 + ], + "type": "text", + "content": "Task 3. 
Test Tube Reorientation" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 156, + 156, + 220 + ], + "lines": [ + { + "bbox": [ + 61, + 156, + 156, + 220 + ], + "spans": [ + { + "bbox": [ + 61, + 156, + 156, + 220 + ], + "type": "image", + "image_path": "2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 221, + 174, + 232 + ], + "lines": [ + { + "bbox": [ + 140, + 221, + 174, + 232 + ], + "spans": [ + { + "bbox": [ + 140, + 221, + 174, + 232 + ], + "type": "text", + "content": "Stage I" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 159, + 156, + 253, + 220 + ], + "blocks": [ + { + "bbox": [ + 159, + 156, + 253, + 220 + ], + "lines": [ + { + "bbox": [ + 159, + 156, + 253, + 220 + ], + "spans": [ + { + "bbox": [ + 159, + 156, + 253, + 220 + ], + "type": "image", + "image_path": "d9d86998bcb7355813c2ec3771bc9be86562ca597b9726d312f20d51db3d0713.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 258, + 156, + 352, + 233 + ], + "blocks": [ + { + "bbox": [ + 258, + 156, + 352, + 233 + ], + "lines": [ + { + "bbox": [ + 258, + 156, + 352, + 233 + ], + "spans": [ + { + "bbox": [ + 258, + 156, + 352, + 233 + ], + "type": "image", + "image_path": "ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 356, + 156, + 450, + 233 + ], + "blocks": [ + { + "bbox": [ + 356, + 156, + 450, + 233 + ], + "lines": [ + { + "bbox": [ + 356, + 156, + 450, + 233 + ], + "spans": [ + { + "bbox": [ + 356, + 156, + 450, + 233 + ], + "type": "image", + "image_path": "6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 455, + 156, + 550, + 233 + ], + "blocks": [ + { + "bbox": [ + 455, + 156, + 550, + 233 + ], + "lines": [ + { + "bbox": [ + 455, + 156, + 550, + 233 + ], + "spans": [ + { + "bbox": [ + 455, + 156, + 550, + 233 + ], + "type": "image", + "image_path": "41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 61, + 245, + 156, + 308 + ], + "blocks": [ + { + "bbox": [ + 62, + 233, + 179, + 245 + ], + "lines": [ + { + "bbox": [ + 62, + 233, + 179, + 245 + ], + "spans": [ + { + "bbox": [ + 62, + 233, + 179, + 245 + ], + "type": "text", + "content": "Task 4. 
Scissor Hanging" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 245, + 156, + 308 + ], + "lines": [ + { + "bbox": [ + 61, + 245, + 156, + 308 + ], + "spans": [ + { + "bbox": [ + 61, + 245, + 156, + 308 + ], + "type": "image", + "image_path": "00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 159, + 245, + 253, + 321 + ], + "blocks": [ + { + "bbox": [ + 159, + 245, + 253, + 321 + ], + "lines": [ + { + "bbox": [ + 159, + 245, + 253, + 321 + ], + "spans": [ + { + "bbox": [ + 159, + 245, + 253, + 321 + ], + "type": "image", + "image_path": "67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 258, + 245, + 352, + 321 + ], + "blocks": [ + { + "bbox": [ + 258, + 245, + 352, + 321 + ], + "lines": [ + { + "bbox": [ + 258, + 245, + 352, + 321 + ], + "spans": [ + { + "bbox": [ + 258, + 245, + 352, + 321 + ], + "type": "image", + "image_path": "7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 356, + 245, + 450, + 321 + ], + "blocks": [ + { + "bbox": [ + 356, + 245, + 450, + 321 + ], + "lines": [ + { + "bbox": [ + 356, + 245, + 450, + 321 + ], + "spans": [ + { + "bbox": [ + 356, + 245, + 450, + 321 + ], + "type": "image", + "image_path": "5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 455, + 245, + 549, + 321 + ], + "blocks": [ + { + "bbox": [ + 455, + 245, + 549, + 321 + ], + "lines": [ + { + "bbox": [ + 455, + 245, + 549, + 321 + ], + "spans": [ + { + "bbox": [ + 455, + 245, + 549, + 321 + ], + "type": "image", + "image_path": "6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 61, + 335, + 156, + 411 + ], + "blocks": [ + { + "bbox": [ + 62, + 323, + 216, + 335 + ], + "lines": [ + { + "bbox": [ + 62, + 323, + 216, + 335 + ], + "spans": [ + { + "bbox": [ + 62, + 323, + 216, + 335 + ], + "type": "text", + "content": "Task 5. Knife Pulling (Bimanual)" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 335, + 156, + 411 + ], + "lines": [ + { + "bbox": [ + 61, + 335, + 156, + 411 + ], + "spans": [ + { + "bbox": [ + 61, + 335, + 156, + 411 + ], + "type": "image", + "image_path": "99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 427, + 558, + 452 + ], + "lines": [ + { + "bbox": [ + 50, + 427, + 558, + 452 + ], + "spans": [ + { + "bbox": [ + 50, + 427, + 558, + 452 + ], + "type": "text", + "content": "Fig. 5: We test ViTaMIn on 5 contact-rich manipulation tasks, including precise and dynamic insertion, object hanging with multimodal feedback, and transparent in-hand object manipulation." 
+ } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 160, + 335, + 253, + 411 + ], + "blocks": [ + { + "bbox": [ + 160, + 335, + 253, + 411 + ], + "lines": [ + { + "bbox": [ + 160, + 335, + 253, + 411 + ], + "spans": [ + { + "bbox": [ + 160, + 335, + 253, + 411 + ], + "type": "image", + "image_path": "3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 258, + 335, + 352, + 411 + ], + "blocks": [ + { + "bbox": [ + 258, + 335, + 352, + 411 + ], + "lines": [ + { + "bbox": [ + 258, + 335, + 352, + 411 + ], + "spans": [ + { + "bbox": [ + 258, + 335, + 352, + 411 + ], + "type": "image", + "image_path": "d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 356, + 335, + 450, + 411 + ], + "blocks": [ + { + "bbox": [ + 356, + 335, + 450, + 411 + ], + "lines": [ + { + "bbox": [ + 356, + 335, + 450, + 411 + ], + "spans": [ + { + "bbox": [ + 356, + 335, + 450, + 411 + ], + "type": "image", + "image_path": "45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 455, + 335, + 549, + 411 + ], + "blocks": [ + { + "bbox": [ + 455, + 335, + 549, + 411 + ], + "lines": [ + { + "bbox": [ + 455, + 335, + 549, + 411 + ], + "spans": [ + { + "bbox": [ + 455, + 335, + 549, + 411 + ], + "type": "image", + "image_path": "b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 60, + 469, + 173, + 566 + ], + "blocks": [ + { + "bbox": [ + 60, + 469, + 173, + 566 + ], + "lines": [ + { + "bbox": [ + 60, + 469, + 173, + 566 + ], + "spans": [ + { + "bbox": [ + 60, + 469, + 173, + 566 + ], + "type": "image", + "image_path": "ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 578, + 299, + 615 + ], + "lines": [ + { + "bbox": [ + 50, + 578, + 299, + 615 + ], + "spans": [ + { + "bbox": [ + 50, + 578, + 299, + 615 + ], + "type": "text", + "content": "Fig. 6: The robot needs to flip open a switch (fixed to a force gauge) by rotating it 90 degrees. During the rotation, the robot must minimize axial forces to ensure smooth operation." + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 176, + 479, + 282, + 564 + ], + "blocks": [ + { + "bbox": [ + 176, + 471, + 296, + 479 + ], + "lines": [ + { + "bbox": [ + 176, + 471, + 296, + 479 + ], + "spans": [ + { + "bbox": [ + 176, + 471, + 296, + 479 + ], + "type": "text", + "content": "Maximum Force Comparison: Vision vs. 
Ours" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 176, + 479, + 282, + 564 + ], + "lines": [ + { + "bbox": [ + 176, + 479, + 282, + 564 + ], + "spans": [ + { + "bbox": [ + 176, + 479, + 282, + 564 + ], + "type": "image", + "image_path": "c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 316, + 470, + 403, + 565 + ], + "blocks": [ + { + "bbox": [ + 316, + 470, + 403, + 565 + ], + "lines": [ + { + "bbox": [ + 316, + 470, + 403, + 565 + ], + "spans": [ + { + "bbox": [ + 316, + 470, + 403, + 565 + ], + "type": "image", + "image_path": "94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 354, + 566, + 399, + 574 + ], + "lines": [ + { + "bbox": [ + 354, + 566, + 399, + 574 + ], + "spans": [ + { + "bbox": [ + 354, + 566, + 399, + 574 + ], + "type": "text", + "content": "Novel Objects" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 583, + 558, + 619 + ], + "lines": [ + { + "bbox": [ + 309, + 583, + 558, + 619 + ], + "spans": [ + { + "bbox": [ + 309, + 583, + 558, + 619 + ], + "type": "text", + "content": "Fig. 7: Showcase of novel objects and different lighting in the generalization tasks. The right columns demonstrate colored flashlight/high-power/normal lighting conditions." + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 405, + 470, + 550, + 565 + ], + "blocks": [ + { + "bbox": [ + 405, + 470, + 550, + 565 + ], + "lines": [ + { + "bbox": [ + 405, + 470, + 550, + 565 + ], + "spans": [ + { + "bbox": [ + 405, + 470, + 550, + 565 + ], + "type": "image", + "image_path": "c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 566, + 503, + 575 + ], + "lines": [ + { + "bbox": [ + 452, + 566, + 503, + 575 + ], + "spans": [ + { + "bbox": [ + 452, + 566, + 503, + 575 + ], + "type": "text", + "content": "Different Lighting" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "bbox": [ + 51, + 642, + 135, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 642, + 135, + 653 + ], + "spans": [ + { + "bbox": [ + 51, + 642, + 135, + 653 + ], + "type": "text", + "content": "D. Ablation Studies" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": "a) Data Efficiency: We evaluate the performance of policies trained on different amounts (25%, 50%, and 100%) of demonstrations. All the models are evaluated in 20 real-world trials with different initializations. For a more in-depth analysis, we calculate the success rates of each stage separately, as illustrated in Figure 8. 
With the pre-trained" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 309, + 647, + 559, + 734 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 309, + 647, + 558, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 647, + 558, + 694 + ], + "spans": [ + { + "bbox": [ + 309, + 647, + 558, + 694 + ], + "type": "text", + "content": "tactile representations, our method can achieve consistently higher success rates on all the tasks across different amounts of data, and can even master the task with limited data (25%) for test tube reorientation." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "type": "text", + "content": "b) Training Efficiency: We further evaluate the policies trained with different numbers of epochs to understand its training efficiency under the same evaluation protocol. The" + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 72, + 177, + 200 + ], + "blocks": [ + { + "bbox": [ + 109, + 61, + 135, + 71 + ], + "lines": [ + { + "bbox": [ + 109, + 61, + 135, + 71 + ], + "spans": [ + { + "bbox": [ + 109, + 61, + 135, + 71 + ], + "type": "text", + "content": "Stage I" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 72, + 177, + 200 + ], + "lines": [ + { + "bbox": [ + 53, + 72, + 177, + 200 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 177, + 200 + ], + "type": "image", + "image_path": "827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 181, + 62, + 303, + 200 + ], + "blocks": [ + { + "bbox": [ + 143, + 50, + 222, + 60 + ], + "lines": [ + { + "bbox": [ + 143, + 50, + 222, + 60 + ], + "spans": [ + { + "bbox": [ + 143, + 50, + 222, + 60 + ], + "type": "text", + "content": "Tube Reorientation" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 181, + 62, + 303, + 200 + ], + "lines": [ + { + "bbox": [ + 181, + 62, + 303, + 200 + ], + "spans": [ + { + "bbox": [ + 181, + 62, + 303, + 200 + ], + "type": "image", + "image_path": "0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 307, + 72, + 430, + 200 + ], + "blocks": [ + { + "bbox": [ + 362, + 62, + 388, + 70 + ], + "lines": [ + { + "bbox": [ + 362, + 62, + 388, + 70 + ], + "spans": [ + { + "bbox": [ + 362, + 62, + 388, + 70 + ], + "type": "text", + "content": "Stage I" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 72, + 430, + 200 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 430, + 200 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 430, + 200 + ], + "type": "image", + "image_path": "a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 205, + 559, + 242 + ], + "lines": [ + { + "bbox": [ + 50, + 205, + 559, + 242 + ], + "spans": [ + { + "bbox": [ + 50, + 
205, + 559, + 242 + ], + "type": "text", + "content": "Fig. 8: Ablation study on the effect of pre-training on data efficiency. The performance of the policy improves as the quantity of data increases. After pre-training on the action-free, task-ignorant dataset, our method can achieve a high success rate even with limited data (25%)." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 433, + 70, + 556, + 200 + ], + "blocks": [ + { + "bbox": [ + 403, + 50, + 471, + 60 + ], + "lines": [ + { + "bbox": [ + 403, + 50, + 471, + 60 + ], + "spans": [ + { + "bbox": [ + 403, + 50, + 471, + 60 + ], + "type": "text", + "content": "Scissor Hanging" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 489, + 62, + 517, + 70 + ], + "lines": [ + { + "bbox": [ + 489, + 62, + 517, + 70 + ], + "spans": [ + { + "bbox": [ + 489, + 62, + 517, + 70 + ], + "type": "text", + "content": "Stage II" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 433, + 70, + 556, + 200 + ], + "lines": [ + { + "bbox": [ + 433, + 70, + 556, + 200 + ], + "spans": [ + { + "bbox": [ + 433, + 70, + 556, + 200 + ], + "type": "image", + "image_path": "4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 53, + 272, + 176, + 399 + ], + "blocks": [ + { + "bbox": [ + 141, + 249, + 221, + 258 + ], + "lines": [ + { + "bbox": [ + 141, + 249, + 221, + 258 + ], + "spans": [ + { + "bbox": [ + 141, + 249, + 221, + 258 + ], + "type": "text", + "content": "Tube Reorientation" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 262, + 134, + 271 + ], + "lines": [ + { + "bbox": [ + 108, + 262, + 134, + 271 + ], + "spans": [ + { + "bbox": [ + 108, + 262, + 134, + 271 + ], + "type": "text", + "content": "Stage I" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 272, + 176, + 399 + ], + "lines": [ + { + "bbox": [ + 53, + 272, + 176, + 399 + ], + "spans": [ + { + "bbox": [ + 53, + 272, + 176, + 399 + ], + "type": "image", + "image_path": "d8dcc28916f7268aa5ffb965d055ef3eb9daf033798758dca22c4625f78d2473.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 405, + 558, + 441 + ], + "lines": [ + { + "bbox": [ + 50, + 405, + 558, + 441 + ], + "spans": [ + { + "bbox": [ + 50, + 405, + 558, + 441 + ], + "type": "text", + "content": "Fig. 9: Ablation study on the effect of pre-training on training efficiency. Policies with pre-training are able to learn to complete the first-stage task at a remarkably early stage of training (within 10 epochs). Additionally, when the policy network is pre-trained, the overall success rates increase more rapidly." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 180, + 272, + 302, + 399 + ], + "blocks": [ + { + "bbox": [ + 235, + 262, + 263, + 271 + ], + "lines": [ + { + "bbox": [ + 235, + 262, + 263, + 271 + ], + "spans": [ + { + "bbox": [ + 235, + 262, + 263, + 271 + ], + "type": "text", + "content": "Stage II" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 180, + 272, + 302, + 399 + ], + "lines": [ + { + "bbox": [ + 180, + 272, + 302, + 399 + ], + "spans": [ + { + "bbox": [ + 180, + 272, + 302, + 399 + ], + "type": "image", + "image_path": "20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 306, + 271, + 429, + 399 + ], + "blocks": [ + { + "bbox": [ + 403, + 249, + 471, + 259 + ], + "lines": [ + { + "bbox": [ + 403, + 249, + 471, + 259 + ], + "spans": [ + { + "bbox": [ + 403, + 249, + 471, + 259 + ], + "type": "text", + "content": "Scissor Hanging" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 361, + 262, + 388, + 270 + ], + "lines": [ + { + "bbox": [ + 361, + 262, + 388, + 270 + ], + "spans": [ + { + "bbox": [ + 361, + 262, + 388, + 270 + ], + "type": "text", + "content": "Stage I" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 271, + 429, + 399 + ], + "lines": [ + { + "bbox": [ + 306, + 271, + 429, + 399 + ], + "spans": [ + { + "bbox": [ + 306, + 271, + 429, + 399 + ], + "type": "image", + "image_path": "bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 433, + 271, + 556, + 399 + ], + "blocks": [ + { + "bbox": [ + 489, + 262, + 516, + 270 + ], + "lines": [ + { + "bbox": [ + 489, + 262, + 516, + 270 + ], + "spans": [ + { + "bbox": [ + 489, + 262, + 516, + 270 + ], + "type": "text", + "content": "Stage II" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 433, + 271, + 556, + 399 + ], + "lines": [ + { + "bbox": [ + 433, + 271, + 556, + 399 + ], + "spans": [ + { + "bbox": [ + 433, + 271, + 556, + 399 + ], + "type": "image", + "image_path": "3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 50, + 456, + 299, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 456, + 299, + 506 + ], + "spans": [ + { + "bbox": [ + 50, + 456, + 299, + 506 + ], + "type": "text", + "content": "results are illustrated in Figure 9. We also observe consistent task performance improvements with pre-training. The policy can complete the first stage of the task at a remarkably early training stage (within 10 epochs)." + } + ] + } + ], + "index": 21 + }, + { + "type": "table", + "bbox": [ + 52, + 516, + 299, + 606 + ], + "blocks": [ + { + "bbox": [ + 52, + 516, + 299, + 606 + ], + "lines": [ + { + "bbox": [ + 52, + 516, + 299, + 606 + ], + "spans": [ + { + "bbox": [ + 52, + 516, + 299, + 606 + ], + "type": "table", + "html": "
<table><thead><tr><th>Task</th><th>Method</th><th>Original</th><th>Novel Objects</th><th>Different Lighting</th></tr></thead><tbody>
<tr><td rowspan="3">Orange Placement</td><td>Vision</td><td>0.85</td><td>0.7</td><td>0.55</td></tr>
<tr><td>Ours w/o Pre-training</td><td>0.9</td><td>0.8</td><td>0.6</td></tr>
<tr><td>Ours</td><td>1.0</td><td>1.0</td><td>0.85</td></tr>
<tr><td rowspan="3">Scissor Hanging</td><td>Vision</td><td>0.0</td><td>0.0</td><td>0.0</td></tr>
<tr><td>Ours w/o Pre-training</td><td>0.45</td><td>0.4</td><td>0.4</td></tr>
<tr><td>Ours</td><td>0.7</td><td>0.7</td><td>0.5</td></tr>
</tbody></table>
", + "image_path": "8105d743b48c767516e10ef93cc71f7fc5122df736e327dea3f051cc7bfb6c47.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "table_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 50, + 610, + 299, + 658 + ], + "lines": [ + { + "bbox": [ + 50, + 610, + 299, + 658 + ], + "spans": [ + { + "bbox": [ + 50, + 610, + 299, + 658 + ], + "type": "text", + "content": "TABLE III: Generalization under different objects and scenes. The results demonstrate that our multi-modal policy is more robust to novel objects and different lighting conditions." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 51, + 682, + 174, + 694 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 682, + 174, + 694 + ], + "spans": [ + { + "bbox": [ + 51, + 682, + 174, + 694 + ], + "type": "text", + "content": "E. Generalization Capability" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 50, + 697, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 697, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 697, + 299, + 734 + ], + "type": "text", + "content": "We also evaluate our policy's generalizability to unseen objects and environments. As shown in Figure 7, beyond the training orange and scissor, we introduce 6 unseen small" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 309, + 456, + 558, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 456, + 558, + 540 + ], + "spans": [ + { + "bbox": [ + 309, + 456, + 558, + 540 + ], + "type": "text", + "content": "objects and 3 unseen scissors to assess object generalization. Additionally, we modify lighting conditions by increasing brightness and introducing colored disco ball lighting. Table III presents results on the tasks of orange placement and scissor hanging. Our method with pre-training achieves consistent better performance across various generalization settings." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 394, + 550, + 474, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 550, + 474, + 560 + ], + "spans": [ + { + "bbox": [ + 394, + 550, + 474, + 560 + ], + "type": "text", + "content": "VI. CONCLUSION" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 309, + 566, + 559, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 566, + 559, + 697 + ], + "spans": [ + { + "bbox": [ + 309, + 566, + 559, + 697 + ], + "type": "text", + "content": "In this paper, we present ViTaMIn, a portable visuo-tactile manipulation interface designed for efficiently collecting high-quality demonstrations by capturing both visual and tactile signals. Furthermore, ViTaMIn introduces an effective pre-training strategy that leverages all the collected action-free data to learn a robust and generalizable tactile representation through multimodal contrastive learning. Our approach significantly outperforms vision-only policies across 5 real-world contact-rich manipulation tasks and demonstrates improved data efficiency, robustness, and generalizability with pre-trained visuo-tactile representations." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 698, + 559, + 734 + ], + "type": "text", + "content": "Our method primarily focuses on fixed-base single-arm and dual-arm tasks with parallel-jaw grippers. 
While this setup is suitable for a wide range of manipulation tasks," + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 53, + 299, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 53, + 299, + 88 + ], + "spans": [ + { + "bbox": [ + 50, + 53, + 299, + 88 + ], + "type": "text", + "content": "future work could extend our approach to dexterous hands, enabling richer and more versatile manipulation skills that better approximate human-level dexterity." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 146, + 96, + 205, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 96, + 205, + 106 + ], + "spans": [ + { + "bbox": [ + 146, + 96, + 205, + 106 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 113, + 299, + 734 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 56, + 113, + 299, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 113, + 299, + 140 + ], + "spans": [ + { + "bbox": [ + 56, + 113, + 299, + 140 + ], + "type": "text", + "content": "[1] S. Levine, C. Finn, T. Darrell, and P. Abbeel, \"End-to-end training of deep visuomotor policies,\" Journal of Machine Learning Research, vol. 17, no. 39, pp. 1-40, 2016." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 140, + 299, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 299, + 175 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 299, + 175 + ], + "type": "text", + "content": "[2] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, J. Dabis, C. Finn, K. Gopalakrishnan, K. Hausman, A. Herzog, J. Hsu et al., \"Rt-1: Robotics transformer for real-world control at scale,\" arXiv preprint arXiv:2212.06817, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 176, + 299, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 176, + 299, + 212 + ], + "spans": [ + { + "bbox": [ + 56, + 176, + 299, + 212 + ], + "type": "text", + "content": "[3] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, X. Chen, K. Choromanski, T. Ding, D. Driess, A. Dubey, C. Finn et al., \"Rt-2: Vision-language-action models transfer web knowledge to robotic control,\" arXiv preprint arXiv:2307.15818, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 212, + 299, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 212, + 299, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 212, + 299, + 239 + ], + "type": "text", + "content": "[4] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. Burchfiel, and S. Song, \"Diffusion policy: Visuomotor policy learning via action diffusion,\" arXiv preprint arXiv:2303.04137, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 239, + 299, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 239, + 299, + 275 + ], + "spans": [ + { + "bbox": [ + 56, + 239, + 299, + 275 + ], + "type": "text", + "content": "[5] J. Aldaco, T. Armstrong, R. Baruch, J. Bingham, S. Chan, K. Draper, D. Dwibedi, C. Finn, P. Florence, S. Goodrich et al., \"Aloha 2: An enhanced low-cost hardware for bimanual teleoperation,\" arXiv preprint arXiv:2405.02292, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 275, + 299, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 275, + 299, + 301 + ], + "spans": [ + { + "bbox": [ + 56, + 275, + 299, + 301 + ], + "type": "text", + "content": "[6] Z. Fu, T. Z. Zhao, and C. Finn, \"Mobile aloha: Learning bimanual mobile manipulation with low-cost whole-body teleoperation,\" arXiv preprint arXiv:2401.02117, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 301, + 299, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 301, + 299, + 328 + ], + "spans": [ + { + "bbox": [ + 56, + 301, + 299, + 328 + ], + "type": "text", + "content": "[7] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn, “Learning fine-grained bimanual manipulation with low-cost hardware,” arXiv preprint arXiv:2304.13705, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 329, + 299, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 329, + 299, + 365 + ], + "spans": [ + { + "bbox": [ + 56, + 329, + 299, + 365 + ], + "type": "text", + "content": "[8] H. Fang, H.-S. Fang, Y. Wang, J. Ren, J. Chen, R. Zhang, W. Wang, and C. Lu, \"Airexo: Low-cost exoskeletons for learning whole-arm manipulation in the wild,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 15031-15038." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 365, + 299, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 299, + 392 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 299, + 392 + ], + "type": "text", + "content": "[9] X. Cheng, J. Li, S. Yang, G. Yang, and X. Wang, “Open-television: Teleoperation with immersive active visual feedback,” arXiv preprint arXiv:2407.01512, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 392, + 299, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 299, + 418 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 299, + 418 + ], + "type": "text", + "content": "[10] Y. Qin, W. Yang, B. Huang, K. Van Wyk, H. Su, X. Wang, Y.-W. Chao, and D. Fox, \"Anyteleop: A general vision-based dexterous robot arm-hand teleoperation system,\" arXiv preprint arXiv:2307.04577, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 418, + 299, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 418, + 299, + 463 + ], + "spans": [ + { + "bbox": [ + 53, + 418, + 299, + 463 + ], + "type": "text", + "content": "[11] F. Sanches, G. Gao, N. Elangovan, R. V. Godoy, J. Chapman, K. Wang, P. Jarvis, and M. Liarokapis, \"Scalable. intuitive human to robot skill transfer with wearable human machine interfaces: On complex, dexterous tasks,\" in 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023, pp. 6318-6325." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 464, + 299, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 464, + 299, + 491 + ], + "spans": [ + { + "bbox": [ + 53, + 464, + 299, + 491 + ], + "type": "text", + "content": "[12] K. Doshi, Y. Huang, and S. Coros, \"On hand-held grippers and the morphological gap in human manipulation demonstration,\" arXiv preprint arXiv:2311.01832, 2023." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 491, + 299, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 491, + 299, + 517 + ], + "spans": [ + { + "bbox": [ + 53, + 491, + 299, + 517 + ], + "type": "text", + "content": "[13] N. M. M. Shafiullah, A. Rai, H. Etukuru, Y. Liu, I. Misra, S. Chintala, and L. Pinto, \"On bringing robots home,\" arXiv preprint arXiv:2311.16098, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 517, + 299, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 517, + 299, + 552 + ], + "spans": [ + { + "bbox": [ + 53, + 517, + 299, + 552 + ], + "type": "text", + "content": "[14] C. Chi, Z. Xu, C. Pan, E. Cousineau, B. Burchfiel, S. Feng, R. Tedrake, and S. Song, \"Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots,\" arXiv preprint arXiv:2402.10329, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 553, + 299, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 553, + 299, + 581 + ], + "spans": [ + { + "bbox": [ + 53, + 553, + 299, + 581 + ], + "type": "text", + "content": "[15] S. Liang, Y. Guan, J. Xu, H. Qian, X. Zhang, D. Wu, W. Ding, and R. Chen, \"Alltact fin ray: A compliant robot gripper with omnidirectional tactile sensing,\" arXiv preprint arXiv:2504.18064, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 581, + 299, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 581, + 299, + 616 + ], + "spans": [ + { + "bbox": [ + 53, + 581, + 299, + 616 + ], + "type": "text", + "content": "[16] S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta, “R3m: A universal visual representation for robot manipulation,” in Proceedings of The 6th Conference on Robot Learning (CoRL), vol. 205. PMLR, 2022, pp. 892–909." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 616, + 299, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 616, + 299, + 652 + ], + "spans": [ + { + "bbox": [ + 53, + 616, + 299, + 652 + ], + "type": "text", + "content": "[17] Y. J. Ma, S. Sodhani, D. Jayaraman, O. Bastani, V. Kumar, and A. Zhang, “VIP: Towards universal visual reward and representation via value-implicit pre-training,” in The Eleventh International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 652, + 299, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 652, + 299, + 670 + ], + "spans": [ + { + "bbox": [ + 53, + 652, + 299, + 670 + ], + "type": "text", + "content": "[18] T. Xiao, I. Radosavovic, T. Darrell, and J. Malik, “Masked visual pretraining for motor control,” arXiv:2203.06173, 2022." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 670, + 299, + 697 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 670, + 299, + 697 + ], + "spans": [ + { + "bbox": [ + 53, + 670, + 299, + 697 + ], + "type": "text", + "content": "[19] I. Radosavovic, T. Xiao, S. James, P. Abbeel, J. Malik, and T. Darrell, “Real-world robot learning with masked visual pre-training,” in Conference on Robot Learning. PMLR, 2023, pp. 416–426." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 53, + 697, + 299, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 697, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 53, + 697, + 299, + 734 + ], + "type": "text", + "content": "[20] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V.-P. Berges, T. Wu, J. Vakil et al., \"Where are we in the search for an artificial visual cortex for embodied intelligence?\" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 53, + 558, + 700 + ], + "type": "list", + "angle": 0, + "index": 44, + "blocks": [ + { + "bbox": [ + 312, + 53, + 558, + 90 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 53, + 558, + 90 + ], + "spans": [ + { + "bbox": [ + 312, + 53, + 558, + 90 + ], + "type": "text", + "content": "[21] K. He, X. Chen, S. Xie, Y. Li, P. Dollar, and R. Girshick, “Masked autoencoders are scalable vision learners,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2022, pp. 16000-16009." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 91, + 558, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 91, + 558, + 126 + ], + "spans": [ + { + "bbox": [ + 312, + 91, + 558, + 126 + ], + "type": "text", + "content": "[22] A. Radford, J. W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark et al., \"Learning transferable visual models from natural language supervision,\" in International conference on machine learning. PMLR, 2021, pp. 8748-8763." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 126, + 558, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 126, + 558, + 162 + ], + "spans": [ + { + "bbox": [ + 312, + 126, + 558, + 162 + ], + "type": "text", + "content": "[23] K. Hosoda, K. Igarashi, and M. Asada, \"Adaptive hybrid visual servoing/force control in unknown environment,\" in Proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems. IROS'96, vol. 3. IEEE, 1996, pp. 1097-1103." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 162, + 558, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 162, + 558, + 198 + ], + "spans": [ + { + "bbox": [ + 312, + 162, + 558, + 198 + ], + "type": "text", + "content": "[24] H. Nakagaki, K. Kitagaki, T. Ogasawara, and H. Tsukune, \"Study of deformation and insertion tasks of a flexible wire,\" in Proceedings of International Conference on Robotics and Automation, vol. 3. IEEE, 1997, pp. 2397-2402." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 198, + 558, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 198, + 558, + 217 + ], + "spans": [ + { + "bbox": [ + 312, + 198, + 558, + 217 + ], + "type": "text", + "content": "[25] P. Miller and P. Leibowitz, \"Integration of vision, force and tactile sensing for grasping,\" Int. J. Intell. Mach, vol. 4, pp. 129-149, 1999." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 217, + 558, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 217, + 558, + 243 + ], + "spans": [ + { + "bbox": [ + 312, + 217, + 558, + 243 + ], + "type": "text", + "content": "[26] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. 
Malik, \"General in-hand object rotation with vision and touch,\" in Conference on Robot Learning. PMLR, 2023, pp. 2549-2564." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 312, + 243, + 558, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 243, + 558, + 270 + ], + "spans": [ + { + "bbox": [ + 312, + 243, + 558, + 270 + ], + "type": "text", + "content": "[27] S. Li, H. Yu, W. Ding, H. Liu, L. Ye, C. Xia, X. Wang, and X.-P. Zhang, “Visual-tactile fusion for transparent object grasping in complex backgrounds,” IEEE Transactions on Robotics, 2023." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 312, + 270, + 558, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 270, + 558, + 305 + ], + "spans": [ + { + "bbox": [ + 312, + 270, + 558, + 305 + ], + "type": "text", + "content": "[28] Y. Han, K. Yu, R. Batra, N. Boyd, C. Mehta, T. Zhao, Y. She, S. Hutchinson, and Y. Zhao, “Learning generalizable vision-tactile robotic grasping strategy for deformable objects via transformer,” IEEE/ASME Transactions on Mechatronics, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 312, + 305, + 558, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 305, + 558, + 332 + ], + "spans": [ + { + "bbox": [ + 312, + 305, + 558, + 332 + ], + "type": "text", + "content": "[29] R. Bhirangi, V. Pattabiraman, E. Erciyes, Y. Cao, T. Hellebrekers, and L. Pinto, “Anyskin: Plug-and-play skin sensing for robotic touch,” arXiv preprint arXiv:2409.08276, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 312, + 333, + 558, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 333, + 558, + 358 + ], + "spans": [ + { + "bbox": [ + 312, + 333, + 558, + 358 + ], + "type": "text", + "content": "[30] V. Pattabiraman, Y. Cao, S. Haldar, L. Pinto, and R. Bhirangi, “Learning precise, contact-rich manipulation through uncalibrated tactile skins,” arXiv preprint arXiv:2410.17246, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 312, + 358, + 558, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 358, + 558, + 387 + ], + "spans": [ + { + "bbox": [ + 312, + 358, + 558, + 387 + ], + "type": "text", + "content": "[31] Liu, Guan, Jia, Wu, Liu, Wang, Liang, Chen, Zhang, Song et al., \"Fastumi: A scalable and hardware-independent universal manipulation interface with dataset,\" arXiv e-prints, pp. arXiv-2409, 2024." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 312, + 387, + 558, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 387, + 558, + 413 + ], + "spans": [ + { + "bbox": [ + 312, + 387, + 558, + 413 + ], + "type": "text", + "content": "[32] Liu, Chi, Cousineau, Kuppuswamy, Burchfiel, and Song, \"Maniwav: Learning robot manipulation from in-the-wild audio-visual data,\" in CoRL, 2024." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 312, + 413, + 558, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 413, + 558, + 458 + ], + "spans": [ + { + "bbox": [ + 312, + 413, + 558, + 458 + ], + "type": "text", + "content": "[33] C. Sferrazza, Y. Seo, H. Liu, Y. Lee, and P. Abbeel, \"The power of the senses: Generalizable manipulation from vision and touch through masked multimodal learning,\" in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 9698-9705." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 312, + 458, + 558, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 458, + 558, + 485 + ], + "spans": [ + { + "bbox": [ + 312, + 458, + 558, + 485 + ], + "type": "text", + "content": "[34] Z. Xu, R. Uppuluri, X. Zhang, C. Fitch, P. G. Crandall, W. Shou, D. Wang, and Y. She, \"UniT: Unified tactile representation for robot learning,\" 2024. [Online]. Available: https://arxiv.org/abs/2408.06481" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 312, + 485, + 558, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 485, + 558, + 503 + ], + "spans": [ + { + "bbox": [ + 312, + 485, + 558, + 503 + ], + "type": "text", + "content": "[35] X. Zhang and et al., “Fusing multimodal sensory data for robotic perception,” IEEE Transactions on Robotics, 2022." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 312, + 503, + 558, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 503, + 558, + 529 + ], + "spans": [ + { + "bbox": [ + 312, + 503, + 558, + 529 + ], + "type": "text", + "content": "[36] A. Nagabandi, G. Kahn, S. Levine, and C. Finn, \"Deep reinforcement learning for vision-based robotic control with multimodal inputs,\" in Conference on Robot Learning (CoRL), 2020." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 312, + 529, + 558, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 529, + 558, + 575 + ], + "spans": [ + { + "bbox": [ + 312, + 529, + 558, + 575 + ], + "type": "text", + "content": "[37] L. Fu, G. Datta, H. Huang, W. C.-H. Panitch, J. Drake, J. Ortiz, M. Mukadam, M. Lambeta, R. Calandra, and K. Goldberg, \"A touch, vision, and language dataset for multimodal alignment,\" in Forty-first International Conference on Machine Learning, 2024. [Online]. Available: https://openreview.net/forum?id=tFEOOH9eH0" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 312, + 575, + 558, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 575, + 558, + 610 + ], + "spans": [ + { + "bbox": [ + 312, + 575, + 558, + 610 + ], + "type": "text", + "content": "[38] F. Yang, C. Feng, Z. Chen, H. Park, D. Wang, Y. Dou, Z. Zeng, X. Chen, R. Gangopadhyay, A. Owens, and A. Wong, \"Binding touch to everything: Learning unified multimodal tactile representations,\" arXiv:2401.18084, 2024." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 312, + 610, + 558, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 610, + 558, + 637 + ], + "spans": [ + { + "bbox": [ + 312, + 610, + 558, + 637 + ], + "type": "text", + "content": "[39] A. George, S. Gano, P. Katragadda, and A. Farimani, “Vital pretraining: Visuo-tactile pretraining for tactile and non-tactile manipulation policies,” arXiv preprint arXiv:2403.11898, 2024." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 312, + 637, + 558, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 637, + 558, + 683 + ], + "spans": [ + { + "bbox": [ + 312, + 637, + 558, + 683 + ], + "type": "text", + "content": "[40] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18. Springer, 2015, pp. 234-241." 
+ } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 312, + 683, + 558, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 683, + 558, + 700 + ], + "spans": [ + { + "bbox": [ + 312, + 683, + 558, + 700 + ], + "type": "text", + "content": "[41] J. Song, C. Meng, and S. Ermon, “Denoising diffusion implicit models,” arXiv preprint arXiv:2010.02502, 2020." + } + ] + } + ], + "index": 43 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_content_list.json b/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f3bdfc693aa08ba147e7bc014368437762e6cbb1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_content_list.json @@ -0,0 +1,1599 @@ +[ + { + "type": "text", + "text": "Quantum Annealing for Combinatorial Optimization: A Benchmarking Study", + "text_level": 1, + "bbox": [ + 183, + 89, + 815, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Authors: Seongmin Kim $^{1,4}$ , Sang-Woo Ahn $^{2}$ , In-Saeng Suh $^{4}$ , Alexander W. Dowling $^{3,*}$ , Eungkyu Lee $^{2,*}$ , and Tengfei Luo $^{1,*}$", + "bbox": [ + 153, + 152, + 846, + 189 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Department of Aerospace and Mechanical Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States.", + "$^{2}$ Department of Electronic Engineering, Kyung Hee University; Yongin-Si, Gyeonggi-do 17104, Republic of Korea.", + "$^{3}$ Department of Chemical and Biomolecular Engineering, University of Notre Dame; Notre Dame, Indiana 46556. United States.", + "$^{4}$ National Center for Computational Sciences, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37830, United States.", + "*Corresponding author. Email: adowling@nd.edu, eleest@khu.ac.kr, and tluo@nd.edu" + ], + "bbox": [ + 111, + 205, + 879, + 361 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Quantum annealing (QA) has the potential to significantly improve solution quality and reduce time complexity in solving combinatorial optimization problems compared to classical optimization methods. However, due to the limited number of qubits and their connectivity, the QA hardware did not show such an advantage over classical methods in past benchmarking studies. Recent advancements in QA with more than 5,000 qubits, enhanced qubit connectivity, and the hybrid architecture promise to realize the quantum advantage. Here, we use a quantum annealer with state-of-the-art techniques and benchmark its performance against classical solvers. To compare their performance, we solve over 50 optimization problem instances represented by large and dense Hamiltonian matrices using quantum and classical solvers. The results demonstrate that a state-of-the-art quantum solver has higher accuracy ( $\\sim 0.013\\%$ ) and a significantly faster problem-solving time ( $\\sim 6,561\\times$ ) than the best classical solver. 
Our results highlight the advantages of leveraging QA over classical counterparts, particularly in hybrid configurations, for achieving high accuracy and substantially reduced problem solving time in large-scale real-world optimization problems.", + "bbox": [ + 114, + 397, + 883, + 642 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: quantum advantage, quantum-classical hybrid algorithm, quantum annealing, combinatorial optimization, benchmarking study", + "bbox": [ + 116, + 659, + 880, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Introduction", + "text_level": 1, + "bbox": [ + 116, + 729, + 259, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Quantum computers mark a paradigm shift to tackle challenging tasks that classical computers cannot solve in a practical timescale $^{1,2}$ . The quantum annealer is a special quantum computer designed to solve combinatorial optimization problems with problem size-independent time complexity $^{3-5}$ . This unique quantum annealing (QA) capability is based on the so-called adiabatic process $^{6,7}$ . During this process, entangled qubits naturally evolve into the ground state of a given Hamiltonian to find the optimal vector of binary decisions for the corresponding quadratic unconstrained binary optimization (QUBO) problem $^{8-10}$ . The adiabatic theorem of quantum mechanics ensures that QA identifies the optimal solution regardless of the size and landscape of", + "bbox": [ + 116, + 752, + 883, + 891 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 870, + 936, + 880, + 950 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the combinatorial parametric space, highlighting QA as a powerful and practical solver $^{11-14}$ . The ability to efficiently explore high-dimensional combinational spaces makes QA capable of handling a wide range of optimization tasks $^{4,5,10,15,16}$ .", + "bbox": [ + 116, + 89, + 885, + 143 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The potential merit of QA motivates the systematic comparison with classical counterparts (e.g., simulated annealing, integer programming, steepest descent method, tabu search, and parallel tempering with isoenergetic cluster moves), focusing on the solution quality and the time complexity. While previous benchmarking studies showed some advantages of QA, most used low-dimensional or the sparse configuration of QUBO matrices due to the lack of available qubits in the QA hardware and poor topology to connect qubits $^{17-19}$ . For example, O'Malley et al. $^{17}$ compared the performance of QA with classical methods (mathematical programming), but they limited the number of binary variables to 35 due to the QA hardware limitation. Similarly, Tasseff et al. $^{18}$ highlighted the potential advantages of QA compared to classical methods (such as simulated annealing, integer programming, and Markov chain Monte Carlo) for sparse optimization problems containing up to 5,000 decision variables and 40,000 quadratic terms. Haba et al. $^{19}$ demonstrated that a classical solver (integer programming) could be faster than QA for small problems, e.g., $\\sim 100$ decision variables. Consequently, these benchmarking studies show that QA methods and their classical counterparts can exhibit similar solution quality and time complexity. 
However, such low-dimensional or sparse QUBOs considered in the previous benchmarking studies are challenging to map to a wide range of practical problems, which usually require high-dimensional and dense configuration of QUBO matrices $^{4,5,10,20}$ . For example, in our previous QA optimization of one-dimensional and two-dimensional optical metamaterials, the QUBO matrices exhibit these properties (Fig. S1) $^{4,5,16,20}$ .", + "bbox": [ + 116, + 160, + 885, + 491 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The state-of-the-art QA hardware (D-Wave Advantage System) features more than 5,000 qubits, advanced topology to connect qubits, and efficient hybrid algorithms (e.g., Leap Hybrid sampler). For example, the recent development (e.g., Pegasus topology) has increased qubit connectivity from 6 to $15^{21-23}$ . Improved qubit connectivity reduces the need for complex embedding processes, which map problem variables to physical qubits on the hardware. With better connectivity, such as in D-Wave's Pegasus topology, the embedding process becomes more efficient and can better preserve the structure of dense optimization problems. This enhancement allows the quantum annealer to increase the potential for finding high-quality solutions[24,25]. In addition, a QUBO decomposition algorithm (i.e., QBSolv) splits a large QUBO matrix into small pieces of subQUBO matrices, allowing us to handle a QUBO matrix with dimensions higher than the maximum number of qubits in the QA hardware[26,27]. Given these advancements, it is imperative to study the performance of the state-of-the-art QA system for high-dimensional and dense configuration of QUBO matrices, and systemically compare solution quality and the time complexity with the classical counterparts.", + "bbox": [ + 116, + 507, + 885, + 752 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we benchmark the performance of quantum solvers against classical algorithms in solving QUBO problems with large and dense configurations to represent real-world optimization problems. We analyze the solution quality and the required time to solve these benchmark problems using several quantum and classical solvers. This benchmarking study provides important insights into employing QA in practical problem-solving scenarios.", + "bbox": [ + 116, + 768, + 885, + 857 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 867, + 936, + 880, + 950 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Results", + "text_level": 1, + "bbox": [ + 116, + 92, + 199, + 111 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We present a benchmarking study on combinatorial optimization problems representing real-world scenarios, e.g., materials design, characterized by dense and large QUBO matrices (Fig. S1). These problems are non-convex and exhibit a highly complex energy landscape, making it challenging and time-consuming to identify accurate solutions. Classical solvers, such as integer programming (IP), simulated annealing (SA), steepest descent (SD), tabu search (TS), parallel tempering with isoenergetic cluster moves (PT-ICM), perform well for small-scale problems. However, they are often relatively inaccurate for larger problems (problem size $\\geq 1,000$ ; Fig. 1a). In particular, SD and TS show low relative accuracy compared to other solvers. 
The combination of PT and ICM leverages the strengths of both techniques: PT facilitates crossing energy barriers, while ICM ensures exploration of the solution space, effectively covering broad and diverse regions. This makes PT-ICM particularly effective for exploring complex optimization spaces and enhancing convergence toward the global optimum[46,47]. However, the performance of PT-ICM can be problem-dependent[48]. While it can work well for sparse problems, its effectiveness decreases for denser problems[46]. Consequently, although SA, and PT-ICM perform better than SD and TS, they also fail to find high-quality solutions for large-scale problems.", + "bbox": [ + 114, + 113, + 883, + 375 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address these limitations, QUBO decomposition strategies can be employed to improve the relative accuracy. For example, integrating QUBO decomposition with classical solvers (e.g., SA-QBSolv and PT-ICM-QBSolv) improves their performance. Nonetheless, these approaches often remain insufficient for handling massive problems effectively, particularly considering problem-solving time (Fig. 1b), which will be further discussed in the following. On the other hand, quantum solvers provide excellent performance for solving these dense and large-scale problems representing real-world optimization scenarios. Although QA can perform excellently for small problems, it has difficulty solving large and dense QUBOs due to the limited number of qubits $(5,000+)$ and connectivity (15). Several prior studies reported that QA may not be efficient since it cannot effectively handle dense and large QUBOs due to hardware limitations[23,53,54]. However, when it runs with the QUBO decomposition strategy (i.e., QA-QBSolv), large-scale problems ( $n \\geq 100$ ) can be effectively handled. Furthermore, hybrid QA (HQA), which integrates quantum and classical approaches, also can solve large-scale problems efficiently. As a result, the quantum solvers consistently identify high-quality solutions across all problem sizes (Fig. 1a).", + "bbox": [ + 114, + 393, + 883, + 637 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Computational time is also a critical metric for evaluating solver performance. Classical solvers exhibit rapidly increasing solving times as problem sizes grow, making them impractical for large-scale combinatorial optimization problems (Fig. 1b). While SD and TS are faster than other classical solvers, their relative accuracies are low, as can be seen in Fig. 1a. It is worth noting that the SA, and PT-ICM solvers struggle to handle problems with more than 3,000 variables due to excessively long solving time or computational constraints (e.g., memory limits). Although the IP solver is faster than SA and PT-ICM, its solving time increases greatly with problem size. The QUBO decomposition strategy significantly reduces computational time, yet quantum solvers remain faster than their classical counterparts across all problem sizes. 
For instance, for a problem size of 5,000, the solving time for HQA is $0.0854\\mathrm{s}$ and for QA-QBSolv is $74.59\\mathrm{s}$ , compared to $167.4\\mathrm{s}$ and $195.1\\mathrm{s}$ for SA-QBSolv and PT-ICM-QBSolv, respectively, highlighting superior efficiency of the quantum solvers.", + "bbox": [ + 114, + 655, + 883, + 863 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 869, + 936, + 880, + 950 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To further evaluate scalability, we conduct a systematic benchmarking study on QUBO problems (size: up to 10,000 variables), designed to mimic real-world scenarios through randomly generated elements. PT-ICM is excluded from this analysis due to excessive solving times compared to other solvers (Fig. 1b). As shown in Fig. 2, classical solvers (IP, SA, SD, and TS) are accurate for smaller problems but become inaccurate as the problem size increases. Consistent with the results in Fig. 1, the SD and TS solvers exhibit low relative accuracy even for a relatively small problem (e.g., 2,000). IP and SA are more accurate than SD and TS but fail to identify the optimal state for large problems. It is known that IP can provide global optimality guarantees $^{40}$ , but our study highlights that proving a solution is globally optimal is challenging for large and dense problems. For example, in one case ( $n = 7,000$ ), the optimality gap remains as large as $\\sim 17.73\\%$ , where the best bound is -19,660 while the solution obtained from the IP solver is -16,700, with the optimality gap not narrowing even after 2 hours of runtime. The relative accuracy can be improved by employing the QUBO decomposition strategy (e.g., SA-QBSolv), yet it still fails to identify high-quality solutions for problem sizes exceeding 4,000. In contrast, quantum solvers demonstrate superior accuracy for large-scale problems. Notably, the HQA solver consistently outperforms all other methods, reliably identifying the best solution regardless of problem size (Fig. 2).", + "bbox": [ + 116, + 89, + 885, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 3a shows that the solving time rapidly increases as the problem size increases for the classical solvers, indicating that solving combinatorial optimization problems with classical solvers can become intractable for large-size problems (Fig. 3b). The solving time trends with increasing problem size agree well with the theoretical time complexities of the classical solvers (Fig. 3b and Fig. S3, see 2-4-2. Computational Time section). While the IP solver can be faster than other classical solvers, it also requires significant time for large problems (e.g., $n > 5,000$ ). The use of the QUBO decomposition strategy dramatically reduces the solving time, but the quantum solvers consistently outpace classical counterparts (Fig. 3a). For example, the solving time ( $n = 10,000$ ) is $0.0855$ s for HQA, $101$ s for QA-QBSolv, and $561$ s for SA-QBSolv.", + "bbox": [ + 114, + 385, + 885, + 542 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Decomposing a large QUBO into smaller pieces leads to a higher relative accuracy, as a solver can find better solutions for each decomposed QUBOs, mitigating the current hardware limitations. Note that the accuracy of QA for QUBOs with problem sizes of 30 and 100 is, respectively, 1.0 and 0.9956 (without leveraging the QUBO decomposition method). 
Hence, the accuracy of QA-QBSolv with a sub-QUBO size of 30 is higher than that with a sub-QUBO size of 100, as decomposed QUBOs with a smaller size fit the QA hardware better (Fig. 4a). However, a smaller sub-QUBO size results in a greater number of sub-QUBOs after decomposition, leading to increased time required to solve all decomposed problems (Fig. 4b). It is noted that the QA-QBSolv solver does not guarantee finding the best solution for large problems (size $>4,000$ ), resulting in lower accuracies regardless of sub-QUBO sizes, as can be seen in Fig. 2 and Fig. 4a.", + "bbox": [ + 116, + 559, + 885, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our results show that HQA, which incorporates QA with classical algorithms to overcome the current quantum hardware limitations, is currently the most efficient solver for complex real-world problems that require the formulation of dense and large QUBOs. In this context, we define \"Quantum Advantage\" as the ability of a quantum-enhanced solver to achieve high accuracy and significantly faster problem-solving time compared to the classical solvers for large-scale optimization problems. Our findings suggest that leveraging quantum resources, particularly in hybrid configurations, can provide a computational advantage over classical approaches. Besides, as the current state of HQA demonstrates, we expect QA will have much higher accuracy and", + "bbox": [ + 116, + 751, + 885, + 891 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 867, + 936, + 880, + 950 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "require much shorter time to solve QUBO problems with the development of the quantum hardware with more qubits and better qubit connectivity.", + "bbox": [ + 109, + 90, + 885, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Discussion", + "text_level": 1, + "bbox": [ + 112, + 162, + 235, + 181 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This work comprehensively compares state-of-the-art QA hardware and software against several classical optimization solvers for large and dense QUBO problems (up to 10,000 variables, fully connected interactions). The classical solvers struggled to solve large-scale problems, but their performance can be improved when combined with the QUBO decomposition method (i.e., QBSolv). Nevertheless, they become inaccurate and inefficient with increasing problem size, indicating that classical methods can face challenges for complex real-world problems represented by large and dense QUBO matrices. On the contrary, HQA performs significantly better than its classical counterparts, exhibiting the highest accuracy ( $\\sim 0.013\\%$ improvement) and shortest time to obtain solutions ( $\\sim 6,561 \\times$ acceleration) for 10,000 dimensional QUBO problems, demonstrating 'Quantum Advantage' for large and dense QUBO problems. Pure QA and QA with the QUBO decomposition method still exhibit limitations in solving large problems due to the current QA hardware limitations (e.g., number of qubits and qubit connectivity). However, we anticipate that QA will eventually reach the efficiency of HQA with the ongoing development of the quantum hardware. 
Thus, we expect QA to demonstrate true 'Quantum Advantage' in the future.", + "bbox": [ + 109, + 183, + 887, + 429 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Methods", + "text_level": 1, + "bbox": [ + 112, + 463, + 217, + 483 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Definition of a QUBO", + "text_level": 1, + "bbox": [ + 112, + 484, + 302, + 502 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "QA hardware is designed to efficiently solve combinatorial optimization problems that are formulated with a QUBO matrix, which can be given by $^{28,29}$:", + "bbox": [ + 109, + 503, + 883, + 537 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny = \\sum_{i=1}^{n} \\sum_{j=i}^{n} Q_{i,j} x_{i} x_{j} \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 553, + 864, + 608 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $Q_{i,j}$ is the $i$ -th row and $j$ -th column real-number element of the QUBO matrix $(\\mathbf{Q})$ , which is an $n \\times n$ Hermitian, i.e., $\\mathbf{Q} \\in \\mathbb{R}^{n \\times n}$ , and $x_i$ is the $i$ -th element of a binary vector $\\mathbf{x}$ with a length of $n$ , i.e., $\\mathbf{x} \\in \\{0,1\\}^n$ . $Q_{i,j}$ is often referred to as a linear coefficient for $i = j$ and a quadratic interaction coefficient for $i \\neq j$ . The objective of QA is to identify the optimal binary vector of a given QUBO, which minimizes the scalar output $y$ as$^{29}$:", + "bbox": [ + 109, + 625, + 888, + 717 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol{x}^{*} = \\underset{x}{\\operatorname{argmin}} y \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 418, + 734, + 864, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In optimization problems, the linear coefficients correspond to cost or benefit terms associated with individual variables, while the quadratic coefficients represent interaction terms or dependencies between pairs of variables. These coefficients can be learned using machine learning models, such as the factorization machine (FM), trained on datasets containing input structures and their corresponding performance metrics. By mapping these learned coefficients into a QUBO formulation, we effectively represent an energy function of a material system or other real-world", + "bbox": [ + 109, + 777, + 887, + 883 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 867, + 936, + 883, + 950 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "optimization problem. This QUBO then describes the optimization space, enabling the identification of the optimal state with the best performance $^{30,31}$ .", + "bbox": [ + 109, + 90, + 885, + 126 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Methods to Solve a QUBO", + "text_level": 1, + "bbox": [ + 112, + 142, + 339, + 159 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Various methods have been proposed to solve QUBO problems. For our benchmarking study, we consider seven representative methods: QA, hybrid QA (HQA), integer programming (IP), simulated annealing (SA), steepest descent (SD), tabu search (TS), and parallel tempering with isoenergetic cluster moves (PT-ICM). 
Below, we provide a brief introduction to each of the solvers used in solving combinatorial optimization problems:", + "bbox": [ + 109, + 160, + 887, + 247 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantum Annealing and Hybrid Quantum Annealing", + "text_level": 1, + "bbox": [ + 109, + 265, + 535, + 282 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "QA starts with a superposition state for all qubits, which has the lowest energy state of the initial Hamiltonian $(H_0)$ . In the annealing process, the system evolves toward the lowest energy state of the final Hamiltonian (also called a problem Hamiltonian, $H_{p}$ ) by minimizing the influence of the initial Hamiltonian. The measured state at the end of the annealing is supposed to be the ground state of $H_{p}$ , which can be expressed as the following equation32,33:", + "bbox": [ + 109, + 282, + 888, + 375 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nH (t / t _ {a}) = A (t / t _ {a}) H _ {0} + B (t / t _ {a}) H _ {p} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 391, + 864, + 411 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here, $t$ is the elapsed annealing time, and $t_a$ is the total annealing time. Equation (3) evolves from $A(t / t_a) = 1$ , $B(t / t_a) \\approx 0$ at the beginning of the annealing $(t / t_a = 0)$ to $A(t / t_a) \\approx 0$ , $B(t / t_a) = 1$ at the end of the annealing $(t / t_a = 1)$ . Sufficiently slow evolution from $H_0$ to $H_p$ enables the quantum system to stay at the ground state, which leads to the identification of the optimal solution of a given combinatorial optimization problem3,34. We use D-Wave Systems' quantum annealer (Advantage 4.1) to solve the problems using QA, and we set the number of reads for QA to 1,000 with a total annealing time of $20~\\mu s$ . We select the best solution corresponding to the lowest energy state found among 1,000 reads.", + "bbox": [ + 109, + 428, + 887, + 571 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The D-Wave Ocean software development kit (SDK, ver. 3.3.0) provides many useful libraries, which include quantum or classical samplers such as the QA, HQA, SA, SD, and TS. They allow us to solve QUBO problems $^{22,35,36}$ . We employ these samplers, which are implemented in the D-wave Ocean SDK, for the benchmarking study. Classical or QA solvers often benefit from decomposition algorithms to identify a high-quality solution (i.e., an optimal solution or a good solution close to the global optimum) for large QUBO problems. Hence, the decomposition of a QUBO matrix into sub-QUBOs is very useful when the size of QUBO matrix is larger than the physical volume of a sampler (i.e., QUBO size > physical number of qubits in QA or memory capacity of a classical computer). We employ the QBSolv package implemented in D-wave Ocean SDK for QUBO decomposition. The QBSolv splits a QUBO matrix into smaller QUBO matrices, and each of them is sequentially solved by classical or QA solvers. This algorithm enables us to handle a wide range of complex real-world problems $^{21,22,37}$ . The size of the decomposed QUBOs is set to 30 unless otherwise specified. HQA (Leap Hybrid solver), developed by D-Wave systems, also decomposes large QUBO into smaller subproblems well-suited for QA's QPU, and then aggregates the results $^{27,38}$ . The detailed algorithm of HQA, however, is not publicly released. 
We utilize a D-Wave sampler (dwave-system 1.4.0) for SA, SD, and TS with a specified number of reads (1,000) and default settings for other parameters. Furthermore, we employ D-Wave hybrid framework for PT-ICM.", + "bbox": [ + 109, + 588, + 887, + 904 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 867, + 936, + 883, + 950 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Integer Programming", + "text_level": 1, + "bbox": [ + 112, + 108, + 290, + 125 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "IP uses branch-and-bound, cutting planes, and other methods to search the solution space for optimal integer decisions and prove global optimality within a tolerance (gap). We use Gurobi (version 10.0.2)39 for benchmarking with the default settings (0.1% global optimality gap) plus a two-hour time limit and 240 GB software memory limit per optimization problem. The benchmark QUBO problem is implemented in the Pyomo modeling environment (version 6.6.2)40. We also experimented with a large gap and observed the first identified integer solution often had a poor objective function value. These results are not further reported for brevity.", + "bbox": [ + 109, + 126, + 883, + 247 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Simulated Annealing", + "text_level": 1, + "bbox": [ + 112, + 265, + 282, + 282 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "SA, which is inspired by the annealing process in metallurgy, is a probabilistic optimization algorithm designed to approximate a global optimum of a given objective function. It is considered a metaheuristic method, which can be applied to a wide range of optimization problems $^{41,42}$ . In SA, temperature and cooling schedule are major factors that determine how extensively the algorithm explores the solution space $^{43}$ . This algorithm often identifies near-optimal solutions but cannot guarantee that local or global optimality conditions are satisfied. For SA, the hyperparameters are configured as follows: 1,000 reads, 1,000 sweeps, a 'random' initial state generation, and a 'geometric' temperature schedule.", + "bbox": [ + 109, + 282, + 883, + 422 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Steepest Descent", + "text_level": 1, + "bbox": [ + 112, + 439, + 251, + 455 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "SD operates by employing variable flips to reduce the energy of a given QUBO through local minimization computations rather than relying on a calculated gradient in a traditional gradient descent algorithm $^{44}$ . This algorithm is computationally inexpensive and beneficial for local refinement; thus, it can be used to search for local optima. In our benchmarking study, SD utilizes hyperparameters set to 1,000 reads and a 'random' strategy for initial state generation.", + "bbox": [ + 109, + 455, + 883, + 544 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Tabu Search", + "text_level": 1, + "bbox": [ + 112, + 560, + 218, + 575 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "TS is designed to solve combinatorial and discrete optimization problems by using memory to guide the search for better solutions, as introduced by Glover $^{45}$ . This algorithm can escape already visited local minima by remembering those points (called 'Tabu List' to keep track of moves during the search), aiming to identify high-quality solutions in a large solution space. 
This algorithm works well for combinatorial optimization problems with small search spaces.", + "bbox": [ + 109, + 578, + 880, + 666 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "However, it can be hard to evaluate neighboring solutions and to maintain and update the Tabu List with increasing problem sizes. The hyperparameter settings for TS are as follows: 1,000 reads, a timeout of $100\\mathrm{ms}$ , and 'random' initial state generation.", + "bbox": [ + 109, + 666, + 864, + 717 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Parallel Tempering with Isoenergetic Cluster Moves (PT-ICM)", + "text_level": 1, + "bbox": [ + 109, + 734, + 614, + 752 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "PT-ICM is an advanced Monte Carlo method designed to navigate optimization space, such as QUBO problems $^{46-48}$ . PT operates by maintaining multiple replicas of the system at different temperatures and allowing exchanges between replicas based on a Metropolis criterion. This approach helps lower-temperature replicas escape local minima with the aid of higher-temperature replicas. ICM identifies clusters of variables that can flip without changing the system's energy $^{46}$ . In this study, the hyperparameters for PT-ICM are set as follows: the number of sweeps is 1,000, the number of replicas is 10, and the number of iterations is 10.", + "bbox": [ + 109, + 752, + 883, + 875 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 867, + 936, + 883, + 950 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Benchmarking Problems", + "text_level": 1, + "bbox": [ + 112, + 90, + 326, + 107 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Real-world problems", + "text_level": 1, + "bbox": [ + 112, + 109, + 284, + 125 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Material optimization is selected to represent real-world problems, with the design of planar multilayers (PMLs) optical film as a testbed for benchmarking. PMLs can be seen in many applications. For example, they have been explored for transparent radiative cooling windows to address global warming by emitting thermal radiation through the atmospheric window ( $8\\mu \\mathrm{m} < \\lambda < 13\\mu \\mathrm{m}$ )4, while transmitting visible photons. PMLs consist of layers with one of four dielectric materials: silicon dioxide, silicon nitride, aluminum oxide, and titanium dioxide. The configuration of these layers can be expressed as a binary vector, where each layer is assigned a two-digit binary label. Optical characteristics and corresponding figure-of-merit (FOM) of the PML can be calculated by solving Maxwell's equations using the transfer matrix method (TMM). To formulate QUBOs, layer configurations (input binary vectors) and their FOMs (outputs) are used to train the FM model. FM learns the linear and quadratic coefficients, effectively modeling the optimization landscape of the material system. QUBO matrices are then generated using these coefficients30,31. PML configurations are randomly generated for training datasets, and their FOMs are calculated using TMM. The resulting QUBO matrices represent real-world materials optimization problems, characterized by highly dense (fully connected) configurations (Fig. S1), which are used for the benchmarking study in Fig. 
1.", + "bbox": [ + 109, + 125, + 883, + 405 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Benchmarking problems", + "text_level": 1, + "bbox": [ + 112, + 421, + 310, + 438 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We formulate QUBO matrices with random elements to further systematically explore scalability (Fig. 2 and Fig. 3), following the characteristics of QUBOs from real-world problems, for the benchmarking study as the following:", + "bbox": [ + 109, + 439, + 883, + 491 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Problem size: The problem size, corresponding to the length of a binary vector $(n)$ , varies from 120 to 10,000 (120, 200, 500, 1,000, 1,500, 2,000, 2,500, 3,000, 4,000, 5,000, 6,000, 7,000, 8,000, 9,000 and 10,000).", + "- Distribution of elements: For each problem size, four QUBO matrices with different distributions of elements are studied. These elements are random numbers with a mean value of 0 and standard deviations of 0.001, 0.01, 0.1, or 1. These distributions reflect the variability observed in QUBO coefficients derived from real-world problems (Table S1). A QUBO configured with elements having a large deviation yields a significant variation in the energy landscape, potentially resulting in high energy barriers that must be overcome to find the ground state.", + "- Density of matrices: The density of QUBO matrices reflects the proportion of pairwise interactions among variables relative to the maximum possible interactions. Fully connected QUBOs, such as those derived from real-world problems, represent cases where all variables interact with each other. For example, in layered photonic structures, each layer interacts with every other layer, influencing optical responses, which leads to a fully connected QUBO. In contrast, Max-Cut problems typically result in sparse QUBOs, where only a subset of variables (nodes) interact through edges. The maximum number of interaction coefficients (i.e., the number of edges in Max-Cut problems) is $nC_2$ , where $n$ denotes the problem size. The density of a QUBO can be calculated as:" + ], + "bbox": [ + 109, + 492, + 883, + 808 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\text {d e n s i t y} = \\frac {\\text {n u m b e r o f i n t e r a c t i o n c o e f f i c i e n t s}}{\\text {m a x i m u m n u m b e r o f i n t e r a c t i o n c o e f f i c i e n t s}} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 241, + 824, + 864, + 863 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 867, + 936, + 883, + 950 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For example, a benchmark problem instance (G10) with 800 nodes and 19,176 edges has a density of $6\\%$ , calculated as: density $= 19,176 / 319,600 = 0.06$ . The density of Max-Cut problems can be adjusted by changing the number of edges, with typical instances having densities ranging from $0.02\\%$ to $6\\%$ (Fig. S1, Table S2). In contrast, real-world problems feature fully connected configurations, corresponding to a density of $100\\%$ . QUBOs for this benchmarking study have dense matrices fully filled with real-number elements in the upper triangular part (i.e., fully connected graph nodes, Fig. S2). 
This configuration aims to approximate real-world optimization problems, which usually require a dense QUBO matrix$^{4,28}$.", + "bbox": [ + 109, + 90, + 885, + 233 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Performance Metrics: Relative Accuracy and Computational Time", + "text_level": 1, + "bbox": [ + 109, + 247, + 678, + 263 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Relative Accuracy", + "text_level": 1, + "bbox": [ + 112, + 265, + 261, + 281 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "For small-scale problems, brute-force search guarantees the identification of the global optimum by evaluating all possible solutions. However, this approach becomes infeasible for large-scale problems due to the exponential growth of the search space. An IP solver, such as Gurobi, utilizes the branch-and-bound method to efficiently explore the solution space and prove global optimality within an optimality gap. However, due to computational limitations or time constraints, IP may struggle to find the global optimum for large-scale problems. To address this challenge in our benchmarking study, we employ a 'Relative Accuracy' metric to compare the relative performance of different solvers. Relative accuracy is defined as the ratio of a solver's objective value to the best objective found across all solvers:", + "bbox": [ + 109, + 282, + 887, + 441 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\text{Relative Accuracy} = \\text{Solution}_{\\text{solver}} / \\text{Solution}_{\\text{best}} \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 455, + 864, + 477 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This metric provides a way to evaluate the solution quality when the global optimum cannot be definitively found or proven for large-scale problem instances. Note that the best solution is the lowest value among the solutions obtained from all solvers since the solvers are designed to find the lowest energy state (generally negative values for the QUBOs used in this study). The relative accuracies of the solvers are plotted as a function of problem sizes. In Fig. 1, the relative accuracy represents the average value calculated from three different QUBOs that represent material optimization, and in Fig. 2, it represents the average from four different QUBOs with varying standard deviations for each problem size (ranging from 120 to 10,000). Error bars on the plot represent the standard deviation of accuracies calculated from the four different QUBOs for each problem size, relative to the average values. By definition, the relative accuracy is 1.0 when the solver finds a solution with the best-known objective function value (equation 5).", + "bbox": [ + 109, + 491, + 888, + 686 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Computational Time", + "text_level": 1, + "bbox": [ + 112, + 700, + 279, + 717 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Computational time is another important factor in determining the solvers' performance. Combinatorial optimization problems are considered NP-hard, so increasing problem sizes can lead to an explosion of search space, posing challenges in optimization processes. We measure the computational time dedicated solely to solving given problems, excluding problem reading time, queue time, or communication time between the local computer and quantum annealer. This is consistent with other benchmarking studies[17,18]. 
For problems solved on D-Wave systems' QPU for QA, the execution time includes programming and sampling times (anneal, readout, and delay time). QPU access time is calculated for all of them after programmed anneal-read cycles, corresponding to the time charged to users in their allocations, which is used as the computational time for QA and HQA. Classical solvers (SA, SD, TS, and PT-ICM) run on a workstation (AMD", + "bbox": [ + 109, + 718, + 885, + 893 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 867, + 936, + 883, + 950 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ryzen Threadripper PRO 3975WX @ 3.5 GHz processor with 32 cores and 32GB of RAM), and IP (Gurobi) run on a cluster node (an Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz processor with 24 cores and 256 GB of RAM). Problem reading time can be significant when the problem size is large, but it is excluded from the computational time consideration. We measure the time solely taken to solve given problems with classical solvers. In Fig. 1b and Fig. 3, the solution time for classical and quantum solvers is presented as a function of problem sizes. Note that a QUBO problem is NP-hard $^{49}$ . Evaluating the energy of a given solution has a computational cost of $O(n^{2})$ , where $n$ (= problem size) is the number of variables. The number of reads or sweeps does not scale with $n$ , but the cost for each sweep scales as $O(n)$ for SA. Consequently, the theoretical time complexities of the classical solvers are known as $O(n^{3})$ for SA $^{50}$ , $O(n^{2})$ for SD $^{51}$ , and $O(n^{2})$ for TS $^{52}$ . On the other hand, the theoretical time complexity of the quantum solvers can be considered constant.", + "bbox": [ + 109, + 90, + 885, + 301 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Data availability", + "text_level": 1, + "bbox": [ + 112, + 338, + 303, + 359 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "All data generated and analyzed during the study are available from the corresponding author upon reasonable request.", + "bbox": [ + 109, + 359, + 883, + 395 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Code availability", + "text_level": 1, + "bbox": [ + 112, + 431, + 308, + 452 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The codes used for generating and analyzing data are available from the corresponding author upon reasonable request.", + "bbox": [ + 109, + 452, + 883, + 488 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 112, + 523, + 334, + 545 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This research used resources of the Oak Ridge Leadership Computing Facility at the Oak Ridge National Laboratory, which is supported by the Office of Science of the U.S. Department of Energy under Contract No. DE-AC05-00OR22725. 
This research was supported by the Quantum Computing Based on Quantum Advantage Challenge Research (RS-2023-00255442) through the National Research Foundation of Korea (NRF) funded by the Korean Government (Ministry of Science and ICT(MSIT)).", + "bbox": [ + 109, + 545, + 888, + 650 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Author information", + "text_level": 1, + "bbox": [ + 112, + 686, + 339, + 705 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Authors and Affiliations", + "bbox": [ + 112, + 708, + 310, + 723 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Department of Aerospace and Mechanical Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States.", + "bbox": [ + 112, + 724, + 883, + 758 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Seongmin Kim & Tengfei Luo", + "bbox": [ + 112, + 760, + 359, + 777 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Department of Electronic Engineering, Kyung Hee University; Yongin-Si, Gyeonggi-do 17104, Republic of Korea.", + "bbox": [ + 111, + 794, + 883, + 830 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Sangwoo Ahn & Eungkyu Lee", + "bbox": [ + 112, + 830, + 359, + 848 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Department of Chemical and Biomolecular Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States.", + "bbox": [ + 111, + 864, + 883, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 859, + 936, + 883, + 950 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Alexander Dowling", + "text_level": 1, + "bbox": [ + 112, + 90, + 274, + 109 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "National Center for Computational Sciences, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37830, United States.", + "text_level": 1, + "bbox": [ + 114, + 125, + 883, + 159 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Seongmin Kim & In-Saeng Suh", + "bbox": [ + 114, + 161, + 369, + 176 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Contributions", + "text_level": 1, + "bbox": [ + 112, + 195, + 235, + 210 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "S.K., A.D., E.L., and T.L. conceived the idea. S.K. and S.A. performed benchmarking studies to generate data. A.D. and S.K. implemented the IP benchmark. S.K. analyzed the data with advice from I.S., A.D., E.L., and T.L. All authors discussed the results and contributed to the writing of the manuscript.", + "bbox": [ + 112, + 212, + 883, + 282 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Corresponding authors", + "text_level": 1, + "bbox": [ + 112, + 300, + 313, + 316 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Correspondence to Alexander W. 
Dowling, Eungkyu Lee, or Tengfei Luo.", + "bbox": [ + 112, + 316, + 700, + 334 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Ethics declarations", + "text_level": 1, + "bbox": [ + 112, + 371, + 330, + 390 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Competing Interests", + "text_level": 1, + "bbox": [ + 112, + 393, + 289, + 410 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The authors declare no competing interests.", + "bbox": [ + 112, + 410, + 457, + 426 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Reference", + "text_level": 1, + "bbox": [ + 114, + 463, + 230, + 483 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1 Arute, F. et al. Quantum supremacy using a programmable superconducting processor. Nature 574, 505-510 (2019).", + "2 Daley, A. J. et al. Practical quantum advantage in quantum simulation. Nature 607, 667-676 (2022).", + "3 Johnson, M. W. et al. Quantum annealing with manufactured spins. Nature 473, 194-198 (2011).", + "4 Kim, S. et al. High-Performance Transparent Radiative Cooler Designed by Quantum Computing. ACS Energy Lett 7, 4134-4141 (2022).", + "5 Kim, S., Jung, S., Bobbitt, A., Lee, E. & Luo, T. Wide-angle spectral filter for energy-saving windows designed by quantum annealing-enhanced active learning. Cell Rep Phys Sci (2024).", + "6 Li, R. Y., Di Felice, R., Rohs, R. & Lidar, D. A. Quantum annealing versus classical machine learning applied to a simplified computational biology problem. npj Quantum Inf 4 (2018).", + "7 Vinci, W., Albash, T. & Lidar, D. A. Nested quantum annealing correction. npj Quantum Inf 2 (2016).", + "8 Santoro, G. E. & Tosatti, E. Optimization using quantum mechanics: quantum annealing through adiabatic evolution. J Phys A: Math Gen 39, R393-R431 (2006).", + "9 Mandra, S., Zhu, Z. & Katzgraber, H. G. Exponentially Biased Ground-State Sampling of Quantum Annealing Machines with Transverse-Field Driving Hamiltonians. Phys Rev Lett 118, 070502 (2017).", + "10 Kitai, K. et al. Designing metamaterials with quantum annealing and factorization machines. Phys Rev Res 2, 013319 (2020)." + ], + "bbox": [ + 112, + 484, + 883, + 886 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 857, + 936, + 880, + 950 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "11 Santoro, G. E., Marton a'k, R., Tosatti, E. & Car, R. Theory of Quantum Annealing of an Ising Spin Glass. Science 295, 2427-2430 (2002).", + "12 Hen, I. & Spedalieri, F. M. Quantum Annealing for Constrained Optimization. Phys Rev Appl 5 (2016).", + "13 Kadowaki, T. & Nishimori, H. Quantum annealing in the transverse Ising model. Phys Rev E 58, 5355-5363 (1998).", + "14 Morita, S. & Nishimori, H. Mathematical foundation of quantum annealing J Math Phys 49, 125210 (2008).", + "15 Wilson, B. A. et al. Machine learning framework for quantum sampling of highly constrained, continuous optimization problems. Appl Phys Rev 8, 041418 (2021).", + "16 Kim, S., Wu, S., Jian, R., Xiong, G. & Luo, T. Design of a High-Performance Titanium Nitride Metastructure-Based Solar Absorber Using Quantum Computing-Assisted Optimization. ACS Appl Mater Interfaces 15, 40606-40613 (2023).", + "17 O'Malley, D., Vesselinov, V. V., Alexandrov, B. S. & Alexandrov, L. B. Nonnegative/Binary matrix factorization with a D-Wave quantum annealer. PLoS One 13, e0206653 (2018).", + "18 Tasseff, B. et al. 
On the Emerging Potential of Quantum Annealing Hardware for Combinatorial Optimization. arXiv:2210.04291 (2022).", + "19 Hab, R., Ohzeki, M. & Tanaka, K. Travel time optimization on multi-AGV routing by reverse annealing. Sci Rep 12, 17753 (2022).", + "20 Kim, S. et al. Quantum annealing-aided design of an ultrathin-metamaterial optical diode. Nano Converg 11, 16 (2024).", + "21 Pelofske, E., Hahn, G. & Djidjev, H. N. Noise dynamics of quantum annealers: estimating the effective noise using idle qubits. Quantum Sci Technol 8 (2023).", + "22 Yoneda, Y., Shimada, M., Yoshida, A. & Shirakashi, J.-i. Searching for optimal experimental parameters with D-Wave quantum annealer for fabrication of Au atomic junctions. Appl Phys Exp 16 (2023).", + "23 Willsch, D. et al. Benchmarking Advantage and D-Wave 2000Q quantum annealers with exact cover problems. Quantum Inf Process 21 (2022).", + "24 Yarkoni, S., Raponi, E., Back, T. & Schmitt, S. Quantum annealing for industry applications: introduction and review. Rep Prog Phys 85 (2022).", + "25 Kasi, S., Warburton, P., Kaewell, J. & Jamieson, K. A Cost and Power Feasibility Analysis of Quantum Annealing for NextG Cellular Wireless Networks. IEEE Transactions on Quantum Engineering 4, 1-17 (2023).", + "26 Teplukhin, A., Kendrick, B. K. & Babikov, D. Solving complex eigenvalue problems on a quantum annealer with applications to quantum scattering resonances. Phys Chem Chem Phys 22, 26136-26144 (2020).", + "27 Atobe, Y., Tawada, M. & Togawa, N. Hybrid Annealing Method Based on subQUBO Model Extraction With Multiple Solution Instances. IEEE Trans Comput 71, 2606-2619 (2022).", + "28 Zaman, M., Tanahashi, K. & Tanaka, S. PyQUBO: Python Library for Mapping Combinatorial Optimization Problems to QUBO Form. IEEE Trans Comput 71, 838-850 (2022).", + "29 Tao, M. et al. in IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW) 557-566 (2020)." + ], + "bbox": [ + 114, + 89, + 883, + 875 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 857, + 936, + 880, + 950 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "30 Kim, S. et al. A review on machine learning-guided design of energy materials. Progress in Energy 6 (2024).", + "31 Kim, S., Luo, T., Lee, E. & Suh, I.-S. Distributed Quantum Approximate Optimization Algorithm on Integrated High-Performance Computing and Quantum Computing Systems for Large-Scale Optimization. arXiv:2407.20212 (2024).", + "32 Gemeinhardt, F., Garmendia, A., Wimmer, M., Weder, B. & Leymann, F. Quantum Combinatorial Optimization in the NISQ Era: A Systematic Mapping Study. ACM Comput Surv 56, 1-36 (2023).", + "33 Willsch, M., Willsch, D., Jin, F., De Raedt, H. & Michielsen, K. Benchmarking the quantum approximate optimization algorithm. Quantum Inf Process 19 (2020).", + "34 Hauke, P., Katzgraber, H. G., Lechner, W., Nishimori, H. & Oliver, W. D. Perspectives of quantum annealing: methods and implementations. Rep Prog Phys 83, 054401 (2020).", + "35 Carugno, C., Ferrari Dacrema, M. & Cremonesi, P. Evaluating the job shop scheduling problem on a D-wave quantum annealer. Sci Rep 12, 6539 (2022).", + "36 Irie, H., Liang, H., Doi, T., Gongyo, S. & Hatsuda, T. Hybrid quantum annealing via molecular dynamics. Sci Rep 11, 8426 (2021).", + "37 Raymond, J. et al. Hybrid Quantum Annealing for Larger-than-QPU Lattice-structured Problems. ACM Transactions on Quantum Computing 4, 1-30 (2023).", + "38 Ceselli, A. & Premoli, M. 
On good encodings for quantum annealer and digital optimization solvers. Sci Rep 13, 5628 (2023).", + "39 Song, J., Lanka, R., Yue, Y. & Dilkina, B. A General Large Neighborhood Search Framework for Solving Integer Linear Programs. 34th Conference on Neural Information Processing Systems (NeurIPS 2020) (2020).", + "40 Bynum, M. L. et al. Pyomo — Optimization Modeling in Python, 3rd edition. Springer Optimization and Its Applications 67 (2021).", + "41 Alnowibet, K. A., Mahdi, S., El-Alem, M., Abdelawwad, M. & Mohamed, A. W. Guided Hybrid Modified Simulated Annealing Algorithm for Solving Constrained Global Optimization Problems. Mathematics 10 (2022).", + "42 Rere, L. M. R., Fanany, M. I. & Arymurthy, A. M. Simulated Annealing Algorithm for Deep Learning. Procedia Comput Sci 72, 137-144 (2015).", + "43 Gonzales, G. V. et al. A comparison of simulated annealing schedules for constructable design of complex cavities intruded into conductive walls with internal heat generation. Energy 93, 372-382 (2015).", + "44 Wadayama, T. et al. Gradient descent bit flipping algorithms for decoding LDPC codes. IEEE Trans Communi 58, 1610-1614 (2010).", + "45 Glover, F., Laguna, M. & Marti', R. Principles of Tabu Search. Handbook of Approximation Algorithms and Metaheuristics 23 (2007).", + "46 Aramon, M. et al. Physics-Inspired Optimization for Quadratic Unconstrained Problems Using a Digital Annealer. Frontiers in Physics 7 (2019).", + "47 Zhu, Z., Ochoa, A. J. & Katzgraber, H. G. Fair sampling of ground-state configurations of binary optimization problems. arXiv:1903.07600 (2019).", + "48 Mandrà, S. & Katzgraber, H. G. A deceptive step towards quantum speedup detection. Quantum Science and Technology 3 (2018).", + "49 Yasuoka, H. Computational Complexity of Quadratic Unconstrained Binary Optimization. arXiv:2109.10048 (2022)." + ], + "bbox": [ + 109, + 89, + 883, + 875 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 857, + 936, + 880, + 950 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "50 Hansen, P. B. Simulated Annealing. Electrical Engineering and Computer Science Technical Reports 170 (1992).", + "51 Dupin, N., Nielsen, F. & Talbi, E. Dynamic Programming heuristic for k-means Clustering among a 2-dimensional Pareto Frontier. 7th Internat. Conf. on Metaheuristics and Nature Inspired Computing (2018).", + "52 Sakabe, M. & Yagiura, M. An efficient tabu search algorithm for the linear ordering problem. J Adv Mech Des Syst Manuf 16, JAMDSM0041-JAMDSM0041 (2022).", + "53 Delgado, A. & Thaler, J. Quantum annealing for jet clustering with thrust. Phys Rev D 106 (2022).", + "54 Mao, Z., Matsuda, Y., Tamura, R. & Tsuda, K. Chemical design with GPU-based Ising machines. Digit Discov 2, 1098-1103 (2023)." + ], + "bbox": [ + 112, + 89, + 885, + 282 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 857, + 936, + 880, + 950 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a8cb85a9316795dc6f82454dfcbe57b04e7e91a649d20ea1acb66f8e645c0d90.jpg", + "image_caption": [ + "Figures" + ], + "image_footnote": [], + "bbox": [ + 129, + 142, + 867, + 422 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/691843c0ba4b3d84d880dd78aac48d6de4dedbd2f73f05e2d97a542a22d9e6ad.jpg", + "image_caption": [ + "Fig. 1. 
Performance analysis of classical (IP, SA, SD, TS, PT-ICM, SA-QBSolv, and PT-ICM-QBSolv) and quantum (QA-QBSolv, and HQA) solvers on QUBO problems representing real-world optimization tasks in material science. (a) Relative accuracy and (b) solving time of the solvers." + ], + "image_footnote": [], + "bbox": [ + 130, + 434, + 864, + 712 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 859, + 936, + 882, + 950 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/3d096df4f9d88e5730c1ff85f4fc8195cc0ced5d1e2af08b268e6fae176f0453.jpg", + "image_caption": [ + "Fig. 2. The relative accuracy of the classical (IP, SA, SD, TS, and SA-QBSolv) and quantum (QA-QBSolv, and HQA) solvers for given QUBO problems. HQA is the best solver for finding the highest-quality solution for all problem sizes." + ], + "image_footnote": [], + "bbox": [ + 117, + 90, + 883, + 385 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b2225fd80582c1a6d58446009e52cb7580120fa78d8fb8c21a6a179b22e90082.jpg", + "image_caption": [ + "Fig. 3. Solving time of the solvers for given QUBO problems. The solving time of (a) the classical and quantum solvers and (b) the classical solvers (SA, SD, and TS) for small QUBO problems. Quantum solvers do not scale in solving time as the problem size increases, which is a great advantage over classical counterparts." + ], + "image_footnote": [], + "bbox": [ + 124, + 510, + 500, + 714 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/2fba1a69a62f326d6496fa444805cf7cb9d4f83d11029d8a05f043c24cc1c55c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 508, + 870, + 714 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 859, + 936, + 883, + 950 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/e8de5aea7294c5fa21c710d033ec625ad4b86fcd3d99b909d3310eb92ec79d99.jpg", + "image_caption": [ + "Fig. 4. Performance of the QA-QBSolv solver with different decomposition sizes. (a) Relative accuracy and (b) Solving time of the QA-QBSolv solver for given QUBO problems with different sub-QUBO sizes." + ], + "image_footnote": [], + "bbox": [ + 125, + 108, + 504, + 305 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/0dc799fad8a578e4fc936a6f60b7cd51a35fb007df006f3fbbccca020db0e587.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 106, + 880, + 305 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 859, + 936, + 880, + 950 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/590134f4c031fae88771d2e5aa825341c5ba16298aab8804b412e18b733c476d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 154, + 357, + 294 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/ca3232bf1275cd24819fdc30b46c463658d46f4f071687fa7d455c1996177a86.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 370, + 152, + 614, + 294 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/fed61778896eedc872a2ee953f0454d1c1d1df2e5d2dd11118b668eedd8f7389.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 617, + 155, + 852, + 294 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/6deccb0dcac8e41c85f23661aab5cb85956088933112941c178b3170cb344792.jpg", + "image_caption": [ + "Fig. S1. Comparison of QUBO matrices for real-world optimization and Max-Cut problems. 
(a-c) QUBO matrices representing the optimization of planar multilayered structures (PMLs) with problem sizes of (a) 100, (b) 500, and (c) 3,000. The dense configurations of these matrices reflect the fully connected nature of interactions in material optimization problems. (d-f) QUBO matrices derived from Max-Cut problem instances in the G-set $^{S1}$ : (d) G5, (e) G15, and (f) G40. These matrices exhibit sparse configurations, with relatively few pairwise interactions compared to their maximum possible connections." + ], + "image_footnote": [], + "bbox": [ + 125, + 296, + 370, + 436 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/1417de3df697e4e053ffea4df6d77a5ebdaefb50b3187f1972f3050156c75f08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 296, + 614, + 436 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/2a620e676a2beae0c82e88cad52c70b73e03b2af6941c4c6ef5de056c159c0c8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 296, + 861, + 436 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/1d459dddad5c45442c94fd4248e3d25df27831ab5659e8ee16015f75ac1fbd30.jpg", + "image_caption": [ + "Fig. S2. Example QUBO matrices. The size of the given QUBO problems is (a) 120 and (b) 1,000 with a standard deviation of 0.1." + ], + "image_footnote": [], + "bbox": [ + 127, + 626, + 491, + 835 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/31ba301c7d69a67dd38eb2e4932a89306684577806e4211b5dd4f78757c26c08.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 625, + 883, + 835 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Supplementary Information", + "bbox": [ + 112, + 90, + 354, + 108 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/bd938d5eeace304163712a0d6f853d54e179edfffc5c73dc6712007366e9be79.jpg", + "image_caption": [ + "Fig. S3. Time complexity of simulated annealing (SA), steepest descent (SD), and tabu search (TS). This plot is from calculation results based on the theoretical time complexity (see 2-4-2. Computational Time in the main text), so it does not have metrics. The plot agrees well with the solving time plot depicted in Fig. 2b." + ], + "image_footnote": [], + "bbox": [ + 331, + 132, + 692, + 347 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/9537d088a04967bfad78eb524d6fa7fc1eac919745a49fe9f57def7534fd8be3.jpg", + "table_caption": [ + "Table S1. Statistical properties of QUBO coefficients for real-world optimization problems. The table summarizes the average (avg) and standard deviation (std) of QUBO coefficients across different problem sizes $(n)$ . The average values of the coefficients are close to zero, and the standard deviation ranges from 0.2 to 2." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>n</td><td>50</td><td>100</td><td>200</td><td>500</td><td>1000</td><td>3000</td><td>5000</td><td>10000</td></tr>
<tr><td>avg</td><td>0.0025</td><td>-0.0014</td><td>0.0003</td><td>-0.0004</td><td>0.0001</td><td>0.0016</td><td>0.0012</td><td>0.0008</td></tr>
<tr><td>std</td><td>0.2491</td><td>0.7440</td><td>0.8083</td><td>1.3319</td><td>1.5090</td><td>1.9519</td><td>2.0372</td><td>2.0706</td></tr></table>
", + "bbox": [ + 114, + 191, + 890, + 262 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/7e2d928dd61b4f39f2ede4b6fad96500fe72db9030197351e71c02b3d19d4b8d.jpg", + "table_caption": [ + "Table S2. Density of Max-Cut problem instances. These instances feature sparse QUBO matrices with a density lower than $6\\%$ ." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Instances</td><td># Nodes</td><td># Edges</td><td># Maximum Edges</td><td>Density (%)</td></tr>
<tr><td>G5</td><td>800</td><td>19,176</td><td>319,600</td><td>6.0000</td></tr>
<tr><td>G10</td><td>800</td><td>19,176</td><td>319,600</td><td>6.0000</td></tr>
<tr><td>G15</td><td>800</td><td>4,661</td><td>319,600</td><td>1.4583</td></tr>
<tr><td>G20</td><td>800</td><td>4,672</td><td>319,600</td><td>1.4618</td></tr>
<tr><td>G30</td><td>2,000</td><td>19,900</td><td>1,999,000</td><td>0.9954</td></tr>
<tr><td>G40</td><td>2,000</td><td>11,766</td><td>1,999,000</td><td>0.5885</td></tr>
<tr><td>G50</td><td>3,000</td><td>6,000</td><td>4,498,500</td><td>0.1333</td></tr>
<tr><td>G55</td><td>5,000</td><td>12,498</td><td>12,497,500</td><td>0.1000</td></tr>
<tr><td>G60</td><td>7,000</td><td>17,148</td><td>24,496,500</td><td>0.0700</td></tr>
<tr><td>G70</td><td>10,000</td><td>9,999</td><td>49,995,000</td><td>0.0200</td></tr></table>
", + "bbox": [ + 129, + 397, + 869, + 652 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "References", + "bbox": [ + 116, + 90, + 205, + 107 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "S1 Ye, Y. [online] Available: https://web.stanford.edu/~yyyve/yyye/Gset/.", + "bbox": [ + 114, + 125, + 725, + 143 + ], + "page_idx": 20 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_model.json b/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_model.json new file mode 100644 index 0000000000000000000000000000000000000000..44905c2d8948b281d6805df698172caae54abf5a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_model.json @@ -0,0 +1,2266 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.184, + 0.09, + 0.816, + 0.138 + ], + "angle": 0, + "content": "Quantum Annealing for Combinatorial Optimization: A Benchmarking Study" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.153, + 0.847, + 0.19 + ], + "angle": 0, + "content": "Authors: Seongmin Kim\\(^{1,4}\\), Sang-Woo Ahn\\(^{2}\\), In-Saeng Suh\\(^{4}\\), Alexander W. Dowling\\(^{3,*}\\), Eungkyu Lee\\(^{2,*}\\), and Tengfei Luo\\(^{1,*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.206, + 0.88, + 0.24 + ], + "angle": 0, + "content": "\\(^{1}\\)Department of Aerospace and Mechanical Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.242, + 0.88, + 0.274 + ], + "angle": 0, + "content": "\\(^{2}\\)Department of Electronic Engineering, Kyung Hee University; Yongin-Si, Gyeonggi-do 17104, Republic of Korea." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.276, + 0.88, + 0.308 + ], + "angle": 0, + "content": "\\(^{3}\\)Department of Chemical and Biomolecular Engineering, University of Notre Dame; Notre Dame, Indiana 46556. United States." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.311, + 0.88, + 0.342 + ], + "angle": 0, + "content": "\\(^{4}\\)National Center for Computational Sciences, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37830, United States." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.346, + 0.798, + 0.362 + ], + "angle": 0, + "content": "*Corresponding author. Email: adowling@nd.edu, eleest@khu.ac.kr, and tluo@nd.edu" + }, + { + "type": "list", + "bbox": [ + 0.112, + 0.206, + 0.88, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.398, + 0.885, + 0.643 + ], + "angle": 0, + "content": "Quantum annealing (QA) has the potential to significantly improve solution quality and reduce time complexity in solving combinatorial optimization problems compared to classical optimization methods. However, due to the limited number of qubits and their connectivity, the QA hardware did not show such an advantage over classical methods in past benchmarking studies. Recent advancements in QA with more than 5,000 qubits, enhanced qubit connectivity, and the hybrid architecture promise to realize the quantum advantage. Here, we use a quantum annealer with state-of-the-art techniques and benchmark its performance against classical solvers. To compare their performance, we solve over 50 optimization problem instances represented by large and dense Hamiltonian matrices using quantum and classical solvers. 
The results demonstrate that a state-of-the-art quantum solver has higher accuracy (\\(\\sim 0.013\\%\\)) and a significantly faster problem-solving time (\\(\\sim 6,561\\times\\)) than the best classical solver. Our results highlight the advantages of leveraging QA over classical counterparts, particularly in hybrid configurations, for achieving high accuracy and substantially reduced problem solving time in large-scale real-world optimization problems." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.66, + 0.882, + 0.695 + ], + "angle": 0, + "content": "Keywords: quantum advantage, quantum-classical hybrid algorithm, quantum annealing, combinatorial optimization, benchmarking study" + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.731, + 0.26, + 0.751 + ], + "angle": 0, + "content": "Introduction" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.753, + 0.885, + 0.892 + ], + "angle": 0, + "content": "Quantum computers mark a paradigm shift to tackle challenging tasks that classical computers cannot solve in a practical timescale\\(^{1,2}\\). The quantum annealer is a special quantum computer designed to solve combinatorial optimization problems with problem size-independent time complexity\\(^{3-5}\\). This unique quantum annealing (QA) capability is based on the so-called adiabatic process\\(^{6,7}\\). During this process, entangled qubits naturally evolve into the ground state of a given Hamiltonian to find the optimal vector of binary decisions for the corresponding quadratic unconstrained binary optimization (QUBO) problem\\(^{8-10}\\). The adiabatic theorem of quantum mechanics ensures that QA identifies the optimal solution regardless of the size and landscape of" + }, + { + "type": "page_number", + "bbox": [ + 0.871, + 0.938, + 0.882, + 0.951 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.09, + 0.887, + 0.144 + ], + "angle": 0, + "content": "the combinatorial parametric space, highlighting QA as a powerful and practical solver\\(^{11-14}\\). The ability to efficiently explore high-dimensional combinational spaces makes QA capable of handling a wide range of optimization tasks\\(^{4,5,10,15,16}\\)." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.161, + 0.887, + 0.492 + ], + "angle": 0, + "content": "The potential merit of QA motivates the systematic comparison with classical counterparts (e.g., simulated annealing, integer programming, steepest descent method, tabu search, and parallel tempering with isoenergetic cluster moves), focusing on the solution quality and the time complexity. While previous benchmarking studies showed some advantages of QA, most used low-dimensional or the sparse configuration of QUBO matrices due to the lack of available qubits in the QA hardware and poor topology to connect qubits\\(^{17-19}\\). For example, O'Malley et al.\\(^{17}\\) compared the performance of QA with classical methods (mathematical programming), but they limited the number of binary variables to 35 due to the QA hardware limitation. Similarly, Tasseff et al.\\(^{18}\\) highlighted the potential advantages of QA compared to classical methods (such as simulated annealing, integer programming, and Markov chain Monte Carlo) for sparse optimization problems containing up to 5,000 decision variables and 40,000 quadratic terms. Haba et al.\\(^{19}\\) demonstrated that a classical solver (integer programming) could be faster than QA for small problems, e.g., \\(\\sim 100\\) decision variables. 
Consequently, these benchmarking studies show that QA methods and their classical counterparts can exhibit similar solution quality and time complexity. However, such low-dimensional or sparse QUBOs considered in the previous benchmarking studies are challenging to map to a wide range of practical problems, which usually require high-dimensional and dense configuration of QUBO matrices\\(^{4,5,10,20}\\). For example, in our previous QA optimization of one-dimensional and two-dimensional optical metamaterials, the QUBO matrices exhibit these properties (Fig. S1)\\(^{4,5,16,20}\\)." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.508, + 0.887, + 0.753 + ], + "angle": 0, + "content": "The state-of-the-art QA hardware (D-Wave Advantage System) features more than 5,000 qubits, advanced topology to connect qubits, and efficient hybrid algorithms (e.g., Leap Hybrid sampler). For example, the recent development (e.g., Pegasus topology) has increased qubit connectivity from 6 to \\(15^{21-23}\\). Improved qubit connectivity reduces the need for complex embedding processes, which map problem variables to physical qubits on the hardware. With better connectivity, such as in D-Wave's Pegasus topology, the embedding process becomes more efficient and can better preserve the structure of dense optimization problems. This enhancement allows the quantum annealer to increase the potential for finding high-quality solutions[24,25]. In addition, a QUBO decomposition algorithm (i.e., QBSolv) splits a large QUBO matrix into small pieces of subQUBO matrices, allowing us to handle a QUBO matrix with dimensions higher than the maximum number of qubits in the QA hardware[26,27]. Given these advancements, it is imperative to study the performance of the state-of-the-art QA system for high-dimensional and dense configuration of QUBO matrices, and systemically compare solution quality and the time complexity with the classical counterparts." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.769, + 0.887, + 0.858 + ], + "angle": 0, + "content": "In this work, we benchmark the performance of quantum solvers against classical algorithms in solving QUBO problems with large and dense configurations to represent real-world optimization problems. We analyze the solution quality and the required time to solve these benchmark problems using several quantum and classical solvers. This benchmarking study provides important insights into employing QA in practical problem-solving scenarios." + }, + { + "type": "page_number", + "bbox": [ + 0.869, + 0.938, + 0.882, + 0.952 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.117, + 0.093, + 0.2, + 0.112 + ], + "angle": 0, + "content": "Results" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.114, + 0.884, + 0.376 + ], + "angle": 0, + "content": "We present a benchmarking study on combinatorial optimization problems representing real-world scenarios, e.g., materials design, characterized by dense and large QUBO matrices (Fig. S1). These problems are non-convex and exhibit a highly complex energy landscape, making it challenging and time-consuming to identify accurate solutions. Classical solvers, such as integer programming (IP), simulated annealing (SA), steepest descent (SD), tabu search (TS), parallel tempering with isoenergetic cluster moves (PT-ICM), perform well for small-scale problems. However, they are often relatively inaccurate for larger problems (problem size \\(\\geq 1,000\\); Fig. 1a). 
In particular, SD and TS show low relative accuracy compared to other solvers. The combination of PT and ICM leverages the strengths of both techniques: PT facilitates crossing energy barriers, while ICM ensures exploration of the solution space, effectively covering broad and diverse regions. This makes PT-ICM particularly effective for exploring complex optimization spaces and enhancing convergence toward the global optimum[46,47]. However, the performance of PT-ICM can be problem-dependent[48]. While it can work well for sparse problems, its effectiveness decreases for denser problems[46]. Consequently, although SA, and PT-ICM perform better than SD and TS, they also fail to find high-quality solutions for large-scale problems." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.394, + 0.884, + 0.638 + ], + "angle": 0, + "content": "To address these limitations, QUBO decomposition strategies can be employed to improve the relative accuracy. For example, integrating QUBO decomposition with classical solvers (e.g., SA-QBSolv and PT-ICM-QBSolv) improves their performance. Nonetheless, these approaches often remain insufficient for handling massive problems effectively, particularly considering problem-solving time (Fig. 1b), which will be further discussed in the following. On the other hand, quantum solvers provide excellent performance for solving these dense and large-scale problems representing real-world optimization scenarios. Although QA can perform excellently for small problems, it has difficulty solving large and dense QUBOs due to the limited number of qubits \\((5,000+)\\) and connectivity (15). Several prior studies reported that QA may not be efficient since it cannot effectively handle dense and large QUBOs due to hardware limitations[23,53,54]. However, when it runs with the QUBO decomposition strategy (i.e., QA-QBSolv), large-scale problems (\\(n \\geq 100\\)) can be effectively handled. Furthermore, hybrid QA (HQA), which integrates quantum and classical approaches, also can solve large-scale problems efficiently. As a result, the quantum solvers consistently identify high-quality solutions across all problem sizes (Fig. 1a)." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.656, + 0.884, + 0.864 + ], + "angle": 0, + "content": "Computational time is also a critical metric for evaluating solver performance. Classical solvers exhibit rapidly increasing solving times as problem sizes grow, making them impractical for large-scale combinatorial optimization problems (Fig. 1b). While SD and TS are faster than other classical solvers, their relative accuracies are low, as can be seen in Fig. 1a. It is worth noting that the SA, and PT-ICM solvers struggle to handle problems with more than 3,000 variables due to excessively long solving time or computational constraints (e.g., memory limits). Although the IP solver is faster than SA and PT-ICM, its solving time increases greatly with problem size. The QUBO decomposition strategy significantly reduces computational time, yet quantum solvers remain faster than their classical counterparts across all problem sizes. For instance, for a problem size of 5,000, the solving time for HQA is \\(0.0854\\mathrm{s}\\) and for QA-QBSolv is \\(74.59\\mathrm{s}\\), compared to \\(167.4\\mathrm{s}\\) and \\(195.1\\mathrm{s}\\) for SA-QBSolv and PT-ICM-QBSolv, respectively, highlighting superior efficiency of the quantum solvers." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.87, + 0.938, + 0.882, + 0.951 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.09, + 0.887, + 0.37 + ], + "angle": 0, + "content": "To further evaluate scalability, we conduct a systematic benchmarking study on QUBO problems (size: up to 10,000 variables), designed to mimic real-world scenarios through randomly generated elements. PT-ICM is excluded from this analysis due to excessive solving times compared to other solvers (Fig. 1b). As shown in Fig. 2, classical solvers (IP, SA, SD, and TS) are accurate for smaller problems but become inaccurate as the problem size increases. Consistent with the results in Fig. 1, the SD and TS solvers exhibit low relative accuracy even for a relatively small problem (e.g., 2,000). IP and SA are more accurate than SD and TS but fail to identify the optimal state for large problems. It is known that IP can provide global optimality guarantees\\(^{40}\\), but our study highlights that proving a solution is globally optimal is challenging for large and dense problems. For example, in one case (\\(n = 7,000\\)), the optimality gap remains as large as \\(\\sim 17.73\\%\\), where the best bound is -19,660 while the solution obtained from the IP solver is -16,700, with the optimality gap not narrowing even after 2 hours of runtime. The relative accuracy can be improved by employing the QUBO decomposition strategy (e.g., SA-QBSolv), yet it still fails to identify high-quality solutions for problem sizes exceeding 4,000. In contrast, quantum solvers demonstrate superior accuracy for large-scale problems. Notably, the HQA solver consistently outperforms all other methods, reliably identifying the best solution regardless of problem size (Fig. 2)." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.386, + 0.887, + 0.543 + ], + "angle": 0, + "content": "Fig. 3a shows that the solving time rapidly increases as the problem size increases for the classical solvers, indicating that solving combinatorial optimization problems with classical solvers can become intractable for large-size problems (Fig. 3b). The solving time trends with increasing problem size agree well with the theoretical time complexities of the classical solvers (Fig. 3b and Fig. S3, see 2-4-2. Computational Time section). While the IP solver can be faster than other classical solvers, it also requires significant time for large problems (e.g., \\( n > 5,000 \\)). The use of the QUBO decomposition strategy dramatically reduces the solving time, but the quantum solvers consistently outpace classical counterparts (Fig. 3a). For example, the solving time (\\( n = 10,000 \\)) is \\( 0.0855 \\) s for HQA, \\( 101 \\) s for QA-QBSolv, and \\( 561 \\) s for SA-QBSolv." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.56, + 0.887, + 0.736 + ], + "angle": 0, + "content": "Decomposing a large QUBO into smaller pieces leads to a higher relative accuracy, as a solver can find better solutions for each decomposed QUBOs, mitigating the current hardware limitations. Note that the accuracy of QA for QUBOs with problem sizes of 30 and 100 is, respectively, 1.0 and 0.9956 (without leveraging the QUBO decomposition method). Hence, the accuracy of QA-QBSolv with a sub-QUBO size of 30 is higher than that with a sub-QUBO size of 100, as decomposed QUBOs with a smaller size fit the QA hardware better (Fig. 4a). 
However, a smaller sub-QUBO size results in a greater number of sub-QUBOs after decomposition, leading to increased time required to solve all decomposed problems (Fig. 4b). It is noted that the QA-QBSolv solver does not guarantee finding the best solution for large problems (size \\(>4,000\\)), resulting in lower accuracies regardless of sub-QUBO sizes, as can be seen in Fig. 2 and Fig. 4a." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.752, + 0.887, + 0.892 + ], + "angle": 0, + "content": "Our results show that HQA, which incorporates QA with classical algorithms to overcome the current quantum hardware limitations, is currently the most efficient solver for complex real-world problems that require the formulation of dense and large QUBOs. In this context, we define \"Quantum Advantage\" as the ability of a quantum-enhanced solver to achieve high accuracy and significantly faster problem-solving time compared to the classical solvers for large-scale optimization problems. Our findings suggest that leveraging quantum resources, particularly in hybrid configurations, can provide a computational advantage over classical approaches. Besides, as the current state of HQA demonstrates, we expect QA will have much higher accuracy and" + }, + { + "type": "page_number", + "bbox": [ + 0.869, + 0.938, + 0.882, + 0.951 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.887, + 0.127 + ], + "angle": 0, + "content": "require much shorter time to solve QUBO problems with the development of the quantum hardware with more qubits and better qubit connectivity." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.163, + 0.236, + 0.182 + ], + "angle": 0, + "content": "Discussion" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.184, + 0.888, + 0.43 + ], + "angle": 0, + "content": "This work comprehensively compares state-of-the-art QA hardware and software against several classical optimization solvers for large and dense QUBO problems (up to 10,000 variables, fully connected interactions). The classical solvers struggled to solve large-scale problems, but their performance can be improved when combined with the QUBO decomposition method (i.e., QBSolv). Nevertheless, they become inaccurate and inefficient with increasing problem size, indicating that classical methods can face challenges for complex real-world problems represented by large and dense QUBO matrices. On the contrary, HQA performs significantly better than its classical counterparts, exhibiting the highest accuracy (\\(\\sim 0.013\\%\\) improvement) and shortest time to obtain solutions (\\(\\sim 6,561 \\times\\) acceleration) for 10,000 dimensional QUBO problems, demonstrating 'Quantum Advantage' for large and dense QUBO problems. Pure QA and QA with the QUBO decomposition method still exhibit limitations in solving large problems due to the current QA hardware limitations (e.g., number of qubits and qubit connectivity). However, we anticipate that QA will eventually reach the efficiency of HQA with the ongoing development of the quantum hardware. Thus, we expect QA to demonstrate true 'Quantum Advantage' in the future." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.464, + 0.218, + 0.484 + ], + "angle": 0, + "content": "Methods" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.486, + 0.303, + 0.503 + ], + "angle": 0, + "content": "Definition of a QUBO" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.504, + 0.884, + 0.539 + ], + "angle": 0, + "content": "QA hardware is designed to efficiently solve combinatorial optimization problems that are formulated with a QUBO matrix, which can be given by\\(^{28,29}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.395, + 0.554, + 0.866, + 0.609 + ], + "angle": 0, + "content": "\\[\ny = \\sum_ {i = 1} ^ {n} \\sum_ {j = i} ^ {n} Q _ {i, j} x _ {i} x _ {j} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.625, + 0.889, + 0.718 + ], + "angle": 0, + "content": "where \\( Q_{i,j} \\) is the \\( i \\)-th row and \\( j \\)-th column real-number element of the QUBO matrix \\( (\\mathbf{Q}) \\), which is an \\( n \\times n \\) Hermitian, i.e., \\( \\mathbf{Q} \\in \\mathbb{R}^{n \\times n} \\), and \\( x_i \\) is the \\( i \\)-th element of a binary vector \\( \\mathbf{x} \\) with a length of \\( n \\), i.e., \\( \\mathbf{x} \\in [0,1^n] \\). \\( Q_{i,j} \\) is often referred to as a linear coefficient for \\( i = j \\) and a quadratic interaction coefficient for \\( i \\neq j \\). The objective of QA is to identify the optimal binary vector of a given QUBO, which minimizes the scalar output \\( y \\) as29:" + }, + { + "type": "equation", + "bbox": [ + 0.419, + 0.735, + 0.866, + 0.763 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {x} ^ {*} = \\underset {x} {\\operatorname {a r g m i n}} y \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.779, + 0.888, + 0.884 + ], + "angle": 0, + "content": "In optimization problems, the linear coefficients correspond to cost or benefit terms associated with individual variables, while the quadratic coefficients represent interaction terms or dependencies between pairs of variables. These coefficients can be learned using machine learning models, such as the factorization machine (FM), trained on datasets containing input structures and their corresponding performance metrics. By mapping these learned coefficients into a QUBO formulation, we effectively represent an energy function of a material system or other real-world" + }, + { + "type": "page_number", + "bbox": [ + 0.869, + 0.937, + 0.885, + 0.952 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.887, + 0.127 + ], + "angle": 0, + "content": "optimization problem. This QUBO then describes the optimization space, enabling the identification of the optimal state with the best performance\\(^{30,31}\\)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.143, + 0.341, + 0.16 + ], + "angle": 0, + "content": "Methods to Solve a QUBO" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.161, + 0.888, + 0.248 + ], + "angle": 0, + "content": "Various methods have been proposed to solve QUBO problems. For our benchmarking study, we consider seven representative methods: QA, hybrid QA (HQA), integer programming (IP), simulated annealing (SA), steepest descent (SD), tabu search (TS), parallel tempering with isoenergetic cluster moves (PT-ICM). 
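Before the individual solvers are introduced, the QUBO objective of equations (1) and (2) above can be made concrete with a minimal NumPy sketch; the matrix size, random coefficients, and brute-force search below are illustrative placeholders, not the paper's benchmark instances.

```python
import numpy as np

rng = np.random.default_rng(0)

n = 8                                      # toy problem size (illustrative only)
Q = np.triu(rng.normal(0.0, 0.1, (n, n)))  # upper-triangular QUBO matrix Q, as in eq. (1)
x = rng.integers(0, 2, size=n)             # a candidate binary vector x in {0, 1}^n

# Objective of eq. (1): y = sum_i sum_{j>=i} Q_ij x_i x_j = x^T Q x for upper-triangular Q
y = x @ Q @ x

# Brute-force argmin of eq. (2) -- only feasible for very small n
x_best = min((np.array(bits) for bits in np.ndindex(*(2,) * n)),
             key=lambda v: v @ Q @ v)
print(y, x_best, x_best @ Q @ x_best)
```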
Below, we provide a brief introduction to each of the solvers used in solving combinatorial optimization problems:" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.266, + 0.537, + 0.284 + ], + "angle": 0, + "content": "Quantum Annealing and Hybrid Quantum Annealing" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.284, + 0.889, + 0.375 + ], + "angle": 0, + "content": "QA starts with a superposition state for all qubits, which has the lowest energy state of the initial Hamiltonian \\((H_0)\\). In the annealing process, the system evolves toward the lowest energy state of the final Hamiltonian (also called a problem Hamiltonian, \\(H_{p}\\)) by minimizing the influence of the initial Hamiltonian. The measured state at the end of the annealing is supposed to be the ground state of \\(H_{p}\\), which can be expressed as the following equation32,33:" + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.392, + 0.866, + 0.412 + ], + "angle": 0, + "content": "\\[\nH (t / t _ {a}) = A (t / t _ {a}) H _ {0} + B (t / t _ {a}) H _ {p} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.429, + 0.888, + 0.573 + ], + "angle": 0, + "content": "Here, \\( t \\) is the elapsed annealing time, and \\( t_a \\) is the total annealing time. Equation (3) evolves from \\( A(t / t_a) = 1 \\), \\( B(t / t_a) \\approx 0 \\) at the beginning of the annealing \\( (t / t_a = 0) \\) to \\( A(t / t_a) \\approx 0 \\), \\( B(t / t_a) = 1 \\) at the end of the annealing \\( (t / t_a = 1) \\). Sufficiently slow evolution from \\( H_0 \\) to \\( H_p \\) enables the quantum system to stay at the ground state, which leads to the identification of the optimal solution of a given combinatorial optimization problem3,34. We use D-Wave Systems' quantum annealer (Advantage 4.1) to solve the problems using QA, and we set the number of reads for QA to 1,000 with a total annealing time of \\( 20~\\mu s \\). We select the best solution corresponding to the lowest energy state found among 1,000 reads." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.589, + 0.888, + 0.905 + ], + "angle": 0, + "content": "The D-Wave Ocean software development kit (SDK, ver. 3.3.0) provides many useful libraries, which include quantum or classical samplers such as the QA, HQA, SA, SD, and TS. They allow us to solve QUBO problems\\(^{22,35,36}\\). We employ these samplers, which are implemented in the D-wave Ocean SDK, for the benchmarking study. Classical or QA solvers often benefit from decomposition algorithms to identify a high-quality solution (i.e., an optimal solution or a good solution close to the global optimum) for large QUBO problems. Hence, the decomposition of a QUBO matrix into sub-QUBOs is very useful when the size of QUBO matrix is larger than the physical volume of a sampler (i.e., QUBO size > physical number of qubits in QA or memory capacity of a classical computer). We employ the QBSolv package implemented in D-wave Ocean SDK for QUBO decomposition. The QBSolv splits a QUBO matrix into smaller QUBO matrices, and each of them is sequentially solved by classical or QA solvers. This algorithm enables us to handle a wide range of complex real-world problems\\(^{21,22,37}\\). The size of the decomposed QUBOs is set to 30 unless otherwise specified. HQA (Leap Hybrid solver), developed by D-Wave systems, also decomposes large QUBO into smaller subproblems well-suited for QA's QPU, and then aggregates the results\\(^{27,38}\\). The detailed algorithm of HQA, however, is not publicly released. 
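As a rough illustration of how the Ocean samplers mentioned above can be invoked on a QUBO, the sketch below uses the classical samplers shipped as `neal` (SA), `greedy` (SD), and `tabu` (TS) together with the Leap hybrid sampler. The toy QUBO, the exact package layout (which differs across Ocean SDK versions), and the hyperparameter values are assumptions for illustration; the QBSolv decomposition step and direct QPU submission are omitted.

```python
import numpy as np
import neal                                  # SimulatedAnnealingSampler (dwave-neal)
import greedy                                # SteepestDescentSampler (dwave-greedy)
import tabu                                  # TabuSampler (dwave-tabu)
from dwave.system import LeapHybridSampler   # hybrid solver; requires Leap API access

# Toy upper-triangular QUBO as a {(i, j): coefficient} dict, not a paper benchmark.
rng = np.random.default_rng(1)
n = 120
Q = {(i, j): rng.normal(0.0, 0.1) for i in range(n) for j in range(i, n)}

samplers = {
    "SA": (neal.SimulatedAnnealingSampler(), dict(num_reads=1000, num_sweeps=1000)),
    "SD": (greedy.SteepestDescentSampler(), dict(num_reads=1000)),
    "TS": (tabu.TabuSampler(), dict(num_reads=1000, timeout=100)),  # timeout in ms
}

for name, (sampler, kwargs) in samplers.items():
    sampleset = sampler.sample_qubo(Q, **kwargs)
    print(name, sampleset.first.energy)      # lowest energy among the returned reads

# Hybrid quantum annealing (HQA): decomposition and QPU calls happen server-side.
print("HQA", LeapHybridSampler().sample_qubo(Q).first.energy)
```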
We utilize a D-Wave sampler (dwave-system 1.4.0) for SA, SD, and TS with a specified number of reads (1,000) and default settings for other parameters. Furthermore, we employ D-Wave hybrid framework for PT-ICM." + }, + { + "type": "page_number", + "bbox": [ + 0.869, + 0.938, + 0.885, + 0.952 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.109, + 0.292, + 0.126 + ], + "angle": 0, + "content": "Integer Programming" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.127, + 0.884, + 0.248 + ], + "angle": 0, + "content": "IP uses branch-and-bound, cutting planes, and other methods to search the solution space for optimal integer decisions and prove global optimality within a tolerance (gap). We use Gurobi (version 10.0.2)39 for benchmarking with the default settings (0.1% global optimality gap) plus a two-hour time limit and 240 GB software memory limit per optimization problem. The benchmark QUBO problem is implemented in the Pyomo modeling environment (version 6.6.2)40. We also experimented with a large gap and observed the first identified integer solution often had a poor objective function value. These results are not further reported for brevity." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.266, + 0.284, + 0.284 + ], + "angle": 0, + "content": "Simulated Annealing" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.283, + 0.884, + 0.424 + ], + "angle": 0, + "content": "SA, which is inspired by the annealing process in metallurgy, is a probabilistic optimization algorithm designed to approximate a global optimum of a given objective function. It is considered a metaheuristic method, which can be applied to a wide range of optimization problems\\(^{41,42}\\). In SA, temperature and cooling schedule are major factors that determine how extensively the algorithm explores the solution space\\(^{43}\\). This algorithm often identifies near-optimal solutions but cannot guarantee that local or global optimality conditions are satisfied. For SA, the hyperparameters are configured as follows: 1,000 reads, 1,000 sweeps, a 'random' initial state generation, and a 'geometric' temperature schedule." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.44, + 0.253, + 0.457 + ], + "angle": 0, + "content": "Steepest Descent" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.457, + 0.884, + 0.545 + ], + "angle": 0, + "content": "SD operates by employing variable flips to reduce the energy of a given QUBO through local minimization computations rather than relying on a calculated gradient in a traditional gradient descent algorithm\\(^{44}\\). This algorithm is computationally inexpensive and beneficial for local refinement; thus, it can be used to search for local optima. In our benchmarking study, SD utilizes hyperparameters set to 1,000 reads and a 'random' strategy for initial state generation." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.561, + 0.22, + 0.577 + ], + "angle": 0, + "content": "Tabu Search" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.579, + 0.882, + 0.667 + ], + "angle": 0, + "content": "TS is designed to solve combinatorial and discrete optimization problems by using memory to guide the search for better solutions, as introduced by Glover\\(^{45}\\). This algorithm can escape already visited local minima by remembering those points (called 'Tabu List' to keep track of moves during the search), aiming to identify high-quality solutions in a large solution space. 
This algorithm works well for combinatorial optimization problems with small search spaces." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.667, + 0.865, + 0.718 + ], + "angle": 0, + "content": "However, it can be hard to evaluate neighboring solutions and to maintain and update the Tabu List with increasing problem sizes. The hyperparameter settings for TS are as follows: 1,000 reads, a timeout of \\(100\\mathrm{ms}\\), and 'random' initial state generation." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.736, + 0.615, + 0.753 + ], + "angle": 0, + "content": "Parallel Tempering with Isoenergetic Cluster Moves (PT-ICM)" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.753, + 0.884, + 0.875 + ], + "angle": 0, + "content": "PT-ICM is an advanced Monte Carlo method designed to navigate optimization space, such as QUBO problems\\(^{46-48}\\). PT operates by maintaining multiple replicas of the system at different temperatures and allowing exchanges between replicas based on a Metropolis criterion. This approach helps lower-temperature replicas escape local minima with the aid of higher-temperature replicas. ICM identifies clusters of variables that can flip without changing the system's energy\\(^{46}\\). In this study, the hyperparameters for PT-ICM are set as follows: the number of sweeps is 1,000, the number of replicas is 10, and the number of iterations is 10." + }, + { + "type": "page_number", + "bbox": [ + 0.869, + 0.937, + 0.885, + 0.952 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.092, + 0.327, + 0.108 + ], + "angle": 0, + "content": "Benchmarking Problems" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.11, + 0.285, + 0.125 + ], + "angle": 0, + "content": "Real-world problems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.126, + 0.885, + 0.406 + ], + "angle": 0, + "content": "Material optimization is selected to represent real-world problems, with the design of planar multilayers (PMLs) optical film as a testbed for benchmarking. PMLs can be seen in many applications. For example, they have been explored for transparent radiative cooling windows to address global warming by emitting thermal radiation through the atmospheric window (\\(8\\mu \\mathrm{m} < \\lambda < 13\\mu \\mathrm{m}\\))4, while transmitting visible photons. PMLs consist of layers with one of four dielectric materials: silicon dioxide, silicon nitride, aluminum oxide, and titanium dioxide. The configuration of these layers can be expressed as a binary vector, where each layer is assigned a two-digit binary label. Optical characteristics and corresponding figure-of-merit (FOM) of the PML can be calculated by solving Maxwell's equations using the transfer matrix method (TMM). To formulate QUBOs, layer configurations (input binary vectors) and their FOMs (outputs) are used to train the FM model. FM learns the linear and quadratic coefficients, effectively modeling the optimization landscape of the material system. QUBO matrices are then generated using these coefficients30,31. PML configurations are randomly generated for training datasets, and their FOMs are calculated using TMM. The resulting QUBO matrices represent real-world materials optimization problems, characterized by highly dense (fully connected) configurations (Fig. S1), which are used for the benchmarking study in Fig. 1." 
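The mapping from a trained factorization machine to a QUBO described above can be sketched in a few lines: for a second-order FM with linear weights w and latent factors V, the surrogate sum_i w_i x_i + sum_{i<j} <v_i, v_j> x_i x_j is already in QUBO form for binary x. The random arrays below merely stand in for fitted FM parameters; the FM training on TMM-computed FOMs itself is not reproduced.

```python
import numpy as np

rng = np.random.default_rng(2)
n, k = 100, 8                     # number of binary layer variables, FM latent dimension
w = rng.normal(0.0, 0.1, n)       # stand-in for learned linear FM weights
V = rng.normal(0.0, 0.1, (n, k))  # stand-in for learned FM latent factor vectors

# Assemble the upper-triangular QUBO: Q_ii = w_i, and Q_ij = <v_i, v_j> for i < j.
Q = np.triu(V @ V.T, k=1)         # pairwise interaction coefficients
np.fill_diagonal(Q, w)            # linear coefficients on the diagonal

x = rng.integers(0, 2, n)         # any candidate layer configuration
fom_surrogate = x @ Q @ x         # FM-predicted figure of merit (up to the FM bias term)
print(Q.shape, fom_surrogate)
```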
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.422, + 0.312, + 0.439 + ], + "angle": 0, + "content": "Benchmarking problems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.44, + 0.884, + 0.492 + ], + "angle": 0, + "content": "We formulate QUBO matrices with random elements to further systematically explore scalability (Fig. 2 and Fig. 3), following the characteristics of QUBOs from real-world problems, for the benchmarking study as the following:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.493, + 0.884, + 0.545 + ], + "angle": 0, + "content": "- Problem size: The problem size, corresponding to the length of a binary vector \\((n)\\), varies from 120 to 10,000 (120, 200, 500, 1,000, 1,500, 2,000, 2,500, 3,000, 4,000, 5,000, 6,000, 7,000, 8,000, 9,000 and 10,000)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.547, + 0.885, + 0.652 + ], + "angle": 0, + "content": "- Distribution of elements: For each problem size, four QUBO matrices with different distributions of elements are studied. These elements are random numbers with a mean value of 0 and standard deviations of 0.001, 0.01, 0.1, or 1. These distributions reflect the variability observed in QUBO coefficients derived from real-world problems (Table S1). A QUBO configured with elements having a large deviation yields a significant variation in the energy landscape, potentially resulting in high energy barriers that must be overcome to find the ground state." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.653, + 0.875, + 0.809 + ], + "angle": 0, + "content": "- Density of matrices: The density of QUBO matrices reflects the proportion of pairwise interactions among variables relative to the maximum possible interactions. Fully connected QUBOs, such as those derived from real-world problems, represent cases where all variables interact with each other. For example, in layered photonic structures, each layer interacts with every other layer, influencing optical responses, which leads to a fully connected QUBO. In contrast, Max-Cut problems typically result in sparse QUBOs, where only a subset of variables (nodes) interact through edges. The maximum number of interaction coefficients (i.e., the number of edges in Max-Cut problems) is \\( nC_2 \\), where \\( n \\) denotes the problem size. The density of a QUBO can be calculated as:" + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.493, + 0.885, + 0.809 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.243, + 0.825, + 0.866, + 0.864 + ], + "angle": 0, + "content": "\\[\n\\text {d e n s i t y} = \\frac {\\text {n u m b e r o f i n t e r a c t i o n c o e f f i c i e n t s}}{\\text {m a x i m u m n u m b e r o f i n t e r a c t i o n c o e f f i c i e n t s}} \\tag {4}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.869, + 0.938, + 0.885, + 0.952 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.887, + 0.234 + ], + "angle": 0, + "content": "For example, a benchmark problem instance (G10) with 800 nodes and 19,176 edges has a density of \\(6\\%\\), calculated as: density \\(= 19,176 / 319,600 = 0.06\\). The density of Max-Cut problems can be adjusted by changing the number of edges, with typical instances having densities ranging from \\(0.02\\%\\) to \\(6\\%\\) (Fig. S1, Table S2). In contrast, real-world problems feature fully connected configurations, corresponding to a density of \\(100\\%\\). 
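A possible generator for such synthetic benchmark instances, together with the density of equation (4), is sketched below; the seed and the exact generation script used for the paper's instances are not specified, so this is only an assumed reconstruction.

```python
import numpy as np

def random_dense_qubo(n, std, seed=0):
    """Upper-triangular QUBO with zero-mean Gaussian elements (assumed generator)."""
    rng = np.random.default_rng(seed)
    return np.triu(rng.normal(0.0, std, (n, n)))

def qubo_density(Q):
    """Density per eq. (4): nonzero pairwise terms over the n*(n-1)/2 possible pairs."""
    n = Q.shape[0]
    interactions = np.count_nonzero(np.triu(Q, k=1))
    return interactions / (n * (n - 1) / 2)

Q = random_dense_qubo(n=120, std=0.1)
print(f"density = {qubo_density(Q):.4f}")   # ~1.0 for a fully connected instance
```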
QUBOs for this benchmarking study have dense matrices fully filled with real-number elements in the upper triangular part (i.e., fully connected graph nodes, Fig. S2). This configuration aims to approximate real-world optimization problems, which usually requires a dense QUBO matrix4,28." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.248, + 0.679, + 0.265 + ], + "angle": 0, + "content": "Performance Metrics: Relative Accuracy and Computational Time" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.266, + 0.263, + 0.282 + ], + "angle": 0, + "content": "Relative Accuracy" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.283, + 0.888, + 0.442 + ], + "angle": 0, + "content": "For small-scale problems, brute-force search guarantees the identification of the global optimum by evaluating all possible solutions. However, this approach becomes infeasible for large-scale problems due to the exponential growth of the search space. The IP solver, such as Gurobi, utilizes the branch-and-bound method to efficiently explore the solution space and prove global optimality within an optimality gap. However, due to computational limitations or time constraints, IP may struggle to find the global optimum for large-scale problems. To address this challenge in our benchmarking study, we employ a 'Relative Accuracy' metric to compare the relative performance of different solvers. Relative accuracy is defined as the ratio of a solver's objective value to the best objective found across all solvers:" + }, + { + "type": "equation", + "bbox": [ + 0.273, + 0.457, + 0.866, + 0.478 + ], + "angle": 0, + "content": "\\[\n\\text {R e l a t i v e A c c u r a c y} = \\text {S o l u t i o n} _ {\\text {s o l v e r}} / \\text {S o l u t i o n} _ {\\text {b e s t}} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.492, + 0.889, + 0.688 + ], + "angle": 0, + "content": "This metric provides a way to evaluate the solution quality when the global optimum cannot be definitively found or proven for large-scale problem instances. Note that the best solution is the lowest value among the solutions obtained from all solvers since the solvers are designed to find the lowest energy state (generally negative values for the QUBOs used in this study). The relative accuracies of the solvers are plotted as a function of problem sizes. In Fig. 1, the relative accuracy represents the average value calculated from three different QUBOs that represent material optimization, and in Fig. 2, it represents the average from four different QUBOs with varying standard deviations for each problem size (ranging from 120 to 10,000). Error bars on the plot represent the standard deviation of accuracies calculated from the four different QUBOs for each problem size, relative to the average values. By definition, the relative accuracy is 1.0 when the solver finds a solution with the best-known objective function value (equation 5)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.702, + 0.281, + 0.718 + ], + "angle": 0, + "content": "Computational Time" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.719, + 0.886, + 0.895 + ], + "angle": 0, + "content": "Computational time is another important factor in determining the solvers' performance. Combinatorial optimization problems are considered NP-hard, so increasing problem sizes can lead to an explosion of search space, posing challenges in optimization processes. 
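A minimal sketch of how both metrics could be tabulated from each solver's best objective value and wall-clock solve time is given below; the solver callables, and the use of `time.perf_counter` in place of QPU access-time fields, are simplifications rather than the paper's exact measurement code.

```python
import time

def benchmark(solvers, Q):
    """Time each solver on one QUBO and compute the relative accuracy of eq. (5).

    `solvers` maps a name to a callable returning the lowest objective value found;
    objectives are assumed negative, as for the QUBOs in this study.
    """
    results = {}
    for name, solve in solvers.items():
        t0 = time.perf_counter()
        energy = solve(Q)
        results[name] = {"energy": energy, "time_s": time.perf_counter() - t0}

    best = min(r["energy"] for r in results.values())    # best (lowest) solution found
    for r in results.values():
        r["relative_accuracy"] = r["energy"] / best       # 1.0 for the best solver
    return results
```

For example, wrapping one of the earlier samplers as `lambda Q: neal.SimulatedAnnealingSampler().sample_qubo(Q, num_reads=1000).first.energy` would produce one row of such a comparison.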
We measure the computational time dedicated solely to solving given problems, excluding problem reading time, queue time, or communication time between the local computer and quantum annealer. This is consistent with other benchmarking studies[17,18]. For problems solved on D-Wave systems' QPU for QA, the execution time includes programming and sampling times (anneal, readout, and delay time). QPU access time is calculated for all of them after programmed anneal-read cycles, corresponding to the time charged to users in their allocations, which is used as the computational time for QA and HQA. Classical solvers (SA, SD, TS, and PT-ICM) run on a workstation (AMD" + }, + { + "type": "page_number", + "bbox": [ + 0.869, + 0.938, + 0.885, + 0.952 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.887, + 0.303 + ], + "angle": 0, + "content": "Ryzen Threadripper PRO 3975WX @ 3.5 GHz processor with 32 cores and 32GB of RAM), and IP (Gurobi) run on a cluster node (an Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz processor with 24 cores and 256 GB of RAM). Problem reading time can be significant when the problem size is large, but it is excluded from the computational time consideration. We measure the time solely taken to solve given problems with classical solvers. In Fig. 1b and Fig. 3, the solution time for classical and quantum solvers is presented as a function of problem sizes. Note that a QUBO problem is NP-hard\\(^{49}\\). Evaluating the energy of a given solution has a computational cost of \\( O(n^{2}) \\), where \\( n \\) (= problem size) is the number of variables. The number of reads or sweeps does not scale with \\( n \\), but the cost for each sweep scales as \\( O(n) \\) for SA. Consequently, the theoretical time complexities of the classical solvers are known as \\( O(n^{3}) \\) for SA\\(^{50}\\), \\( O(n^{2}) \\) for SD\\(^{51}\\), and \\( O(n^{2}) \\) for TS\\(^{52}\\). On the other hand, the theoretical time complexity of the quantum solvers can be considered constant." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.339, + 0.305, + 0.36 + ], + "angle": 0, + "content": "Data availability" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.36, + 0.884, + 0.396 + ], + "angle": 0, + "content": "All data generated and analyzed during the study are available from the corresponding author upon reasonable request." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.432, + 0.309, + 0.453 + ], + "angle": 0, + "content": "Code availability" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.453, + 0.884, + 0.489 + ], + "angle": 0, + "content": "The codes used for generating and analyzing data are available from the corresponding author upon reasonable request." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.525, + 0.336, + 0.546 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.546, + 0.889, + 0.651 + ], + "angle": 0, + "content": "This research used resources of the Oak Ridge Leadership Computing Facility at the Oak Ridge National Laboratory, which is supported by the Office of Science of the U.S. Department of Energy under Contract No. DE-AC05-00OR22725. This research was supported by the Quantum Computing Based on Quantum Advantage Challenge Research (RS-2023-00255442) through the National Research Foundation of Korea (NRF) funded by the Korean Government (Ministry of Science and ICT(MSIT))." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.687, + 0.341, + 0.707 + ], + "angle": 0, + "content": "Author information" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.709, + 0.312, + 0.724 + ], + "angle": 0, + "content": "Authors and Affiliations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.726, + 0.884, + 0.759 + ], + "angle": 0, + "content": "Department of Aerospace and Mechanical Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.761, + 0.361, + 0.779 + ], + "angle": 0, + "content": "Seongmin Kim & Tengfei Luo" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.795, + 0.884, + 0.831 + ], + "angle": 0, + "content": "Department of Electronic Engineering, Kyung Hee University; Yongin-Si, Gyeonggi-do 17104, Republic of Korea." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.831, + 0.361, + 0.849 + ], + "angle": 0, + "content": "Sangwoo Ahn & Eungkyu Lee" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.865, + 0.884, + 0.901 + ], + "angle": 0, + "content": "Department of Chemical and Biomolecular Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States." + }, + { + "type": "page_number", + "bbox": [ + 0.86, + 0.937, + 0.884, + 0.952 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.092, + 0.275, + 0.11 + ], + "angle": 0, + "content": "Alexander Dowling" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.125, + 0.884, + 0.16 + ], + "angle": 0, + "content": "National Center for Computational Sciences, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37830, United States." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.162, + 0.37, + 0.178 + ], + "angle": 0, + "content": "Seongmin Kim & In-Saeng Suh" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.196, + 0.236, + 0.211 + ], + "angle": 0, + "content": "Contributions" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.213, + 0.885, + 0.283 + ], + "angle": 0, + "content": "S.K., A.D., E.L., and T.L. conceived the idea. S.K. and S.A. performed benchmarking studies to generate data. A.D. and S.K. implemented the IP benchmark. S.K. analyzed the data with advice from I.S., A.D., E.L., and T.L. All authors discussed the results and contributed to the writing of the manuscript." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.301, + 0.315, + 0.318 + ], + "angle": 0, + "content": "Corresponding authors" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.318, + 0.701, + 0.335 + ], + "angle": 0, + "content": "Correspondence to Alexander W. Dowling, Eungkyu Lee, or Tengfei Luo." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.372, + 0.331, + 0.391 + ], + "angle": 0, + "content": "Ethics declarations" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.394, + 0.29, + 0.411 + ], + "angle": 0, + "content": "Competing Interests" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.411, + 0.458, + 0.428 + ], + "angle": 0, + "content": "The authors declare no competing interests." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.464, + 0.231, + 0.484 + ], + "angle": 0, + "content": "Reference" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.486, + 0.885, + 0.52 + ], + "angle": 0, + "content": "1 Arute, F. et al. Quantum supremacy using a programmable superconducting processor. Nature 574, 505-510 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.521, + 0.885, + 0.554 + ], + "angle": 0, + "content": "2 Daley, A. J. et al. 
Practical quantum advantage in quantum simulation. Nature 607, 667-676 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.556, + 0.885, + 0.589 + ], + "angle": 0, + "content": "3 Johnson, M. W. et al. Quantum annealing with manufactured spins. Nature 473, 194-198 (2011)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.59, + 0.885, + 0.625 + ], + "angle": 0, + "content": "4 Kim, S. et al. High-Performance Transparent Radiative Cooler Designed by Quantum Computing. ACS Energy Lett 7, 4134-4141 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.626, + 0.885, + 0.678 + ], + "angle": 0, + "content": "5 Kim, S., Jung, S., Bobbitt, A., Lee, E. & Luo, T. Wide-angle spectral filter for energy-saving windows designed by quantum annealing-enhanced active learning. Cell Rep Phys Sci (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.679, + 0.885, + 0.729 + ], + "angle": 0, + "content": "6 Li, R. Y., Di Felice, R., Rohs, R. & Lidar, D. A. Quantum annealing versus classical machine learning applied to a simplified computational biology problem. npj Quantum Inf 4 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.73, + 0.885, + 0.765 + ], + "angle": 0, + "content": "7 Vinci, W., Albash, T. & Lidar, D. A. Nested quantum annealing correction. npj Quantum Inf 2 (2016)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.766, + 0.885, + 0.799 + ], + "angle": 0, + "content": "8 Santoro, G. E. & Tosatti, E. Optimization using quantum mechanics: quantum annealing through adiabatic evolution. J Phys A: Math Gen 39, R393-R431 (2006)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.8, + 0.885, + 0.851 + ], + "angle": 0, + "content": "9 Mandra, S., Zhu, Z. & Katzgraber, H. G. Exponentially Biased Ground-State Sampling of Quantum Annealing Machines with Transverse-Field Driving Hamiltonians. Phys Rev Lett 118, 070502 (2017)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.852, + 0.885, + 0.887 + ], + "angle": 0, + "content": "10 Kitai, K. et al. Designing metamaterials with quantum annealing and factorization machines. Phys Rev Res 2, 013319 (2020)." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.486, + 0.885, + 0.887 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.859, + 0.937, + 0.881, + 0.952 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.09, + 0.885, + 0.125 + ], + "angle": 0, + "content": "11 Santoro, G. E., Marton a'k, R., Tosatti, E. & Car, R. Theory of Quantum Annealing of an Ising Spin Glass. Science 295, 2427-2430 (2002)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.127, + 0.885, + 0.16 + ], + "angle": 0, + "content": "12 Hen, I. & Spedalieri, F. M. Quantum Annealing for Constrained Optimization. Phys Rev Appl 5 (2016)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.161, + 0.885, + 0.195 + ], + "angle": 0, + "content": "13 Kadowaki, T. & Nishimori, H. Quantum annealing in the transverse Ising model. Phys Rev E 58, 5355-5363 (1998)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.196, + 0.885, + 0.23 + ], + "angle": 0, + "content": "14 Morita, S. & Nishimori, H. Mathematical foundation of quantum annealing J Math Phys 49, 125210 (2008)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.231, + 0.885, + 0.265 + ], + "angle": 0, + "content": "15 Wilson, B. A. et al. Machine learning framework for quantum sampling of highly constrained, continuous optimization problems. 
Appl Phys Rev 8, 041418 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.267, + 0.885, + 0.317 + ], + "angle": 0, + "content": "16 Kim, S., Wu, S., Jian, R., Xiong, G. & Luo, T. Design of a High-Performance Titanium Nitride Metastructure-Based Solar Absorber Using Quantum Computing-Assisted Optimization. ACS Appl Mater Interfaces 15, 40606-40613 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.318, + 0.885, + 0.37 + ], + "angle": 0, + "content": "17 O'Malley, D., Vesselinov, V. V., Alexandrov, B. S. & Alexandrov, L. B. Nonnegative/Binary matrix factorization with a D-Wave quantum annealer. PLoS One 13, e0206653 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.371, + 0.885, + 0.404 + ], + "angle": 0, + "content": "18 Tasseff, B. et al. On the Emerging Potential of Quantum Annealing Hardware for Combinatorial Optimization. arXiv:2210.04291 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.406, + 0.885, + 0.44 + ], + "angle": 0, + "content": "19 Hab, R., Ohzeki, M. & Tanaka, K. Travel time optimization on multi-AGV routing by reverse annealing. Sci Rep 12, 17753 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.441, + 0.885, + 0.473 + ], + "angle": 0, + "content": "20 Kim, S. et al. Quantum annealing-aided design of an ultrathin-metamaterial optical diode. Nano Converg 11, 16 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.475, + 0.885, + 0.509 + ], + "angle": 0, + "content": "21 Pelofske, E., Hahn, G. & Djidjev, H. N. Noise dynamics of quantum annealers: estimating the effective noise using idle qubits. Quantum Sci Technol 8 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.51, + 0.885, + 0.562 + ], + "angle": 0, + "content": "22 Yoneda, Y., Shimada, M., Yoshida, A. & Shirakashi, J.-i. Searching for optimal experimental parameters with D-Wave quantum annealer for fabrication of Au atomic junctions. Appl Phys Exp 16 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.563, + 0.885, + 0.596 + ], + "angle": 0, + "content": "23 Willsch, D. et al. Benchmarking Advantage and D-Wave 2000Q quantum annealers with exact cover problems. Quantum Inf Process 21 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.598, + 0.885, + 0.632 + ], + "angle": 0, + "content": "24 Yarkoni, S., Raponi, E., Back, T. & Schmitt, S. Quantum annealing for industry applications: introduction and review. Rep Prog Phys 85 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.633, + 0.885, + 0.683 + ], + "angle": 0, + "content": "25 Kasi, S., Warburton, P., Kaewell, J. & Jamieson, K. A Cost and Power Feasibility Analysis of Quantum Annealing for NextG Cellular Wireless Networks. IEEE Transactions on Quantum Engineering 4, 1-17 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.684, + 0.885, + 0.736 + ], + "angle": 0, + "content": "26 Teplukhin, A., Kendrick, B. K. & Babikov, D. Solving complex eigenvalue problems on a quantum annealer with applications to quantum scattering resonances. Phys Chem Chem Phys 22, 26136-26144 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.737, + 0.885, + 0.788 + ], + "angle": 0, + "content": "27 Atobe, Y., Tawada, M. & Togawa, N. Hybrid Annealing Method Based on subQUBO Model Extraction With Multiple Solution Instances. IEEE Trans Comput 71, 2606-2619 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.789, + 0.885, + 0.84 + ], + "angle": 0, + "content": "28 Zaman, M., Tanahashi, K. & Tanaka, S. 
PyQUBO: Python Library for Mapping Combinatorial Optimization Problems to QUBO Form. IEEE Trans Comput 71, 838-850 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.841, + 0.885, + 0.875 + ], + "angle": 0, + "content": "29 Tao, M. et al. in IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW) 557-566 (2020)." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.09, + 0.885, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.859, + 0.937, + 0.882, + 0.952 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.09, + 0.885, + 0.126 + ], + "angle": 0, + "content": "30 Kim, S. et al. A review on machine learning-guided design of energy materials. Progress in Energy 6 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.127, + 0.884, + 0.178 + ], + "angle": 0, + "content": "31 Kim, S., Luo, T., Lee, E. & Suh, I.-S. Distributed Quantum Approximate Optimization Algorithm on Integrated High-Performance Computing and Quantum Computing Systems for Large-Scale Optimization. arXiv:2407.20212 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.179, + 0.885, + 0.23 + ], + "angle": 0, + "content": "32 Gemeinhardt, F., Garmendia, A., Wimmer, M., Weder, B. & Leymann, F. Quantum Combinatorial Optimization in the NISQ Era: A Systematic Mapping Study. ACM Comput Surv 56, 1-36 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.231, + 0.884, + 0.266 + ], + "angle": 0, + "content": "33 Willsch, M., Willsch, D., Jin, F., De Raedt, H. & Michielsen, K. Benchmarking the quantum approximate optimization algorithm. Quantum Inf Process 19 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.267, + 0.885, + 0.3 + ], + "angle": 0, + "content": "34 Hauke, P., Katzgraber, H. G., Lechner, W., Nishimori, H. & Oliver, W. D. Perspectives of quantum annealing: methods and implementations. Rep Prog Phys 83, 054401 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.301, + 0.884, + 0.336 + ], + "angle": 0, + "content": "35 Carugno, C., Ferrari Dacrema, M. & Cremonesi, P. Evaluating the job shop scheduling problem on a D-wave quantum annealer. Sci Rep 12, 6539 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.336, + 0.884, + 0.37 + ], + "angle": 0, + "content": "36 Irie, H., Liang, H., Doi, T., Gongyo, S. & Hatsuda, T. Hybrid quantum annealing via molecular dynamics. Sci Rep 11, 8426 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.371, + 0.884, + 0.405 + ], + "angle": 0, + "content": "37 Raymond, J. et al. Hybrid Quantum Annealing for Larger-than-QPU Lattice-structured Problems. ACM Transactions on Quantum Computing 4, 1-30 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.406, + 0.884, + 0.44 + ], + "angle": 0, + "content": "38 Ceselli, A. & Premoli, M. On good encodings for quantum annealer and digital optimization solvers. Sci Rep 13, 5628 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.441, + 0.884, + 0.492 + ], + "angle": 0, + "content": "39 Song, J., Lanka, R., Yue, Y. & Dilkina, B. A General Large Neighborhood Search Framework for Solving Integer Linear Programs. 34th Conference on Neural Information Processing Systems (NeurIPS 2020) (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.493, + 0.884, + 0.528 + ], + "angle": 0, + "content": "40 Bynum, M. L. et al. Pyomo — Optimization Modeling in Python, 3rd edition. 
Springer Optimization and Its Applications 67 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.528, + 0.884, + 0.579 + ], + "angle": 0, + "content": "41 Alnowibet, K. A., Mahdi, S., El-Alem, M., Abdelawwad, M. & Mohamed, A. W. Guided Hybrid Modified Simulated Annealing Algorithm for Solving Constrained Global Optimization Problems. Mathematics 10 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.58, + 0.884, + 0.615 + ], + "angle": 0, + "content": "42 Rere, L. M. R., Fanany, M. I. & Arymurthy, A. M. Simulated Annealing Algorithm for Deep Learning. Procedia Comput Sci 72, 137-144 (2015)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.615, + 0.884, + 0.667 + ], + "angle": 0, + "content": "43 Gonzales, G. V. et al. A comparison of simulated annealing schedules for constructable design of complex cavities intruded into conductive walls with internal heat generation. Energy 93, 372-382 (2015)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.667, + 0.884, + 0.702 + ], + "angle": 0, + "content": "44 Wadayama, T. et al. Gradient descent bit flipping algorithms for decoding LDPC codes. IEEE Trans Communi 58, 1610-1614 (2010)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.702, + 0.885, + 0.737 + ], + "angle": 0, + "content": "45 Glover, F., Laguna, M. & Marti', R. Principles of Tabu Search. Handbook of Approximation Algorithms and Metaheuristics 23 (2007)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.737, + 0.884, + 0.771 + ], + "angle": 0, + "content": "46 Aramon, M. et al. Physics-Inspired Optimization for Quadratic Unconstrained Problems Using a Digital Annealer. Frontiers in Physics 7 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.772, + 0.884, + 0.806 + ], + "angle": 0, + "content": "47 Zhu, Z., Ochoa, A. J. & Katzgraber, H. G. Fair sampling of ground-state configurations of binary optimization problems. arXiv:1903.07600 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.806, + 0.884, + 0.841 + ], + "angle": 0, + "content": "48 Mandrà, S. & Katzgraber, H. G. A deceptive step towards quantum speedup detection. Quantum Science and Technology 3 (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.841, + 0.884, + 0.876 + ], + "angle": 0, + "content": "49 Yasuoka, H. Computational Complexity of Quadratic Unconstrained Binary Optimization. arXiv:2109.10048 (2022)." + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.09, + 0.885, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.859, + 0.937, + 0.882, + 0.952 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.09, + 0.887, + 0.125 + ], + "angle": 0, + "content": "50 Hansen, P. B. Simulated Annealing. Electrical Engineering and Computer Science Technical Reports 170 (1992)." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.127, + 0.887, + 0.178 + ], + "angle": 0, + "content": "51 Dupin, N., Nielsen, F. & Talbi, E. Dynamic Programming heuristic for k-means Clustering among a 2-dimensional Pareto Frontier. 7th Internat. Conf. on Metaheuristics and Nature Inspired Computing (2018)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.179, + 0.887, + 0.213 + ], + "angle": 0, + "content": "52 Sakabe, M. & Yagiura, M. An efficient tabu search algorithm for the linear ordering problem. J Adv Mech Des Syst Manuf 16, JAMDSM0041-JAMDSM0041 (2022)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.214, + 0.887, + 0.247 + ], + "angle": 0, + "content": "53 Delgado, A. & Thaler, J. Quantum annealing for jet clustering with thrust. Phys Rev D 106 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.249, + 0.887, + 0.283 + ], + "angle": 0, + "content": "54 Mao, Z., Matsuda, Y., Tamura, R. & Tsuda, K. Chemical design with GPU-based Ising machines. Digit Discov 2, 1098-1103 (2023)." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.09, + 0.887, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.859, + 0.937, + 0.882, + 0.951 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.092, + 0.204, + 0.116 + ], + "angle": 0, + "content": "Figures" + }, + { + "type": "image", + "bbox": [ + 0.13, + 0.143, + 0.868, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.131, + 0.435, + 0.866, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.715, + 0.885, + 0.786 + ], + "angle": 0, + "content": "Fig. 1. Performance analysis of classical (IP, SA, SD, TS, PT-ICM, SA-QBSolv, and PT-ICM-QBSolv) and quantum (QA-QBSolv, and HQA) solvers on QUBO problems representing real-world optimization tasks in material science. (a) Relative accuracy and (b) solving time of the solvers." + }, + { + "type": "page_number", + "bbox": [ + 0.86, + 0.937, + 0.883, + 0.952 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.092, + 0.885, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.389, + 0.884, + 0.445 + ], + "angle": 0, + "content": "Fig. 2. The relative accuracy of the classical (IP, SA, SD, TS, and SA-QBSolv) and quantum (QA-QBSolv, and HQA) solvers for given QUBO problems. HQA is the best solver for finding the highest-quality solution for all problem sizes." + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.511, + 0.501, + 0.715 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.51, + 0.871, + 0.715 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.72, + 0.885, + 0.792 + ], + "angle": 0, + "content": "Fig. 3. Solving time of the solvers for given QUBO problems. The solving time of (a) the classical and quantum solvers and (b) the classical solvers (SA, SD, and TS) for small QUBO problems. Quantum solvers do not scale in solving time as the problem size increases, which is a great advantage over classical counterparts." + }, + { + "type": "page_number", + "bbox": [ + 0.86, + 0.937, + 0.885, + 0.952 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.127, + 0.109, + 0.506, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.107, + 0.882, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.312, + 0.884, + 0.364 + ], + "angle": 0, + "content": "Fig. 4. Performance of the QA-QBSolv solver with different decomposition sizes. (a) Relative accuracy and (b) Solving time of the QA-QBSolv solver for given QUBO problems with different sub-QUBO sizes." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.86, + 0.937, + 0.882, + 0.951 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.114, + 0.092, + 0.355, + 0.109 + ], + "angle": 0, + "content": "Supplementary Information" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.155, + 0.359, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.154, + 0.616, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.156, + 0.854, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.127, + 0.297, + 0.372, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.297, + 0.616, + 0.437 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.621, + 0.297, + 0.862, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.445, + 0.885, + 0.568 + ], + "angle": 0, + "content": "Fig. S1. Comparison of QUBO matrices for real-world optimization and Max-Cut problems. (a-c) QUBO matrices representing the optimization of planar multilayered structures (PMLs) with problem sizes of (a) 100, (b) 500, and (c) 3,000. The dense configurations of these matrices reflect the fully connected nature of interactions in material optimization problems. (d-f) QUBO matrices derived from Max-Cut problem instances in the G-set\\(^{S1}\\): (d) G5, (e) G15, and (f) G40. These matrices exhibit sparse configurations, with relatively few pairwise interactions compared to their maximum possible connections." + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.627, + 0.492, + 0.836 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.626, + 0.885, + 0.836 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.837, + 0.885, + 0.873 + ], + "angle": 0, + "content": "Fig. S2. Example QUBO matrices. The size of the given QUBO problems is (a) 120 and (b) 1,000 with a standard deviation of 0.1." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.333, + 0.133, + 0.693, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.354, + 0.885, + 0.425 + ], + "angle": 0, + "content": "Fig. S3. Time complexity of simulated annealing (SA), steepest descent (SD), and tabu search (TS). This plot is from calculation results based on the theoretical time complexity (see 2-4-2. Computational Time in the main text), so it does not have metrics. The plot agrees well with the solving time plot depicted in Fig. 2b." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.115, + 0.108, + 0.884, + 0.178 + ], + "angle": 0, + "content": "Table S1. Statistical properties of QUBO coefficients for real-world optimization problems. The table summarizes the average (avg) and standard deviation (std) of QUBO coefficients across different problem sizes \\((n)\\). The average values of the coefficients are close to zero, and the standard deviation ranges from 0.2 to 2." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.193, + 0.892, + 0.263 + ], + "angle": 0, + "content": "
<table><tr><td>n</td><td>50</td><td>100</td><td>200</td><td>500</td><td>1000</td><td>3000</td><td>5000</td><td>10000</td></tr>
<tr><td>avg</td><td>0.0025</td><td>-0.0014</td><td>0.0003</td><td>-0.0004</td><td>0.0001</td><td>0.0016</td><td>0.0012</td><td>0.0008</td></tr>
<tr><td>std</td><td>0.2491</td><td>0.7440</td><td>0.8083</td><td>1.3319</td><td>1.5090</td><td>1.9519</td><td>2.0372</td><td>2.0706</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.117, + 0.348, + 0.884, + 0.383 + ], + "angle": 0, + "content": "Table S2. Density of Max-Cut problem instances. These instances feature sparse QUBO matrices with a density lower than \\(6\\%\\)." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.398, + 0.87, + 0.653 + ], + "angle": 0, + "content": "
<table><tr><td>Instances</td><td># Nodes</td><td># Edges</td><td># Maximum Edges</td><td>Density (%)</td></tr>
<tr><td>G5</td><td>800</td><td>19,176</td><td>319,600</td><td>6.0000</td></tr>
<tr><td>G10</td><td>800</td><td>19,176</td><td>319,600</td><td>6.0000</td></tr>
<tr><td>G15</td><td>800</td><td>4,661</td><td>319,600</td><td>1.4583</td></tr>
<tr><td>G20</td><td>800</td><td>4,672</td><td>319,600</td><td>1.4618</td></tr>
<tr><td>G30</td><td>2,000</td><td>19,900</td><td>1,999,000</td><td>0.9954</td></tr>
<tr><td>G40</td><td>2,000</td><td>11,766</td><td>1,999,000</td><td>0.5885</td></tr>
<tr><td>G50</td><td>3,000</td><td>6,000</td><td>4,498,500</td><td>0.1333</td></tr>
<tr><td>G55</td><td>5,000</td><td>12,498</td><td>12,497,500</td><td>0.1000</td></tr>
<tr><td>G60</td><td>7,000</td><td>17,148</td><td>24,496,500</td><td>0.0700</td></tr>
<tr><td>G70</td><td>10,000</td><td>9,999</td><td>49,995,000</td><td>0.0200</td></tr></table>
" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.091, + 0.207, + 0.108 + ], + "angle": 0, + "content": "References" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.125, + 0.726, + 0.144 + ], + "angle": 0, + "content": "S1 Ye, Y. [online] Available: https://web.stanford.edu/~yyyve/yyye/Gset/." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_origin.pdf b/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7b17a26438ce082fc59377d211f4697e9fe39a49 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa4f0b122f050cbd662720c507f99bce8d680d96568e25cf3670101ab1f26835 +size 2441168 diff --git a/data/2025/2504_06xxx/2504.06201/full.md b/data/2025/2504_06xxx/2504.06201/full.md new file mode 100644 index 0000000000000000000000000000000000000000..72b6220213c6c5826ba3df2a4d7f8da85f79db30 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/full.md @@ -0,0 +1,301 @@ +# Quantum Annealing for Combinatorial Optimization: A Benchmarking Study + +Authors: Seongmin Kim $^{1,4}$ , Sang-Woo Ahn $^{2}$ , In-Saeng Suh $^{4}$ , Alexander W. Dowling $^{3,*}$ , Eungkyu Lee $^{2,*}$ , and Tengfei Luo $^{1,*}$ + +$^{1}$ Department of Aerospace and Mechanical Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States. +$^{2}$ Department of Electronic Engineering, Kyung Hee University; Yongin-Si, Gyeonggi-do 17104, Republic of Korea. +$^{3}$ Department of Chemical and Biomolecular Engineering, University of Notre Dame; Notre Dame, Indiana 46556. United States. +$^{4}$ National Center for Computational Sciences, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37830, United States. +*Corresponding author. Email: adowling@nd.edu, eleest@khu.ac.kr, and tluo@nd.edu + +Quantum annealing (QA) has the potential to significantly improve solution quality and reduce time complexity in solving combinatorial optimization problems compared to classical optimization methods. However, due to the limited number of qubits and their connectivity, the QA hardware did not show such an advantage over classical methods in past benchmarking studies. Recent advancements in QA with more than 5,000 qubits, enhanced qubit connectivity, and the hybrid architecture promise to realize the quantum advantage. Here, we use a quantum annealer with state-of-the-art techniques and benchmark its performance against classical solvers. To compare their performance, we solve over 50 optimization problem instances represented by large and dense Hamiltonian matrices using quantum and classical solvers. The results demonstrate that a state-of-the-art quantum solver has higher accuracy ( $\sim 0.013\%$ ) and a significantly faster problem-solving time ( $\sim 6,561\times$ ) than the best classical solver. Our results highlight the advantages of leveraging QA over classical counterparts, particularly in hybrid configurations, for achieving high accuracy and substantially reduced problem solving time in large-scale real-world optimization problems. + +Keywords: quantum advantage, quantum-classical hybrid algorithm, quantum annealing, combinatorial optimization, benchmarking study + +# Introduction + +Quantum computers mark a paradigm shift to tackle challenging tasks that classical computers cannot solve in a practical timescale $^{1,2}$ . 
The quantum annealer is a special quantum computer designed to solve combinatorial optimization problems with problem size-independent time complexity $^{3-5}$ . This unique quantum annealing (QA) capability is based on the so-called adiabatic process $^{6,7}$ . During this process, entangled qubits naturally evolve into the ground state of a given Hamiltonian to find the optimal vector of binary decisions for the corresponding quadratic unconstrained binary optimization (QUBO) problem $^{8-10}$ . The adiabatic theorem of quantum mechanics ensures that QA identifies the optimal solution regardless of the size and landscape of + +the combinatorial parametric space, highlighting QA as a powerful and practical solver $^{11-14}$ . The ability to efficiently explore high-dimensional combinational spaces makes QA capable of handling a wide range of optimization tasks $^{4,5,10,15,16}$ . + +The potential merit of QA motivates the systematic comparison with classical counterparts (e.g., simulated annealing, integer programming, steepest descent method, tabu search, and parallel tempering with isoenergetic cluster moves), focusing on the solution quality and the time complexity. While previous benchmarking studies showed some advantages of QA, most used low-dimensional or the sparse configuration of QUBO matrices due to the lack of available qubits in the QA hardware and poor topology to connect qubits $^{17-19}$ . For example, O'Malley et al. $^{17}$ compared the performance of QA with classical methods (mathematical programming), but they limited the number of binary variables to 35 due to the QA hardware limitation. Similarly, Tasseff et al. $^{18}$ highlighted the potential advantages of QA compared to classical methods (such as simulated annealing, integer programming, and Markov chain Monte Carlo) for sparse optimization problems containing up to 5,000 decision variables and 40,000 quadratic terms. Haba et al. $^{19}$ demonstrated that a classical solver (integer programming) could be faster than QA for small problems, e.g., $\sim 100$ decision variables. Consequently, these benchmarking studies show that QA methods and their classical counterparts can exhibit similar solution quality and time complexity. However, such low-dimensional or sparse QUBOs considered in the previous benchmarking studies are challenging to map to a wide range of practical problems, which usually require high-dimensional and dense configuration of QUBO matrices $^{4,5,10,20}$ . For example, in our previous QA optimization of one-dimensional and two-dimensional optical metamaterials, the QUBO matrices exhibit these properties (Fig. S1) $^{4,5,16,20}$ . + +The state-of-the-art QA hardware (D-Wave Advantage System) features more than 5,000 qubits, advanced topology to connect qubits, and efficient hybrid algorithms (e.g., Leap Hybrid sampler). For example, the recent development (e.g., Pegasus topology) has increased qubit connectivity from 6 to $15^{21-23}$ . Improved qubit connectivity reduces the need for complex embedding processes, which map problem variables to physical qubits on the hardware. With better connectivity, such as in D-Wave's Pegasus topology, the embedding process becomes more efficient and can better preserve the structure of dense optimization problems. This enhancement allows the quantum annealer to increase the potential for finding high-quality solutions[24,25]. 
In addition, a QUBO decomposition algorithm (i.e., QBSolv) splits a large QUBO matrix into small pieces of subQUBO matrices, allowing us to handle a QUBO matrix with dimensions higher than the maximum number of qubits in the QA hardware[26,27]. Given these advancements, it is imperative to study the performance of the state-of-the-art QA system for high-dimensional and dense configuration of QUBO matrices, and systemically compare solution quality and the time complexity with the classical counterparts. + +In this work, we benchmark the performance of quantum solvers against classical algorithms in solving QUBO problems with large and dense configurations to represent real-world optimization problems. We analyze the solution quality and the required time to solve these benchmark problems using several quantum and classical solvers. This benchmarking study provides important insights into employing QA in practical problem-solving scenarios. + +# Results + +We present a benchmarking study on combinatorial optimization problems representing real-world scenarios, e.g., materials design, characterized by dense and large QUBO matrices (Fig. S1). These problems are non-convex and exhibit a highly complex energy landscape, making it challenging and time-consuming to identify accurate solutions. Classical solvers, such as integer programming (IP), simulated annealing (SA), steepest descent (SD), tabu search (TS), parallel tempering with isoenergetic cluster moves (PT-ICM), perform well for small-scale problems. However, they are often relatively inaccurate for larger problems (problem size $\geq 1,000$ ; Fig. 1a). In particular, SD and TS show low relative accuracy compared to other solvers. The combination of PT and ICM leverages the strengths of both techniques: PT facilitates crossing energy barriers, while ICM ensures exploration of the solution space, effectively covering broad and diverse regions. This makes PT-ICM particularly effective for exploring complex optimization spaces and enhancing convergence toward the global optimum[46,47]. However, the performance of PT-ICM can be problem-dependent[48]. While it can work well for sparse problems, its effectiveness decreases for denser problems[46]. Consequently, although SA, and PT-ICM perform better than SD and TS, they also fail to find high-quality solutions for large-scale problems. + +To address these limitations, QUBO decomposition strategies can be employed to improve the relative accuracy. For example, integrating QUBO decomposition with classical solvers (e.g., SA-QBSolv and PT-ICM-QBSolv) improves their performance. Nonetheless, these approaches often remain insufficient for handling massive problems effectively, particularly considering problem-solving time (Fig. 1b), which will be further discussed in the following. On the other hand, quantum solvers provide excellent performance for solving these dense and large-scale problems representing real-world optimization scenarios. Although QA can perform excellently for small problems, it has difficulty solving large and dense QUBOs due to the limited number of qubits $(5,000+)$ and connectivity (15). Several prior studies reported that QA may not be efficient since it cannot effectively handle dense and large QUBOs due to hardware limitations[23,53,54]. However, when it runs with the QUBO decomposition strategy (i.e., QA-QBSolv), large-scale problems ( $n \geq 100$ ) can be effectively handled. 
Furthermore, hybrid QA (HQA), which integrates quantum and classical approaches, also can solve large-scale problems efficiently. As a result, the quantum solvers consistently identify high-quality solutions across all problem sizes (Fig. 1a). + +Computational time is also a critical metric for evaluating solver performance. Classical solvers exhibit rapidly increasing solving times as problem sizes grow, making them impractical for large-scale combinatorial optimization problems (Fig. 1b). While SD and TS are faster than other classical solvers, their relative accuracies are low, as can be seen in Fig. 1a. It is worth noting that the SA, and PT-ICM solvers struggle to handle problems with more than 3,000 variables due to excessively long solving time or computational constraints (e.g., memory limits). Although the IP solver is faster than SA and PT-ICM, its solving time increases greatly with problem size. The QUBO decomposition strategy significantly reduces computational time, yet quantum solvers remain faster than their classical counterparts across all problem sizes. For instance, for a problem size of 5,000, the solving time for HQA is $0.0854\mathrm{s}$ and for QA-QBSolv is $74.59\mathrm{s}$ , compared to $167.4\mathrm{s}$ and $195.1\mathrm{s}$ for SA-QBSolv and PT-ICM-QBSolv, respectively, highlighting superior efficiency of the quantum solvers. + +To further evaluate scalability, we conduct a systematic benchmarking study on QUBO problems (size: up to 10,000 variables), designed to mimic real-world scenarios through randomly generated elements. PT-ICM is excluded from this analysis due to excessive solving times compared to other solvers (Fig. 1b). As shown in Fig. 2, classical solvers (IP, SA, SD, and TS) are accurate for smaller problems but become inaccurate as the problem size increases. Consistent with the results in Fig. 1, the SD and TS solvers exhibit low relative accuracy even for a relatively small problem (e.g., 2,000). IP and SA are more accurate than SD and TS but fail to identify the optimal state for large problems. It is known that IP can provide global optimality guarantees $^{40}$ , but our study highlights that proving a solution is globally optimal is challenging for large and dense problems. For example, in one case ( $n = 7,000$ ), the optimality gap remains as large as $\sim 17.73\%$ , where the best bound is -19,660 while the solution obtained from the IP solver is -16,700, with the optimality gap not narrowing even after 2 hours of runtime. The relative accuracy can be improved by employing the QUBO decomposition strategy (e.g., SA-QBSolv), yet it still fails to identify high-quality solutions for problem sizes exceeding 4,000. In contrast, quantum solvers demonstrate superior accuracy for large-scale problems. Notably, the HQA solver consistently outperforms all other methods, reliably identifying the best solution regardless of problem size (Fig. 2). + +Fig. 3a shows that the solving time rapidly increases as the problem size increases for the classical solvers, indicating that solving combinatorial optimization problems with classical solvers can become intractable for large-size problems (Fig. 3b). The solving time trends with increasing problem size agree well with the theoretical time complexities of the classical solvers (Fig. 3b and Fig. S3, see 2-4-2. Computational Time section). While the IP solver can be faster than other classical solvers, it also requires significant time for large problems (e.g., $n > 5,000$ ). 
The use of the QUBO decomposition strategy dramatically reduces the solving time, but the quantum solvers consistently outpace classical counterparts (Fig. 3a). For example, the solving time ( $n = 10,000$ ) is $0.0855$ s for HQA, $101$ s for QA-QBSolv, and $561$ s for SA-QBSolv. + +Decomposing a large QUBO into smaller pieces leads to a higher relative accuracy, as a solver can find better solutions for each decomposed QUBOs, mitigating the current hardware limitations. Note that the accuracy of QA for QUBOs with problem sizes of 30 and 100 is, respectively, 1.0 and 0.9956 (without leveraging the QUBO decomposition method). Hence, the accuracy of QA-QBSolv with a sub-QUBO size of 30 is higher than that with a sub-QUBO size of 100, as decomposed QUBOs with a smaller size fit the QA hardware better (Fig. 4a). However, a smaller sub-QUBO size results in a greater number of sub-QUBOs after decomposition, leading to increased time required to solve all decomposed problems (Fig. 4b). It is noted that the QA-QBSolv solver does not guarantee finding the best solution for large problems (size $>4,000$ ), resulting in lower accuracies regardless of sub-QUBO sizes, as can be seen in Fig. 2 and Fig. 4a. + +Our results show that HQA, which incorporates QA with classical algorithms to overcome the current quantum hardware limitations, is currently the most efficient solver for complex real-world problems that require the formulation of dense and large QUBOs. In this context, we define "Quantum Advantage" as the ability of a quantum-enhanced solver to achieve high accuracy and significantly faster problem-solving time compared to the classical solvers for large-scale optimization problems. Our findings suggest that leveraging quantum resources, particularly in hybrid configurations, can provide a computational advantage over classical approaches. Besides, as the current state of HQA demonstrates, we expect QA will have much higher accuracy and + +require much shorter time to solve QUBO problems with the development of the quantum hardware with more qubits and better qubit connectivity. + +# Discussion + +This work comprehensively compares state-of-the-art QA hardware and software against several classical optimization solvers for large and dense QUBO problems (up to 10,000 variables, fully connected interactions). The classical solvers struggled to solve large-scale problems, but their performance can be improved when combined with the QUBO decomposition method (i.e., QBSolv). Nevertheless, they become inaccurate and inefficient with increasing problem size, indicating that classical methods can face challenges for complex real-world problems represented by large and dense QUBO matrices. On the contrary, HQA performs significantly better than its classical counterparts, exhibiting the highest accuracy ( $\sim 0.013\%$ improvement) and shortest time to obtain solutions ( $\sim 6,561 \times$ acceleration) for 10,000 dimensional QUBO problems, demonstrating 'Quantum Advantage' for large and dense QUBO problems. Pure QA and QA with the QUBO decomposition method still exhibit limitations in solving large problems due to the current QA hardware limitations (e.g., number of qubits and qubit connectivity). However, we anticipate that QA will eventually reach the efficiency of HQA with the ongoing development of the quantum hardware. Thus, we expect QA to demonstrate true 'Quantum Advantage' in the future. 
+ +# Methods + +# Definition of a QUBO + +QA hardware is designed to efficiently solve combinatorial optimization problems that are formulated with a QUBO matrix, which can be given by $^{28,29}$ : + +$$ +y = \sum_ {i = 1} ^ {n} \sum_ {j = i} ^ {n} Q _ {i, j} x _ {i} x _ {j} \tag {1} +$$ + +where $Q_{i,j}$ is the $i$ -th row and $j$ -th column real-number element of the QUBO matrix $(\mathbf{Q})$ , which is an $n \times n$ Hermitian, i.e., $\mathbf{Q} \in \mathbb{R}^{n \times n}$ , and $x_i$ is the $i$ -th element of a binary vector $\mathbf{x}$ with a length of $n$ , i.e., $\mathbf{x} \in [0,1^n]$ . $Q_{i,j}$ is often referred to as a linear coefficient for $i = j$ and a quadratic interaction coefficient for $i \neq j$ . The objective of QA is to identify the optimal binary vector of a given QUBO, which minimizes the scalar output $y$ as29: + +$$ +\boldsymbol {x} ^ {*} = \underset {x} {\operatorname {a r g m i n}} y \tag {2} +$$ + +In optimization problems, the linear coefficients correspond to cost or benefit terms associated with individual variables, while the quadratic coefficients represent interaction terms or dependencies between pairs of variables. These coefficients can be learned using machine learning models, such as the factorization machine (FM), trained on datasets containing input structures and their corresponding performance metrics. By mapping these learned coefficients into a QUBO formulation, we effectively represent an energy function of a material system or other real-world + +optimization problem. This QUBO then describes the optimization space, enabling the identification of the optimal state with the best performance $^{30,31}$ . + +# Methods to Solve a QUBO + +Various methods have been proposed to solve QUBO problems. For our benchmarking study, we consider seven representative methods: QA, hybrid QA (HQA), integer programming (IP), simulated annealing (SA), steepest descent (SD), tabu search (TS), parallel tempering with isoenergetic cluster moves (PT-ICM). Below, we provide a brief introduction to each of the solvers used in solving combinatorial optimization problems: + +# Quantum Annealing and Hybrid Quantum Annealing + +QA starts with a superposition state for all qubits, which has the lowest energy state of the initial Hamiltonian $(H_0)$ . In the annealing process, the system evolves toward the lowest energy state of the final Hamiltonian (also called a problem Hamiltonian, $H_{p}$ ) by minimizing the influence of the initial Hamiltonian. The measured state at the end of the annealing is supposed to be the ground state of $H_{p}$ , which can be expressed as the following equation32,33: + +$$ +H (t / t _ {a}) = A (t / t _ {a}) H _ {0} + B (t / t _ {a}) H _ {p} \tag {3} +$$ + +Here, $t$ is the elapsed annealing time, and $t_a$ is the total annealing time. Equation (3) evolves from $A(t / t_a) = 1$ , $B(t / t_a) \approx 0$ at the beginning of the annealing $(t / t_a = 0)$ to $A(t / t_a) \approx 0$ , $B(t / t_a) = 1$ at the end of the annealing $(t / t_a = 1)$ . Sufficiently slow evolution from $H_0$ to $H_p$ enables the quantum system to stay at the ground state, which leads to the identification of the optimal solution of a given combinatorial optimization problem3,34. We use D-Wave Systems' quantum annealer (Advantage 4.1) to solve the problems using QA, and we set the number of reads for QA to 1,000 with a total annealing time of $20~\mu s$ . We select the best solution corresponding to the lowest energy state found among 1,000 reads. 
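As a concrete illustration of equations (1) and (2), the sketch below builds a small random upper-triangular QUBO, evaluates its objective, and recovers the optimal binary vector by exhaustive search. It is a toy example only: the matrix size, random seed, and helper names (`qubo_energy`, `brute_force_minimum`) are illustrative choices rather than part of this study, and the hardware submission path is noted only in a comment because it requires D-Wave Leap access.

```python
# Toy sketch (not the study's code): evaluate the QUBO objective of Eq. (1) and
# find x* of Eq. (2) by brute force; feasible only for very small n.
import itertools
import numpy as np

rng = np.random.default_rng(0)
n = 12                                     # small enough for exhaustive search
Q = np.triu(rng.normal(0.0, 0.1, (n, n)))  # dense upper-triangular QUBO matrix

def qubo_energy(x, Q):
    """y = sum over i <= j of Q_ij * x_i * x_j for a 0/1 vector x (Eq. 1)."""
    return float(x @ Q @ x)

def brute_force_minimum(Q):
    """Exhaustive search over all 2^n binary vectors (Eq. 2)."""
    best_x, best_y = None, np.inf
    for bits in itertools.product((0, 1), repeat=Q.shape[0]):
        x = np.array(bits)
        y = qubo_energy(x, Q)
        if y < best_y:
            best_x, best_y = x, y
    return best_x, best_y

x_star, y_star = brute_force_minimum(Q)
print("optimal energy:", y_star)
print("optimal binary vector:", x_star)

# On hardware, the same matrix would be wrapped in a dimod BinaryQuadraticModel and
# sampled with, e.g., EmbeddingComposite(DWaveSampler()).sample(bqm, num_reads=1000,
# annealing_time=20); this is omitted here since it needs Leap credentials.
```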
+ +The D-Wave Ocean software development kit (SDK, ver. 3.3.0) provides many useful libraries, which include quantum or classical samplers such as the QA, HQA, SA, SD, and TS. They allow us to solve QUBO problems $^{22,35,36}$ . We employ these samplers, which are implemented in the D-wave Ocean SDK, for the benchmarking study. Classical or QA solvers often benefit from decomposition algorithms to identify a high-quality solution (i.e., an optimal solution or a good solution close to the global optimum) for large QUBO problems. Hence, the decomposition of a QUBO matrix into sub-QUBOs is very useful when the size of QUBO matrix is larger than the physical volume of a sampler (i.e., QUBO size > physical number of qubits in QA or memory capacity of a classical computer). We employ the QBSolv package implemented in D-wave Ocean SDK for QUBO decomposition. The QBSolv splits a QUBO matrix into smaller QUBO matrices, and each of them is sequentially solved by classical or QA solvers. This algorithm enables us to handle a wide range of complex real-world problems $^{21,22,37}$ . The size of the decomposed QUBOs is set to 30 unless otherwise specified. HQA (Leap Hybrid solver), developed by D-Wave systems, also decomposes large QUBO into smaller subproblems well-suited for QA's QPU, and then aggregates the results $^{27,38}$ . The detailed algorithm of HQA, however, is not publicly released. We utilize a D-Wave sampler (dwave-system 1.4.0) for SA, SD, and TS with a specified number of reads (1,000) and default settings for other parameters. Furthermore, we employ D-Wave hybrid framework for PT-ICM. + +# Integer Programming + +IP uses branch-and-bound, cutting planes, and other methods to search the solution space for optimal integer decisions and prove global optimality within a tolerance (gap). We use Gurobi (version 10.0.2)39 for benchmarking with the default settings (0.1% global optimality gap) plus a two-hour time limit and 240 GB software memory limit per optimization problem. The benchmark QUBO problem is implemented in the Pyomo modeling environment (version 6.6.2)40. We also experimented with a large gap and observed the first identified integer solution often had a poor objective function value. These results are not further reported for brevity. + +# Simulated Annealing + +SA, which is inspired by the annealing process in metallurgy, is a probabilistic optimization algorithm designed to approximate a global optimum of a given objective function. It is considered a metaheuristic method, which can be applied to a wide range of optimization problems $^{41,42}$ . In SA, temperature and cooling schedule are major factors that determine how extensively the algorithm explores the solution space $^{43}$ . This algorithm often identifies near-optimal solutions but cannot guarantee that local or global optimality conditions are satisfied. For SA, the hyperparameters are configured as follows: 1,000 reads, 1,000 sweeps, a 'random' initial state generation, and a 'geometric' temperature schedule. + +# Steepest Descent + +SD operates by employing variable flips to reduce the energy of a given QUBO through local minimization computations rather than relying on a calculated gradient in a traditional gradient descent algorithm $^{44}$ . This algorithm is computationally inexpensive and beneficial for local refinement; thus, it can be used to search for local optima. In our benchmarking study, SD utilizes hyperparameters set to 1,000 reads and a 'random' strategy for initial state generation. 
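For reference, a minimal sketch of calling the classical Ocean samplers on a dense QUBO is shown below. It assumes a recent Ocean release in which the classical samplers are bundled under `dwave.samplers`; the SDK version cited above ships them as the separate `neal` and `greedy` packages instead, and the problem size and read counts here are illustrative rather than the benchmark settings.

```python
# Minimal sketch (our illustration, not the benchmark scripts): Ocean's classical
# SA and SD samplers on a dense random QUBO. Newer Ocean SDKs expose these under
# dwave.samplers; older releases use neal.SimulatedAnnealingSampler and
# greedy.SteepestDescentSolver with the same sample() interface.
import numpy as np
import dimod
from dwave.samplers import SimulatedAnnealingSampler, SteepestDescentSolver

rng = np.random.default_rng(1)
n = 120                                    # smallest benchmark size used in this study
Q = np.triu(rng.normal(0.0, 0.1, (n, n)))  # dense, fully connected QUBO
bqm = dimod.BinaryQuadraticModel.from_qubo(
    {(i, j): Q[i, j] for i in range(n) for j in range(i, n)})

sa = SimulatedAnnealingSampler().sample(bqm, num_reads=100, num_sweeps=1000)
sd = SteepestDescentSolver().sample(bqm, num_reads=100)
print("SA best energy:", sa.first.energy)
print("SD best energy:", sd.first.energy)

# TabuSampler (timeout given in ms) follows the same sample(bqm, ...) pattern, and the
# IP route instead expresses the quadratic objective over Binary variables in Pyomo
# and hands the model to Gurobi.
```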
+ +# Tabu Search + +TS is designed to solve combinatorial and discrete optimization problems by using memory to guide the search for better solutions, as introduced by Glover $^{45}$ . This algorithm can escape already visited local minima by remembering those points (called 'Tabu List' to keep track of moves during the search), aiming to identify high-quality solutions in a large solution space. This algorithm works well for combinatorial optimization problems with small search spaces. + +However, it can be hard to evaluate neighboring solutions and to maintain and update the Tabu List with increasing problem sizes. The hyperparameter settings for TS are as follows: 1,000 reads, a timeout of $100\mathrm{ms}$ , and 'random' initial state generation. + +# Parallel Tempering with Isoenergetic Cluster Moves (PT-ICM) + +PT-ICM is an advanced Monte Carlo method designed to navigate optimization space, such as QUBO problems $^{46-48}$ . PT operates by maintaining multiple replicas of the system at different temperatures and allowing exchanges between replicas based on a Metropolis criterion. This approach helps lower-temperature replicas escape local minima with the aid of higher-temperature replicas. ICM identifies clusters of variables that can flip without changing the system's energy $^{46}$ . In this study, the hyperparameters for PT-ICM are set as follows: the number of sweeps is 1,000, the number of replicas is 10, and the number of iterations is 10. + +# Benchmarking Problems + +# Real-world problems + +Material optimization is selected to represent real-world problems, with the design of planar multilayers (PMLs) optical film as a testbed for benchmarking. PMLs can be seen in many applications. For example, they have been explored for transparent radiative cooling windows to address global warming by emitting thermal radiation through the atmospheric window ( $8\mu \mathrm{m} < \lambda < 13\mu \mathrm{m}$ )4, while transmitting visible photons. PMLs consist of layers with one of four dielectric materials: silicon dioxide, silicon nitride, aluminum oxide, and titanium dioxide. The configuration of these layers can be expressed as a binary vector, where each layer is assigned a two-digit binary label. Optical characteristics and corresponding figure-of-merit (FOM) of the PML can be calculated by solving Maxwell's equations using the transfer matrix method (TMM). To formulate QUBOs, layer configurations (input binary vectors) and their FOMs (outputs) are used to train the FM model. FM learns the linear and quadratic coefficients, effectively modeling the optimization landscape of the material system. QUBO matrices are then generated using these coefficients30,31. PML configurations are randomly generated for training datasets, and their FOMs are calculated using TMM. The resulting QUBO matrices represent real-world materials optimization problems, characterized by highly dense (fully connected) configurations (Fig. S1), which are used for the benchmarking study in Fig. 1. + +# Benchmarking problems + +We formulate QUBO matrices with random elements to further systematically explore scalability (Fig. 2 and Fig. 3), following the characteristics of QUBOs from real-world problems, for the benchmarking study as the following: + +- Problem size: The problem size, corresponding to the length of a binary vector $(n)$ , varies from 120 to 10,000 (120, 200, 500, 1,000, 1,500, 2,000, 2,500, 3,000, 4,000, 5,000, 6,000, 7,000, 8,000, 9,000 and 10,000). 
- Distribution of elements: For each problem size, four QUBO matrices with different distributions of elements are studied. These elements are random numbers with a mean value of 0 and standard deviations of 0.001, 0.01, 0.1, or 1. These distributions reflect the variability observed in QUBO coefficients derived from real-world problems (Table S1). A QUBO configured with elements having a large deviation yields a significant variation in the energy landscape, potentially resulting in high energy barriers that must be overcome to find the ground state.
- Density of matrices: The density of QUBO matrices reflects the proportion of pairwise interactions among variables relative to the maximum possible interactions. Fully connected QUBOs, such as those derived from real-world problems, represent cases where all variables interact with each other. For example, in layered photonic structures, each layer interacts with every other layer, influencing optical responses, which leads to a fully connected QUBO. In contrast, Max-Cut problems typically result in sparse QUBOs, where only a subset of variables (nodes) interact through edges. The maximum number of interaction coefficients (i.e., the number of edges in Max-Cut problems) is $nC_2$, where $n$ denotes the problem size. The density of a QUBO can be calculated as:

$$
\text{density} = \frac{\text{number of interaction coefficients}}{\text{maximum number of interaction coefficients}} \tag{4}
$$

For example, a benchmark problem instance (G10) with 800 nodes and 19,176 edges has a density of $6\%$, calculated as: density $= 19,176 / 319,600 = 0.06$. The density of Max-Cut problems can be adjusted by changing the number of edges, with typical instances having densities ranging from $0.02\%$ to $6\%$ (Fig. S1, Table S2). In contrast, real-world problems feature fully connected configurations, corresponding to a density of $100\%$. QUBOs for this benchmarking study have dense matrices fully filled with real-number elements in the upper triangular part (i.e., fully connected graph nodes, Fig. S2). This configuration aims to approximate real-world optimization problems, which usually require a dense QUBO matrix$^{4,28}$.

# Performance Metrics: Relative Accuracy and Computational Time

# Relative Accuracy

For small-scale problems, brute-force search guarantees the identification of the global optimum by evaluating all possible solutions. However, this approach becomes infeasible for large-scale problems due to the exponential growth of the search space. An IP solver, such as Gurobi, utilizes the branch-and-bound method to efficiently explore the solution space and prove global optimality within an optimality gap. However, due to computational limitations or time constraints, IP may struggle to find the global optimum for large-scale problems. To address this challenge in our benchmarking study, we employ a 'Relative Accuracy' metric to compare the relative performance of different solvers. Relative accuracy is defined as the ratio of a solver's objective value to the best objective found across all solvers:

$$
\text{Relative Accuracy} = \text{Solution}_{\text{solver}} / \text{Solution}_{\text{best}} \tag{5}
$$

This metric provides a way to evaluate the solution quality when the global optimum cannot be definitively found or proven for large-scale problem instances.
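Both quantities are simple to compute in practice. The sketch below generates a random benchmark-style QUBO as described above, evaluates its density via equation (4), and applies the relative-accuracy metric of equation (5) to a set of invented solver energies; the helper names and the example energies are illustrative only and do not correspond to any result reported in this work.

```python
# Illustrative sketch (not the study's scripts): a random benchmark QUBO, its density
# per Eq. (4), and the relative-accuracy metric of Eq. (5).
import numpy as np

def random_dense_qubo(n, std, seed=0):
    """Upper-triangular QUBO with zero-mean Gaussian elements (std between 0.001 and 1)."""
    rng = np.random.default_rng(seed)
    return np.triu(rng.normal(0.0, std, (n, n)))

def density(Q):
    """Share of nonzero off-diagonal couplings relative to n*(n-1)/2 (Eq. 4)."""
    n = Q.shape[0]
    return np.count_nonzero(np.triu(Q, k=1)) / (n * (n - 1) / 2)

def relative_accuracy(energies):
    """Eq. (5): each solver's objective divided by the best (lowest) objective found."""
    best = min(energies.values())          # energies are negative; lower is better
    return {name: e / best for name, e in energies.items()}

Q = random_dense_qubo(n=120, std=0.1)
print("density of the random QUBO:", density(Q))         # ~1.0, i.e., fully connected
print("G10 density:", 19_176 / (800 * 799 / 2))           # = 0.06, matching the text
print(relative_accuracy({"HQA": -512.3, "SA": -508.1, "TS": -471.9}))  # made-up energies
```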
Note that the best solution is the lowest value among the solutions obtained from all solvers since the solvers are designed to find the lowest energy state (generally negative values for the QUBOs used in this study). The relative accuracies of the solvers are plotted as a function of problem sizes. In Fig. 1, the relative accuracy represents the average value calculated from three different QUBOs that represent material optimization, and in Fig. 2, it represents the average from four different QUBOs with varying standard deviations for each problem size (ranging from 120 to 10,000). Error bars on the plot represent the standard deviation of accuracies calculated from the four different QUBOs for each problem size, relative to the average values. By definition, the relative accuracy is 1.0 when the solver finds a solution with the best-known objective function value (equation 5). + +# Computational Time + +Computational time is another important factor in determining the solvers' performance. Combinatorial optimization problems are considered NP-hard, so increasing problem sizes can lead to an explosion of search space, posing challenges in optimization processes. We measure the computational time dedicated solely to solving given problems, excluding problem reading time, queue time, or communication time between the local computer and quantum annealer. This is consistent with other benchmarking studies[17,18]. For problems solved on D-Wave systems' QPU for QA, the execution time includes programming and sampling times (anneal, readout, and delay time). QPU access time is calculated for all of them after programmed anneal-read cycles, corresponding to the time charged to users in their allocations, which is used as the computational time for QA and HQA. Classical solvers (SA, SD, TS, and PT-ICM) run on a workstation (AMD + +Ryzen Threadripper PRO 3975WX @ 3.5 GHz processor with 32 cores and 32GB of RAM), and IP (Gurobi) run on a cluster node (an Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz processor with 24 cores and 256 GB of RAM). Problem reading time can be significant when the problem size is large, but it is excluded from the computational time consideration. We measure the time solely taken to solve given problems with classical solvers. In Fig. 1b and Fig. 3, the solution time for classical and quantum solvers is presented as a function of problem sizes. Note that a QUBO problem is NP-hard $^{49}$ . Evaluating the energy of a given solution has a computational cost of $O(n^{2})$ , where $n$ (= problem size) is the number of variables. The number of reads or sweeps does not scale with $n$ , but the cost for each sweep scales as $O(n)$ for SA. Consequently, the theoretical time complexities of the classical solvers are known as $O(n^{3})$ for SA $^{50}$ , $O(n^{2})$ for SD $^{51}$ , and $O(n^{2})$ for TS $^{52}$ . On the other hand, the theoretical time complexity of the quantum solvers can be considered constant. + +# Data availability + +All data generated and analyzed during the study are available from the corresponding author upon reasonable request. + +# Code availability + +The codes used for generating and analyzing data are available from the corresponding author upon reasonable request. + +# Acknowledgements + +This research used resources of the Oak Ridge Leadership Computing Facility at the Oak Ridge National Laboratory, which is supported by the Office of Science of the U.S. Department of Energy under Contract No. DE-AC05-00OR22725. 
This research was supported by the Quantum Computing Based on Quantum Advantage Challenge Research (RS-2023-00255442) through the National Research Foundation of Korea (NRF) funded by the Korean Government (Ministry of Science and ICT (MSIT)). + +# Author information + +Authors and Affiliations + +Department of Aerospace and Mechanical Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States. + +Seongmin Kim & Tengfei Luo + +Department of Electronic Engineering, Kyung Hee University; Yongin-Si, Gyeonggi-do 17104, Republic of Korea. + +Sangwoo Ahn & Eungkyu Lee + +Department of Chemical and Biomolecular Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States. + +Alexander Dowling + +National Center for Computational Sciences, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37830, United States. + +Seongmin Kim & In-Saeng Suh + +# Contributions + +S.K., A.D., E.L., and T.L. conceived the idea. S.K. and S.A. performed benchmarking studies to generate data. A.D. and S.K. implemented the IP benchmark. S.K. analyzed the data with advice from I.S., A.D., E.L., and T.L. All authors discussed the results and contributed to the writing of the manuscript. + +# Corresponding authors + +Correspondence to Alexander W. Dowling, Eungkyu Lee, or Tengfei Luo. + +# Ethics declarations + +# Competing Interests + +The authors declare no competing interests. + +# References + +1 Arute, F. et al. Quantum supremacy using a programmable superconducting processor. Nature 574, 505-510 (2019). +2 Daley, A. J. et al. Practical quantum advantage in quantum simulation. Nature 607, 667-676 (2022). +3 Johnson, M. W. et al. Quantum annealing with manufactured spins. Nature 473, 194-198 (2011). +4 Kim, S. et al. High-Performance Transparent Radiative Cooler Designed by Quantum Computing. ACS Energy Lett 7, 4134-4141 (2022). +5 Kim, S., Jung, S., Bobbitt, A., Lee, E. & Luo, T. Wide-angle spectral filter for energy-saving windows designed by quantum annealing-enhanced active learning. Cell Rep Phys Sci (2024). +6 Li, R. Y., Di Felice, R., Rohs, R. & Lidar, D. A. Quantum annealing versus classical machine learning applied to a simplified computational biology problem. npj Quantum Inf 4 (2018). +7 Vinci, W., Albash, T. & Lidar, D. A. Nested quantum annealing correction. npj Quantum Inf 2 (2016). +8 Santoro, G. E. & Tosatti, E. Optimization using quantum mechanics: quantum annealing through adiabatic evolution. J Phys A: Math Gen 39, R393-R431 (2006). +9 Mandrà, S., Zhu, Z. & Katzgraber, H. G. Exponentially Biased Ground-State Sampling of Quantum Annealing Machines with Transverse-Field Driving Hamiltonians. Phys Rev Lett 118, 070502 (2017). +10 Kitai, K. et al. Designing metamaterials with quantum annealing and factorization machines. Phys Rev Res 2, 013319 (2020). + +11 Santoro, G. E., Martoňák, R., Tosatti, E. & Car, R. Theory of Quantum Annealing of an Ising Spin Glass. Science 295, 2427-2430 (2002). +12 Hen, I. & Spedalieri, F. M. Quantum Annealing for Constrained Optimization. Phys Rev Appl 5 (2016). +13 Kadowaki, T. & Nishimori, H. Quantum annealing in the transverse Ising model. Phys Rev E 58, 5355-5363 (1998). +14 Morita, S. & Nishimori, H. Mathematical foundation of quantum annealing. J Math Phys 49, 125210 (2008). +15 Wilson, B. A. et al. Machine learning framework for quantum sampling of highly constrained, continuous optimization problems. Appl Phys Rev 8, 041418 (2021). +16 Kim, S., Wu, S., Jian, R., Xiong, G. & Luo, T.
Design of a High-Performance Titanium Nitride Metastructure-Based Solar Absorber Using Quantum Computing-Assisted Optimization. ACS Appl Mater Interfaces 15, 40606-40613 (2023). +17 O'Malley, D., Vesselinov, V. V., Alexandrov, B. S. & Alexandrov, L. B. Nonnegative/Binary matrix factorization with a D-Wave quantum annealer. PLoS One 13, e0206653 (2018). +18 Tasseff, B. et al. On the Emerging Potential of Quantum Annealing Hardware for Combinatorial Optimization. arXiv:2210.04291 (2022). +19 Haba, R., Ohzeki, M. & Tanaka, K. Travel time optimization on multi-AGV routing by reverse annealing. Sci Rep 12, 17753 (2022). +20 Kim, S. et al. Quantum annealing-aided design of an ultrathin-metamaterial optical diode. Nano Converg 11, 16 (2024). +21 Pelofske, E., Hahn, G. & Djidjev, H. N. Noise dynamics of quantum annealers: estimating the effective noise using idle qubits. Quantum Sci Technol 8 (2023). +22 Yoneda, Y., Shimada, M., Yoshida, A. & Shirakashi, J.-i. Searching for optimal experimental parameters with D-Wave quantum annealer for fabrication of Au atomic junctions. Appl Phys Exp 16 (2023). +23 Willsch, D. et al. Benchmarking Advantage and D-Wave 2000Q quantum annealers with exact cover problems. Quantum Inf Process 21 (2022). +24 Yarkoni, S., Raponi, E., Back, T. & Schmitt, S. Quantum annealing for industry applications: introduction and review. Rep Prog Phys 85 (2022). +25 Kasi, S., Warburton, P., Kaewell, J. & Jamieson, K. A Cost and Power Feasibility Analysis of Quantum Annealing for NextG Cellular Wireless Networks. IEEE Transactions on Quantum Engineering 4, 1-17 (2023). +26 Teplukhin, A., Kendrick, B. K. & Babikov, D. Solving complex eigenvalue problems on a quantum annealer with applications to quantum scattering resonances. Phys Chem Chem Phys 22, 26136-26144 (2020). +27 Atobe, Y., Tawada, M. & Togawa, N. Hybrid Annealing Method Based on subQUBO Model Extraction With Multiple Solution Instances. IEEE Trans Comput 71, 2606-2619 (2022). +28 Zaman, M., Tanahashi, K. & Tanaka, S. PyQUBO: Python Library for Mapping Combinatorial Optimization Problems to QUBO Form. IEEE Trans Comput 71, 838-850 (2022). +29 Tao, M. et al. in IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW) 557-566 (2020). + +30 Kim, S. et al. A review on machine learning-guided design of energy materials. Progress in Energy 6 (2024). +31 Kim, S., Luo, T., Lee, E. & Suh, I.-S. Distributed Quantum Approximate Optimization Algorithm on Integrated High-Performance Computing and Quantum Computing Systems for Large-Scale Optimization. arXiv:2407.20212 (2024). +32 Gemeinhardt, F., Garmendia, A., Wimmer, M., Weder, B. & Leymann, F. Quantum Combinatorial Optimization in the NISQ Era: A Systematic Mapping Study. ACM Comput Surv 56, 1-36 (2023). +33 Willsch, M., Willsch, D., Jin, F., De Raedt, H. & Michielsen, K. Benchmarking the quantum approximate optimization algorithm. Quantum Inf Process 19 (2020). +34 Hauke, P., Katzgraber, H. G., Lechner, W., Nishimori, H. & Oliver, W. D. Perspectives of quantum annealing: methods and implementations. Rep Prog Phys 83, 054401 (2020). +35 Carugno, C., Ferrari Dacrema, M. & Cremonesi, P. Evaluating the job shop scheduling problem on a D-wave quantum annealer. Sci Rep 12, 6539 (2022). +36 Irie, H., Liang, H., Doi, T., Gongyo, S. & Hatsuda, T. Hybrid quantum annealing via molecular dynamics. Sci Rep 11, 8426 (2021). +37 Raymond, J. et al. Hybrid Quantum Annealing for Larger-than-QPU Lattice-structured Problems.
ACM Transactions on Quantum Computing 4, 1-30 (2023). +38 Ceselli, A. & Premoli, M. On good encodings for quantum annealer and digital optimization solvers. Sci Rep 13, 5628 (2023). +39 Song, J., Lanka, R., Yue, Y. & Dilkina, B. A General Large Neighborhood Search Framework for Solving Integer Linear Programs. 34th Conference on Neural Information Processing Systems (NeurIPS 2020) (2020). +40 Bynum, M. L. et al. Pyomo — Optimization Modeling in Python, 3rd edition. Springer Optimization and Its Applications 67 (2021). +41 Alnowibet, K. A., Mahdi, S., El-Alem, M., Abdelawwad, M. & Mohamed, A. W. Guided Hybrid Modified Simulated Annealing Algorithm for Solving Constrained Global Optimization Problems. Mathematics 10 (2022). +42 Rere, L. M. R., Fanany, M. I. & Arymurthy, A. M. Simulated Annealing Algorithm for Deep Learning. Procedia Comput Sci 72, 137-144 (2015). +43 Gonzales, G. V. et al. A comparison of simulated annealing schedules for constructable design of complex cavities intruded into conductive walls with internal heat generation. Energy 93, 372-382 (2015). +44 Wadayama, T. et al. Gradient descent bit flipping algorithms for decoding LDPC codes. IEEE Trans Commun 58, 1610-1614 (2010). +45 Glover, F., Laguna, M. & Martí, R. Principles of Tabu Search. Handbook of Approximation Algorithms and Metaheuristics 23 (2007). +46 Aramon, M. et al. Physics-Inspired Optimization for Quadratic Unconstrained Problems Using a Digital Annealer. Frontiers in Physics 7 (2019). +47 Zhu, Z., Ochoa, A. J. & Katzgraber, H. G. Fair sampling of ground-state configurations of binary optimization problems. arXiv:1903.07600 (2019). +48 Mandrà, S. & Katzgraber, H. G. A deceptive step towards quantum speedup detection. Quantum Science and Technology 3 (2018). +49 Yasuoka, H. Computational Complexity of Quadratic Unconstrained Binary Optimization. arXiv:2109.10048 (2022). + +50 Hansen, P. B. Simulated Annealing. Electrical Engineering and Computer Science Technical Reports 170 (1992). +51 Dupin, N., Nielsen, F. & Talbi, E. Dynamic Programming heuristic for k-means Clustering among a 2-dimensional Pareto Frontier. 7th Internat. Conf. on Metaheuristics and Nature Inspired Computing (2018). +52 Sakabe, M. & Yagiura, M. An efficient tabu search algorithm for the linear ordering problem. J Adv Mech Des Syst Manuf 16, JAMDSM0041-JAMDSM0041 (2022). +53 Delgado, A. & Thaler, J. Quantum annealing for jet clustering with thrust. Phys Rev D 106 (2022). +54 Mao, Z., Matsuda, Y., Tamura, R. & Tsuda, K. Chemical design with GPU-based Ising machines. Digit Discov 2, 1098-1103 (2023). + +![](images/a8cb85a9316795dc6f82454dfcbe57b04e7e91a649d20ea1acb66f8e645c0d90.jpg) +Figures + +![](images/691843c0ba4b3d84d880dd78aac48d6de4dedbd2f73f05e2d97a542a22d9e6ad.jpg) +Fig. 1. Performance analysis of classical (IP, SA, SD, TS, PT-ICM, SA-QBSolv, and PT-ICM-QBSolv) and quantum (QA-QBSolv and HQA) solvers on QUBO problems representing real-world optimization tasks in material science. (a) Relative accuracy and (b) solving time of the solvers. + +![](images/3d096df4f9d88e5730c1ff85f4fc8195cc0ced5d1e2af08b268e6fae176f0453.jpg) +Fig. 2. The relative accuracy of the classical (IP, SA, SD, TS, and SA-QBSolv) and quantum (QA-QBSolv and HQA) solvers for given QUBO problems. HQA is the best solver for finding the highest-quality solution for all problem sizes. + +![](images/b2225fd80582c1a6d58446009e52cb7580120fa78d8fb8c21a6a179b22e90082.jpg) +Fig. 3. Solving time of the solvers for given QUBO problems.
The solving time of (a) the classical and quantum solvers and (b) the classical solvers (SA, SD, and TS) for small QUBO problems. The solving time of the quantum solvers does not grow with problem size, which is a great advantage over their classical counterparts. + +![](images/2fba1a69a62f326d6496fa444805cf7cb9d4f83d11029d8a05f043c24cc1c55c.jpg) + +![](images/e8de5aea7294c5fa21c710d033ec625ad4b86fcd3d99b909d3310eb92ec79d99.jpg) +Fig. 4. Performance of the QA-QBSolv solver with different decomposition sizes. (a) Relative accuracy and (b) solving time of the QA-QBSolv solver for given QUBO problems with different sub-QUBO sizes. + +![](images/0dc799fad8a578e4fc936a6f60b7cd51a35fb007df006f3fbbccca020db0e587.jpg) + +![](images/590134f4c031fae88771d2e5aa825341c5ba16298aab8804b412e18b733c476d.jpg) + +![](images/ca3232bf1275cd24819fdc30b46c463658d46f4f071687fa7d455c1996177a86.jpg) + +![](images/fed61778896eedc872a2ee953f0454d1c1d1df2e5d2dd11118b668eedd8f7389.jpg) + +![](images/6deccb0dcac8e41c85f23661aab5cb85956088933112941c178b3170cb344792.jpg) +Fig. S1. Comparison of QUBO matrices for real-world optimization and Max-Cut problems. (a-c) QUBO matrices representing the optimization of planar multilayered structures (PMLs) with problem sizes of (a) 100, (b) 500, and (c) 3,000. The dense configurations of these matrices reflect the fully connected nature of interactions in material optimization problems. (d-f) QUBO matrices derived from Max-Cut problem instances in the G-set$^{S1}$: (d) G5, (e) G15, and (f) G40. These matrices exhibit sparse configurations, with relatively few pairwise interactions compared to their maximum possible connections. + +![](images/1417de3df697e4e053ffea4df6d77a5ebdaefb50b3187f1972f3050156c75f08.jpg) + +![](images/2a620e676a2beae0c82e88cad52c70b73e03b2af6941c4c6ef5de056c159c0c8.jpg) + +![](images/1d459dddad5c45442c94fd4248e3d25df27831ab5659e8ee16015f75ac1fbd30.jpg) +Fig. S2. Example QUBO matrices. The size of the given QUBO problems is (a) 120 and (b) 1,000, with a standard deviation of 0.1. + +![](images/31ba301c7d69a67dd38eb2e4932a89306684577806e4211b5dd4f78757c26c08.jpg) + +![](images/bd938d5eeace304163712a0d6f853d54e179edfffc5c73dc6712007366e9be79.jpg) +Fig. S3. Time complexity of simulated annealing (SA), steepest descent (SD), and tabu search (TS). This plot is calculated from the theoretical time complexities (see 2-4-2. Computational Time in the main text), so it shows no measured values. The plot agrees well with the solving time plot depicted in Fig. 3b. + +Table S1. Statistical properties of QUBO coefficients for real-world optimization problems. The table summarizes the average (avg) and standard deviation (std) of QUBO coefficients across different problem sizes $(n)$. The average values of the coefficients are close to zero, and the standard deviations range from 0.2 to 2. + +
| n | 50 | 100 | 200 | 500 | 1000 | 3000 | 5000 | 10000 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| avg | 0.0025 | -0.0014 | 0.0003 | -0.0004 | 0.0001 | 0.0016 | 0.0012 | 0.0008 |
| std | 0.2491 | 0.7440 | 0.8083 | 1.3319 | 1.5090 | 1.9519 | 2.0372 | 2.0706 |
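For concreteness, the sketch below shows how a dense benchmark QUBO of the kind described in the Methods could be generated (zero-mean random upper-triangular coefficients with a chosen standard deviation, i.e. 100% density), how coefficient statistics of the kind reported in Table S1 would be computed, and how the objective of equation (1) is evaluated for a candidate binary solution, which costs $O(n^{2})$ as noted in the Computational Time section. It assumes NumPy; the function names are illustrative and not taken from the authors' code.

```python
import numpy as np

def make_dense_qubo(n: int, std: float, seed: int = 0) -> np.ndarray:
    """Dense benchmark QUBO: every upper-triangular entry (including the diagonal)
    is drawn i.i.d. from a normal distribution with mean 0 and the given std,
    i.e. a fully connected problem with 100% density."""
    rng = np.random.default_rng(seed)
    return np.triu(rng.normal(0.0, std, size=(n, n)))

def coefficient_stats(Q: np.ndarray) -> tuple:
    """Average and standard deviation of the upper-triangular coefficients,
    the quantities tabulated in Table S1 for the real-world QUBOs."""
    coeffs = Q[np.triu_indices(Q.shape[0])]
    return float(coeffs.mean()), float(coeffs.std())

def qubo_energy(Q: np.ndarray, x: np.ndarray) -> float:
    """Objective of equation (1): y = sum_i sum_{j>=i} Q_ij x_i x_j.
    With Q upper-triangular, the matrix-vector form below costs O(n^2)."""
    return float(x @ Q @ x)

n, std = 1_000, 0.1
Q = make_dense_qubo(n, std)
x = np.random.default_rng(1).integers(0, 2, size=n)   # a random binary candidate
print(coefficient_stats(Q))      # mean ~0, std ~0.1 by construction
print(qubo_energy(Q, x))
```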
+ +Table S2. Density of Max-Cut problem instances. These instances feature sparse QUBO matrices with a density lower than $6\%$ . + +
| Instances | # Nodes | # Edges | # Maximum Edges | Density (%) |
| --- | --- | --- | --- | --- |
| G5 | 800 | 19,176 | 319,600 | 6.0000 |
| G10 | 800 | 19,176 | 319,600 | 6.0000 |
| G15 | 800 | 4,661 | 319,600 | 1.4583 |
| G20 | 800 | 4,672 | 319,600 | 1.4618 |
| G30 | 2,000 | 19,900 | 1,999,000 | 0.9954 |
| G40 | 2,000 | 11,766 | 1,999,000 | 0.5885 |
| G50 | 3,000 | 6,000 | 4,498,500 | 0.1333 |
| G55 | 5,000 | 12,498 | 12,497,500 | 0.1000 |
| G60 | 7,000 | 17,148 | 24,496,500 | 0.0700 |
| G70 | 10,000 | 9,999 | 49,995,000 | 0.0200 |
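As a quick cross-check of equation (4) against the densities tabulated above, the short snippet below (plain Python, written for illustration only; node and edge counts copied from Table S2) recomputes the density of a few G-set instances.

```python
def max_cut_density(num_nodes: int, num_edges: int) -> float:
    """Equation (4) for a Max-Cut instance: edges divided by the
    maximum possible number of edges, nC2 = n(n-1)/2."""
    max_edges = num_nodes * (num_nodes - 1) // 2
    return num_edges / max_edges

for name, nodes, edges in [("G10", 800, 19_176), ("G60", 7_000, 17_148), ("G70", 10_000, 9_999)]:
    print(name, f"{100 * max_cut_density(nodes, edges):.4f}%")
# G10 6.0000%, G60 0.0700%, G70 0.0200%, in agreement with Table S2
```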
+ +References + +S1 Ye, Y. [online] Available: https://web.stanford.edu/~yyyve/yyye/Gset/. \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06201/images/0dc799fad8a578e4fc936a6f60b7cd51a35fb007df006f3fbbccca020db0e587.jpg b/data/2025/2504_06xxx/2504.06201/images/0dc799fad8a578e4fc936a6f60b7cd51a35fb007df006f3fbbccca020db0e587.jpg new file mode 100644 index 0000000000000000000000000000000000000000..348e5769274e9c937455ffb16037f5e555000538 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/0dc799fad8a578e4fc936a6f60b7cd51a35fb007df006f3fbbccca020db0e587.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c0d7f307d5096c0a3c584bb9201d00f015bcc19a1bfa791c91cc6f12c3a7ebd +size 31684 diff --git a/data/2025/2504_06xxx/2504.06201/images/1417de3df697e4e053ffea4df6d77a5ebdaefb50b3187f1972f3050156c75f08.jpg b/data/2025/2504_06xxx/2504.06201/images/1417de3df697e4e053ffea4df6d77a5ebdaefb50b3187f1972f3050156c75f08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..934f6b7bead06bd733c96daf4ec20d15daac9c45 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/1417de3df697e4e053ffea4df6d77a5ebdaefb50b3187f1972f3050156c75f08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b93c08bf1ec936eaafab68454172156f748cd13309b768e0373f592dd934afa +size 17270 diff --git a/data/2025/2504_06xxx/2504.06201/images/1d459dddad5c45442c94fd4248e3d25df27831ab5659e8ee16015f75ac1fbd30.jpg b/data/2025/2504_06xxx/2504.06201/images/1d459dddad5c45442c94fd4248e3d25df27831ab5659e8ee16015f75ac1fbd30.jpg new file mode 100644 index 0000000000000000000000000000000000000000..782307ef6bb4641350f74cd3d33313e086898283 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/1d459dddad5c45442c94fd4248e3d25df27831ab5659e8ee16015f75ac1fbd30.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cf2c7414259f527f03f01ae070f70e88e7fe361651e795e4c02041e3d335548 +size 40689 diff --git a/data/2025/2504_06xxx/2504.06201/images/2a620e676a2beae0c82e88cad52c70b73e03b2af6941c4c6ef5de056c159c0c8.jpg b/data/2025/2504_06xxx/2504.06201/images/2a620e676a2beae0c82e88cad52c70b73e03b2af6941c4c6ef5de056c159c0c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e990e3a603d5713cef5091f6737e2cb337e513b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/2a620e676a2beae0c82e88cad52c70b73e03b2af6941c4c6ef5de056c159c0c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d721dcccf93fbd248a74b99bc5c1c3bd85033e6161ffa03d32ffde297fd2ef2 +size 15427 diff --git a/data/2025/2504_06xxx/2504.06201/images/2fba1a69a62f326d6496fa444805cf7cb9d4f83d11029d8a05f043c24cc1c55c.jpg b/data/2025/2504_06xxx/2504.06201/images/2fba1a69a62f326d6496fa444805cf7cb9d4f83d11029d8a05f043c24cc1c55c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ba6d3ae4069517639573e21153045ac3e378034 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/2fba1a69a62f326d6496fa444805cf7cb9d4f83d11029d8a05f043c24cc1c55c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf2ba9d7c150fb967db1c80f6eeae2a5f46552df173fd8123022197b28964a99 +size 24480 diff --git a/data/2025/2504_06xxx/2504.06201/images/31ba301c7d69a67dd38eb2e4932a89306684577806e4211b5dd4f78757c26c08.jpg b/data/2025/2504_06xxx/2504.06201/images/31ba301c7d69a67dd38eb2e4932a89306684577806e4211b5dd4f78757c26c08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6bccc5b74371872f50c78ab711b72038d6e36f16 --- 
/dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/31ba301c7d69a67dd38eb2e4932a89306684577806e4211b5dd4f78757c26c08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5ea779f391806d6e8964c99810675a7f827f676e3118e00b6c94193eba95c36 +size 36754 diff --git a/data/2025/2504_06xxx/2504.06201/images/3d096df4f9d88e5730c1ff85f4fc8195cc0ced5d1e2af08b268e6fae176f0453.jpg b/data/2025/2504_06xxx/2504.06201/images/3d096df4f9d88e5730c1ff85f4fc8195cc0ced5d1e2af08b268e6fae176f0453.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de9d3c5d959913e1a0849522b16ab4f20dbed9f0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/3d096df4f9d88e5730c1ff85f4fc8195cc0ced5d1e2af08b268e6fae176f0453.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b013858134ec03a8a0af144216acd46c1c9e15dc8c11d1c96ab2a8d93b8a849 +size 90839 diff --git a/data/2025/2504_06xxx/2504.06201/images/590134f4c031fae88771d2e5aa825341c5ba16298aab8804b412e18b733c476d.jpg b/data/2025/2504_06xxx/2504.06201/images/590134f4c031fae88771d2e5aa825341c5ba16298aab8804b412e18b733c476d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..51f3c5ae6bb14c64d8e9f796b3e441760e8eae70 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/590134f4c031fae88771d2e5aa825341c5ba16298aab8804b412e18b733c476d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1993233f4390cf0a8c6f80e0998f107de8c2c5fe7cf9625a2518a2e650cbd1a +size 18046 diff --git a/data/2025/2504_06xxx/2504.06201/images/691843c0ba4b3d84d880dd78aac48d6de4dedbd2f73f05e2d97a542a22d9e6ad.jpg b/data/2025/2504_06xxx/2504.06201/images/691843c0ba4b3d84d880dd78aac48d6de4dedbd2f73f05e2d97a542a22d9e6ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ebb19f435425fe6be4710080bfb922fa779ab26f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/691843c0ba4b3d84d880dd78aac48d6de4dedbd2f73f05e2d97a542a22d9e6ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:715acbe2f0a16428aaa51e9b0790ed6694544f40526a07eae1ebac477417ec75 +size 57895 diff --git a/data/2025/2504_06xxx/2504.06201/images/6deccb0dcac8e41c85f23661aab5cb85956088933112941c178b3170cb344792.jpg b/data/2025/2504_06xxx/2504.06201/images/6deccb0dcac8e41c85f23661aab5cb85956088933112941c178b3170cb344792.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5cf38fdde762c67535648dc22a0fcff46569f9c3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/6deccb0dcac8e41c85f23661aab5cb85956088933112941c178b3170cb344792.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3daeba3a8a460c6e7226f9eb38b89da2244ed2f87398ad1b7760c55e530c7046 +size 22726 diff --git a/data/2025/2504_06xxx/2504.06201/images/75fdef3b52ce20c9f00a46dd1bec1956d3e2715545f4da4161b0157e1ccf1f0b.jpg b/data/2025/2504_06xxx/2504.06201/images/75fdef3b52ce20c9f00a46dd1bec1956d3e2715545f4da4161b0157e1ccf1f0b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2a5476cba316061b1982431041756b944507e29 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/75fdef3b52ce20c9f00a46dd1bec1956d3e2715545f4da4161b0157e1ccf1f0b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb9586528b4e73e06f2c2d57d0b382acf69c307d4e3481e76fe1197587444620 +size 6766 diff --git a/data/2025/2504_06xxx/2504.06201/images/7e2d928dd61b4f39f2ede4b6fad96500fe72db9030197351e71c02b3d19d4b8d.jpg 
b/data/2025/2504_06xxx/2504.06201/images/7e2d928dd61b4f39f2ede4b6fad96500fe72db9030197351e71c02b3d19d4b8d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6486ec95d2de3f19c10beefdb14f57ab1bcc3741 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/7e2d928dd61b4f39f2ede4b6fad96500fe72db9030197351e71c02b3d19d4b8d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:410c118a955d9649855f9a27b8a7817322a1d3863107fe3f16148d690c6ac670 +size 81002 diff --git a/data/2025/2504_06xxx/2504.06201/images/7ecbacb2f7527e7f764b668782c06e35fcdde7c952d5ef5c6fdc96ac74ec4d52.jpg b/data/2025/2504_06xxx/2504.06201/images/7ecbacb2f7527e7f764b668782c06e35fcdde7c952d5ef5c6fdc96ac74ec4d52.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee46be12c7df76ffe79ec3bd967d973f1aafaca8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/7ecbacb2f7527e7f764b668782c06e35fcdde7c952d5ef5c6fdc96ac74ec4d52.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c6ede9ce2a142a2a0345b55d97ca5ee18db880c9ee0a21b2af1c035ccdf3084 +size 9033 diff --git a/data/2025/2504_06xxx/2504.06201/images/8943641fd2b17bb66d8fcdc431898fc2377b4501654faa139b8dc2fecaed8fea.jpg b/data/2025/2504_06xxx/2504.06201/images/8943641fd2b17bb66d8fcdc431898fc2377b4501654faa139b8dc2fecaed8fea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b92ec2a6affb39861d859478a822e679396d281c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/8943641fd2b17bb66d8fcdc431898fc2377b4501654faa139b8dc2fecaed8fea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01098ed17b322624b33c305f0b0485488bc786a28a8d2ea4c46934a18a20f006 +size 4042 diff --git a/data/2025/2504_06xxx/2504.06201/images/9537d088a04967bfad78eb524d6fa7fc1eac919745a49fe9f57def7534fd8be3.jpg b/data/2025/2504_06xxx/2504.06201/images/9537d088a04967bfad78eb524d6fa7fc1eac919745a49fe9f57def7534fd8be3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ec334c67b74914b1750546837e17f1101511ab2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/9537d088a04967bfad78eb524d6fa7fc1eac919745a49fe9f57def7534fd8be3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54c6b6a87891254d3b4d06d69e7ba2b7baa7edecccdcad82cb0e7738a2705c03 +size 35144 diff --git a/data/2025/2504_06xxx/2504.06201/images/a8cb85a9316795dc6f82454dfcbe57b04e7e91a649d20ea1acb66f8e645c0d90.jpg b/data/2025/2504_06xxx/2504.06201/images/a8cb85a9316795dc6f82454dfcbe57b04e7e91a649d20ea1acb66f8e645c0d90.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9e8cbbafe20289391403345f70fd648feeea5ab --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/a8cb85a9316795dc6f82454dfcbe57b04e7e91a649d20ea1acb66f8e645c0d90.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:797d9580ca6f679a14795c907a0ed6ca1d5f7a50653c9c520c71d06ad3f0fe26 +size 71536 diff --git a/data/2025/2504_06xxx/2504.06201/images/b2225fd80582c1a6d58446009e52cb7580120fa78d8fb8c21a6a179b22e90082.jpg b/data/2025/2504_06xxx/2504.06201/images/b2225fd80582c1a6d58446009e52cb7580120fa78d8fb8c21a6a179b22e90082.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ab4320bbb119fe5afcfdc1a2c03c30f833a8e22 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/b2225fd80582c1a6d58446009e52cb7580120fa78d8fb8c21a6a179b22e90082.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:64cf67abf6058d4d71bb5719142f18622d8a3dfb3b6479fe5eddf8a1c5e5cd1e +size 34015 diff --git a/data/2025/2504_06xxx/2504.06201/images/bd938d5eeace304163712a0d6f853d54e179edfffc5c73dc6712007366e9be79.jpg b/data/2025/2504_06xxx/2504.06201/images/bd938d5eeace304163712a0d6f853d54e179edfffc5c73dc6712007366e9be79.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4edbc3d16bfa3b5b9ceac182c7350515cc1a0b6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/bd938d5eeace304163712a0d6f853d54e179edfffc5c73dc6712007366e9be79.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:877c092437ca527894b15b9e0cf5bafb82e3c067fd268a24c26f4f931e1d17ca +size 22644 diff --git a/data/2025/2504_06xxx/2504.06201/images/ca3232bf1275cd24819fdc30b46c463658d46f4f071687fa7d455c1996177a86.jpg b/data/2025/2504_06xxx/2504.06201/images/ca3232bf1275cd24819fdc30b46c463658d46f4f071687fa7d455c1996177a86.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6fd504783c111ba2d966d3d95b5ba339de63f36 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/ca3232bf1275cd24819fdc30b46c463658d46f4f071687fa7d455c1996177a86.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:612566e0982460c5c213a0aec7de1ac88c7561762746d456b4e0542c5f677dea +size 14997 diff --git a/data/2025/2504_06xxx/2504.06201/images/e3cfbe667099ce38f9968b4d6af5d3b563ff2a89cc053dae585ccfa165f4c2a7.jpg b/data/2025/2504_06xxx/2504.06201/images/e3cfbe667099ce38f9968b4d6af5d3b563ff2a89cc053dae585ccfa165f4c2a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b4cf1bc79c45b8d4bb1a3056853e3f59d07d332 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/e3cfbe667099ce38f9968b4d6af5d3b563ff2a89cc053dae585ccfa165f4c2a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f5834d55980518bd1b39be950adf15f42fd7943a98afc60cc0d2494337609e4 +size 16776 diff --git a/data/2025/2504_06xxx/2504.06201/images/e8de5aea7294c5fa21c710d033ec625ad4b86fcd3d99b909d3310eb92ec79d99.jpg b/data/2025/2504_06xxx/2504.06201/images/e8de5aea7294c5fa21c710d033ec625ad4b86fcd3d99b909d3310eb92ec79d99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f931ad11d7ac09be939333cae0966f60a4373ab5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/e8de5aea7294c5fa21c710d033ec625ad4b86fcd3d99b909d3310eb92ec79d99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da9c89a14adc4ef40a79e7af2670c357a245ba9fd04ef617b7776934c1d2b22b +size 33621 diff --git a/data/2025/2504_06xxx/2504.06201/images/fe7a95acb3f32f095491bb1dd697b024c145bbb563c66dff6e82cd2f05625c34.jpg b/data/2025/2504_06xxx/2504.06201/images/fe7a95acb3f32f095491bb1dd697b024c145bbb563c66dff6e82cd2f05625c34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4df508d955cc8c074918bb83d1c32f41b3b76a21 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/images/fe7a95acb3f32f095491bb1dd697b024c145bbb563c66dff6e82cd2f05625c34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:003b175048f76d0fd12a78303c5718e22a52444712060bee2d644131a6c64a06 +size 6616 diff --git a/data/2025/2504_06xxx/2504.06201/images/fed61778896eedc872a2ee953f0454d1c1d1df2e5d2dd11118b668eedd8f7389.jpg b/data/2025/2504_06xxx/2504.06201/images/fed61778896eedc872a2ee953f0454d1c1d1df2e5d2dd11118b668eedd8f7389.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83ebf04ec06397be541e5e4ea3b86042ca2517de --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06201/images/fed61778896eedc872a2ee953f0454d1c1d1df2e5d2dd11118b668eedd8f7389.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:643e772dadef2e88d0a98eb1272106a65fae5d5fac083d2eadf44196708dcc17 +size 15679 diff --git a/data/2025/2504_06xxx/2504.06201/layout.json b/data/2025/2504_06xxx/2504.06201/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..70c1153e9a637f38100295be86856821de89a496 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06201/layout.json @@ -0,0 +1,9119 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 112, + 71, + 499, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 71, + 499, + 109 + ], + "spans": [ + { + "bbox": [ + 112, + 71, + 499, + 109 + ], + "type": "text", + "content": "Quantum Annealing for Combinatorial Optimization: A Benchmarking Study" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "spans": [ + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "text", + "content": "Authors: Seongmin Kim" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "inline_equation", + "content": "^{1,4}" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "text", + "content": ", Sang-Woo Ahn" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "text", + "content": ", In-Saeng Suh" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "text", + "content": ", Alexander W. Dowling" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "inline_equation", + "content": "^{3,*}" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "text", + "content": ", Eungkyu Lee" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "inline_equation", + "content": "^{2,*}" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "text", + "content": ", and Tengfei Luo" + }, + { + "bbox": [ + 94, + 121, + 518, + 150 + ], + "type": "inline_equation", + "content": "^{1,*}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 163, + 538, + 286 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 68, + 163, + 538, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 163, + 538, + 190 + ], + "spans": [ + { + "bbox": [ + 68, + 163, + 538, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 68, + 163, + 538, + 190 + ], + "type": "text", + "content": "Department of Aerospace and Mechanical Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 191, + 538, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 191, + 538, + 217 + ], + "spans": [ + { + "bbox": [ + 68, + 191, + 538, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 68, + 191, + 538, + 217 + ], + "type": "text", + "content": "Department of Electronic Engineering, Kyung Hee University; Yongin-Si, Gyeonggi-do 17104, Republic of Korea." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 218, + 538, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 218, + 538, + 243 + ], + "spans": [ + { + "bbox": [ + 68, + 218, + 538, + 243 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 68, + 218, + 538, + 243 + ], + "type": "text", + "content": "Department of Chemical and Biomolecular Engineering, University of Notre Dame; Notre Dame, Indiana 46556. United States." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 246, + 538, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 246, + 538, + 270 + ], + "spans": [ + { + "bbox": [ + 68, + 246, + 538, + 270 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 68, + 246, + 538, + 270 + ], + "type": "text", + "content": "National Center for Computational Sciences, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37830, United States." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 274, + 488, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 274, + 488, + 286 + ], + "spans": [ + { + "bbox": [ + 68, + 274, + 488, + 286 + ], + "type": "text", + "content": "*Corresponding author. Email: adowling@nd.edu, eleest@khu.ac.kr, and tluo@nd.edu" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 315, + 541, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 315, + 541, + 509 + ], + "spans": [ + { + "bbox": [ + 70, + 315, + 541, + 509 + ], + "type": "text", + "content": "Quantum annealing (QA) has the potential to significantly improve solution quality and reduce time complexity in solving combinatorial optimization problems compared to classical optimization methods. However, due to the limited number of qubits and their connectivity, the QA hardware did not show such an advantage over classical methods in past benchmarking studies. Recent advancements in QA with more than 5,000 qubits, enhanced qubit connectivity, and the hybrid architecture promise to realize the quantum advantage. Here, we use a quantum annealer with state-of-the-art techniques and benchmark its performance against classical solvers. To compare their performance, we solve over 50 optimization problem instances represented by large and dense Hamiltonian matrices using quantum and classical solvers. The results demonstrate that a state-of-the-art quantum solver has higher accuracy (" + }, + { + "bbox": [ + 70, + 315, + 541, + 509 + ], + "type": "inline_equation", + "content": "\\sim 0.013\\%" + }, + { + "bbox": [ + 70, + 315, + 541, + 509 + ], + "type": "text", + "content": ") and a significantly faster problem-solving time (" + }, + { + "bbox": [ + 70, + 315, + 541, + 509 + ], + "type": "inline_equation", + "content": "\\sim 6,561\\times" + }, + { + "bbox": [ + 70, + 315, + 541, + 509 + ], + "type": "text", + "content": ") than the best classical solver. Our results highlight the advantages of leveraging QA over classical counterparts, particularly in hybrid configurations, for achieving high accuracy and substantially reduced problem solving time in large-scale real-world optimization problems." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 71, + 522, + 539, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 522, + 539, + 550 + ], + "spans": [ + { + "bbox": [ + 71, + 522, + 539, + 550 + ], + "type": "text", + "content": "Keywords: quantum advantage, quantum-classical hybrid algorithm, quantum annealing, combinatorial optimization, benchmarking study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 71, + 578, + 159, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 578, + 159, + 594 + ], + "spans": [ + { + "bbox": [ + 71, + 578, + 159, + 594 + ], + "type": "text", + "content": "Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "spans": [ + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "text", + "content": "Quantum computers mark a paradigm shift to tackle challenging tasks that classical computers cannot solve in a practical timescale" + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "text", + "content": ". The quantum annealer is a special quantum computer designed to solve combinatorial optimization problems with problem size-independent time complexity" + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "inline_equation", + "content": "^{3-5}" + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "text", + "content": ". This unique quantum annealing (QA) capability is based on the so-called adiabatic process" + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "inline_equation", + "content": "^{6,7}" + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "text", + "content": ". During this process, entangled qubits naturally evolve into the ground state of a given Hamiltonian to find the optimal vector of binary decisions for the corresponding quadratic unconstrained binary optimization (QUBO) problem" + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "inline_equation", + "content": "^{8-10}" + }, + { + "bbox": [ + 71, + 596, + 541, + 706 + ], + "type": "text", + "content": ". The adiabatic theorem of quantum mechanics ensures that QA identifies the optimal solution regardless of the size and landscape of" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 533, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 533, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 533, + 742, + 539, + 753 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 71, + 71, + 542, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 71, + 542, + 114 + ], + "spans": [ + { + "bbox": [ + 71, + 71, + 542, + 114 + ], + "type": "text", + "content": "the combinatorial parametric space, highlighting QA as a powerful and practical solver" + }, + { + "bbox": [ + 71, + 71, + 542, + 114 + ], + "type": "inline_equation", + "content": "^{11-14}" + }, + { + "bbox": [ + 71, + 71, + 542, + 114 + ], + "type": "text", + "content": ". 
The ability to efficiently explore high-dimensional combinational spaces makes QA capable of handling a wide range of optimization tasks" + }, + { + "bbox": [ + 71, + 71, + 542, + 114 + ], + "type": "inline_equation", + "content": "^{4,5,10,15,16}" + }, + { + "bbox": [ + 71, + 71, + 542, + 114 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "spans": [ + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "content": "The potential merit of QA motivates the systematic comparison with classical counterparts (e.g., simulated annealing, integer programming, steepest descent method, tabu search, and parallel tempering with isoenergetic cluster moves), focusing on the solution quality and the time complexity. While previous benchmarking studies showed some advantages of QA, most used low-dimensional or the sparse configuration of QUBO matrices due to the lack of available qubits in the QA hardware and poor topology to connect qubits" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "inline_equation", + "content": "^{17-19}" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "content": ". For example, O'Malley et al." + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "inline_equation", + "content": "^{17}" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "content": " compared the performance of QA with classical methods (mathematical programming), but they limited the number of binary variables to 35 due to the QA hardware limitation. Similarly, Tasseff et al." + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "inline_equation", + "content": "^{18}" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "content": " highlighted the potential advantages of QA compared to classical methods (such as simulated annealing, integer programming, and Markov chain Monte Carlo) for sparse optimization problems containing up to 5,000 decision variables and 40,000 quadratic terms. Haba et al." + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "inline_equation", + "content": "^{19}" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "content": " demonstrated that a classical solver (integer programming) could be faster than QA for small problems, e.g., " + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "inline_equation", + "content": "\\sim 100" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "content": " decision variables. Consequently, these benchmarking studies show that QA methods and their classical counterparts can exhibit similar solution quality and time complexity. However, such low-dimensional or sparse QUBOs considered in the previous benchmarking studies are challenging to map to a wide range of practical problems, which usually require high-dimensional and dense configuration of QUBO matrices" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "inline_equation", + "content": "^{4,5,10,20}" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "content": ". For example, in our previous QA optimization of one-dimensional and two-dimensional optical metamaterials, the QUBO matrices exhibit these properties (Fig. 
S1)" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "inline_equation", + "content": "^{4,5,16,20}" + }, + { + "bbox": [ + 71, + 127, + 542, + 389 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 402, + 542, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 402, + 542, + 596 + ], + "spans": [ + { + "bbox": [ + 71, + 402, + 542, + 596 + ], + "type": "text", + "content": "The state-of-the-art QA hardware (D-Wave Advantage System) features more than 5,000 qubits, advanced topology to connect qubits, and efficient hybrid algorithms (e.g., Leap Hybrid sampler). For example, the recent development (e.g., Pegasus topology) has increased qubit connectivity from 6 to " + }, + { + "bbox": [ + 71, + 402, + 542, + 596 + ], + "type": "inline_equation", + "content": "15^{21-23}" + }, + { + "bbox": [ + 71, + 402, + 542, + 596 + ], + "type": "text", + "content": ". Improved qubit connectivity reduces the need for complex embedding processes, which map problem variables to physical qubits on the hardware. With better connectivity, such as in D-Wave's Pegasus topology, the embedding process becomes more efficient and can better preserve the structure of dense optimization problems. This enhancement allows the quantum annealer to increase the potential for finding high-quality solutions[24,25]. In addition, a QUBO decomposition algorithm (i.e., QBSolv) splits a large QUBO matrix into small pieces of subQUBO matrices, allowing us to handle a QUBO matrix with dimensions higher than the maximum number of qubits in the QA hardware[26,27]. Given these advancements, it is imperative to study the performance of the state-of-the-art QA system for high-dimensional and dense configuration of QUBO matrices, and systemically compare solution quality and the time complexity with the classical counterparts." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 71, + 609, + 542, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 609, + 542, + 679 + ], + "spans": [ + { + "bbox": [ + 71, + 609, + 542, + 679 + ], + "type": "text", + "content": "In this work, we benchmark the performance of quantum solvers against classical algorithms in solving QUBO problems with large and dense configurations to represent real-world optimization problems. We analyze the solution quality and the required time to solve these benchmark problems using several quantum and classical solvers. This benchmarking study provides important insights into employing QA in practical problem-solving scenarios." 
+ } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 531, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 531, + 742, + 539, + 753 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 71, + 73, + 122, + 88 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 73, + 122, + 88 + ], + "spans": [ + { + "bbox": [ + 71, + 73, + 122, + 88 + ], + "type": "text", + "content": "Results" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 90, + 541, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 90, + 541, + 297 + ], + "spans": [ + { + "bbox": [ + 70, + 90, + 541, + 297 + ], + "type": "text", + "content": "We present a benchmarking study on combinatorial optimization problems representing real-world scenarios, e.g., materials design, characterized by dense and large QUBO matrices (Fig. S1). These problems are non-convex and exhibit a highly complex energy landscape, making it challenging and time-consuming to identify accurate solutions. Classical solvers, such as integer programming (IP), simulated annealing (SA), steepest descent (SD), tabu search (TS), parallel tempering with isoenergetic cluster moves (PT-ICM), perform well for small-scale problems. However, they are often relatively inaccurate for larger problems (problem size " + }, + { + "bbox": [ + 70, + 90, + 541, + 297 + ], + "type": "inline_equation", + "content": "\\geq 1,000" + }, + { + "bbox": [ + 70, + 90, + 541, + 297 + ], + "type": "text", + "content": "; Fig. 1a). In particular, SD and TS show low relative accuracy compared to other solvers. The combination of PT and ICM leverages the strengths of both techniques: PT facilitates crossing energy barriers, while ICM ensures exploration of the solution space, effectively covering broad and diverse regions. This makes PT-ICM particularly effective for exploring complex optimization spaces and enhancing convergence toward the global optimum[46,47]. However, the performance of PT-ICM can be problem-dependent[48]. While it can work well for sparse problems, its effectiveness decreases for denser problems[46]. Consequently, although SA, and PT-ICM perform better than SD and TS, they also fail to find high-quality solutions for large-scale problems." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 312, + 541, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 312, + 541, + 505 + ], + "spans": [ + { + "bbox": [ + 70, + 312, + 541, + 505 + ], + "type": "text", + "content": "To address these limitations, QUBO decomposition strategies can be employed to improve the relative accuracy. For example, integrating QUBO decomposition with classical solvers (e.g., SA-QBSolv and PT-ICM-QBSolv) improves their performance. Nonetheless, these approaches often remain insufficient for handling massive problems effectively, particularly considering problem-solving time (Fig. 1b), which will be further discussed in the following. On the other hand, quantum solvers provide excellent performance for solving these dense and large-scale problems representing real-world optimization scenarios. 
Although QA can perform excellently for small problems, it has difficulty solving large and dense QUBOs due to the limited number of qubits " + }, + { + "bbox": [ + 70, + 312, + 541, + 505 + ], + "type": "inline_equation", + "content": "(5,000+)" + }, + { + "bbox": [ + 70, + 312, + 541, + 505 + ], + "type": "text", + "content": " and connectivity (15). Several prior studies reported that QA may not be efficient since it cannot effectively handle dense and large QUBOs due to hardware limitations[23,53,54]. However, when it runs with the QUBO decomposition strategy (i.e., QA-QBSolv), large-scale problems (" + }, + { + "bbox": [ + 70, + 312, + 541, + 505 + ], + "type": "inline_equation", + "content": "n \\geq 100" + }, + { + "bbox": [ + 70, + 312, + 541, + 505 + ], + "type": "text", + "content": ") can be effectively handled. Furthermore, hybrid QA (HQA), which integrates quantum and classical approaches, also can solve large-scale problems efficiently. As a result, the quantum solvers consistently identify high-quality solutions across all problem sizes (Fig. 1a)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "spans": [ + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "text", + "content": "Computational time is also a critical metric for evaluating solver performance. Classical solvers exhibit rapidly increasing solving times as problem sizes grow, making them impractical for large-scale combinatorial optimization problems (Fig. 1b). While SD and TS are faster than other classical solvers, their relative accuracies are low, as can be seen in Fig. 1a. It is worth noting that the SA, and PT-ICM solvers struggle to handle problems with more than 3,000 variables due to excessively long solving time or computational constraints (e.g., memory limits). Although the IP solver is faster than SA and PT-ICM, its solving time increases greatly with problem size. The QUBO decomposition strategy significantly reduces computational time, yet quantum solvers remain faster than their classical counterparts across all problem sizes. For instance, for a problem size of 5,000, the solving time for HQA is " + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "inline_equation", + "content": "0.0854\\mathrm{s}" + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "text", + "content": " and for QA-QBSolv is " + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "inline_equation", + "content": "74.59\\mathrm{s}" + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "text", + "content": ", compared to " + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "inline_equation", + "content": "167.4\\mathrm{s}" + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "inline_equation", + "content": "195.1\\mathrm{s}" + }, + { + "bbox": [ + 70, + 519, + 541, + 684 + ], + "type": "text", + "content": " for SA-QBSolv and PT-ICM-QBSolv, respectively, highlighting superior efficiency of the quantum solvers." 
+ } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 532, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 532, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 532, + 742, + 539, + 753 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "spans": [ + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "type": "text", + "content": "To further evaluate scalability, we conduct a systematic benchmarking study on QUBO problems (size: up to 10,000 variables), designed to mimic real-world scenarios through randomly generated elements. PT-ICM is excluded from this analysis due to excessive solving times compared to other solvers (Fig. 1b). As shown in Fig. 2, classical solvers (IP, SA, SD, and TS) are accurate for smaller problems but become inaccurate as the problem size increases. Consistent with the results in Fig. 1, the SD and TS solvers exhibit low relative accuracy even for a relatively small problem (e.g., 2,000). IP and SA are more accurate than SD and TS but fail to identify the optimal state for large problems. It is known that IP can provide global optimality guarantees" + }, + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "type": "inline_equation", + "content": "^{40}" + }, + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "type": "text", + "content": ", but our study highlights that proving a solution is globally optimal is challenging for large and dense problems. For example, in one case (" + }, + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "type": "inline_equation", + "content": "n = 7,000" + }, + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "type": "text", + "content": "), the optimality gap remains as large as " + }, + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "type": "inline_equation", + "content": "\\sim 17.73\\%" + }, + { + "bbox": [ + 71, + 71, + 542, + 293 + ], + "type": "text", + "content": ", where the best bound is -19,660 while the solution obtained from the IP solver is -16,700, with the optimality gap not narrowing even after 2 hours of runtime. The relative accuracy can be improved by employing the QUBO decomposition strategy (e.g., SA-QBSolv), yet it still fails to identify high-quality solutions for problem sizes exceeding 4,000. In contrast, quantum solvers demonstrate superior accuracy for large-scale problems. Notably, the HQA solver consistently outperforms all other methods, reliably identifying the best solution regardless of problem size (Fig. 2)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "spans": [ + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "text", + "content": "Fig. 3a shows that the solving time rapidly increases as the problem size increases for the classical solvers, indicating that solving combinatorial optimization problems with classical solvers can become intractable for large-size problems (Fig. 3b). The solving time trends with increasing problem size agree well with the theoretical time complexities of the classical solvers (Fig. 3b and Fig. S3, see 2-4-2. Computational Time section). 
While the IP solver can be faster than other classical solvers, it also requires significant time for large problems (e.g., " + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "inline_equation", + "content": "n > 5,000" + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "text", + "content": "). The use of the QUBO decomposition strategy dramatically reduces the solving time, but the quantum solvers consistently outpace classical counterparts (Fig. 3a). For example, the solving time (" + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "inline_equation", + "content": "n = 10,000" + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "text", + "content": ") is " + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "inline_equation", + "content": "0.0855" + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "text", + "content": " s for HQA, " + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "inline_equation", + "content": "101" + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "text", + "content": " s for QA-QBSolv, and " + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "inline_equation", + "content": "561" + }, + { + "bbox": [ + 70, + 305, + 542, + 430 + ], + "type": "text", + "content": " s for SA-QBSolv." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 443, + 542, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 443, + 542, + 582 + ], + "spans": [ + { + "bbox": [ + 71, + 443, + 542, + 582 + ], + "type": "text", + "content": "Decomposing a large QUBO into smaller pieces leads to a higher relative accuracy, as a solver can find better solutions for each decomposed QUBOs, mitigating the current hardware limitations. Note that the accuracy of QA for QUBOs with problem sizes of 30 and 100 is, respectively, 1.0 and 0.9956 (without leveraging the QUBO decomposition method). Hence, the accuracy of QA-QBSolv with a sub-QUBO size of 30 is higher than that with a sub-QUBO size of 100, as decomposed QUBOs with a smaller size fit the QA hardware better (Fig. 4a). However, a smaller sub-QUBO size results in a greater number of sub-QUBOs after decomposition, leading to increased time required to solve all decomposed problems (Fig. 4b). It is noted that the QA-QBSolv solver does not guarantee finding the best solution for large problems (size " + }, + { + "bbox": [ + 71, + 443, + 542, + 582 + ], + "type": "inline_equation", + "content": ">4,000" + }, + { + "bbox": [ + 71, + 443, + 542, + 582 + ], + "type": "text", + "content": "), resulting in lower accuracies regardless of sub-QUBO sizes, as can be seen in Fig. 2 and Fig. 4a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 71, + 595, + 542, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 595, + 542, + 706 + ], + "spans": [ + { + "bbox": [ + 71, + 595, + 542, + 706 + ], + "type": "text", + "content": "Our results show that HQA, which incorporates QA with classical algorithms to overcome the current quantum hardware limitations, is currently the most efficient solver for complex real-world problems that require the formulation of dense and large QUBOs. In this context, we define \"Quantum Advantage\" as the ability of a quantum-enhanced solver to achieve high accuracy and significantly faster problem-solving time compared to the classical solvers for large-scale optimization problems. 
Our findings suggest that leveraging quantum resources, particularly in hybrid configurations, can provide a computational advantage over classical approaches. Besides, as the current state of HQA demonstrates, we expect QA will have much higher accuracy and" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 531, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 531, + 742, + 539, + 753 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 100 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 100 + ], + "type": "text", + "content": "require much shorter time to solve QUBO problems with the development of the quantum hardware with more qubits and better qubit connectivity." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 129, + 144, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 129, + 144, + 144 + ], + "spans": [ + { + "bbox": [ + 69, + 129, + 144, + 144 + ], + "type": "text", + "content": "Discussion" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 145, + 543, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 145, + 543, + 340 + ], + "spans": [ + { + "bbox": [ + 67, + 145, + 543, + 340 + ], + "type": "text", + "content": "This work comprehensively compares state-of-the-art QA hardware and software against several classical optimization solvers for large and dense QUBO problems (up to 10,000 variables, fully connected interactions). The classical solvers struggled to solve large-scale problems, but their performance can be improved when combined with the QUBO decomposition method (i.e., QBSolv). Nevertheless, they become inaccurate and inefficient with increasing problem size, indicating that classical methods can face challenges for complex real-world problems represented by large and dense QUBO matrices. On the contrary, HQA performs significantly better than its classical counterparts, exhibiting the highest accuracy (" + }, + { + "bbox": [ + 67, + 145, + 543, + 340 + ], + "type": "inline_equation", + "content": "\\sim 0.013\\%" + }, + { + "bbox": [ + 67, + 145, + 543, + 340 + ], + "type": "text", + "content": " improvement) and shortest time to obtain solutions (" + }, + { + "bbox": [ + 67, + 145, + 543, + 340 + ], + "type": "inline_equation", + "content": "\\sim 6,561 \\times" + }, + { + "bbox": [ + 67, + 145, + 543, + 340 + ], + "type": "text", + "content": " acceleration) for 10,000 dimensional QUBO problems, demonstrating 'Quantum Advantage' for large and dense QUBO problems. Pure QA and QA with the QUBO decomposition method still exhibit limitations in solving large problems due to the current QA hardware limitations (e.g., number of qubits and qubit connectivity). However, we anticipate that QA will eventually reach the efficiency of HQA with the ongoing development of the quantum hardware. Thus, we expect QA to demonstrate true 'Quantum Advantage' in the future." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 367, + 133, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 367, + 133, + 383 + ], + "spans": [ + { + "bbox": [ + 69, + 367, + 133, + 383 + ], + "type": "text", + "content": "Methods" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 384, + 185, + 398 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 384, + 185, + 398 + ], + "spans": [ + { + "bbox": [ + 69, + 384, + 185, + 398 + ], + "type": "text", + "content": "Definition of a QUBO" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 399, + 541, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 399, + 541, + 426 + ], + "spans": [ + { + "bbox": [ + 67, + 399, + 541, + 426 + ], + "type": "text", + "content": "QA hardware is designed to efficiently solve combinatorial optimization problems that are formulated with a QUBO matrix, which can be given by" + }, + { + "bbox": [ + 67, + 399, + 541, + 426 + ], + "type": "inline_equation", + "content": "^{28,29}" + }, + { + "bbox": [ + 67, + 399, + 541, + 426 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 241, + 438, + 529, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 438, + 529, + 482 + ], + "spans": [ + { + "bbox": [ + 241, + 438, + 529, + 482 + ], + "type": "interline_equation", + "content": "y = \\sum_ {i = 1} ^ {n} \\sum_ {j = i} ^ {n} Q _ {i, j} x _ {i} x _ {j} \\tag {1}", + "image_path": "fe7a95acb3f32f095491bb1dd697b024c145bbb563c66dff6e82cd2f05625c34.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "spans": [ + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "Q_{i,j}" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": "-th row and " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": "-th column real-number element of the QUBO matrix " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "(\\mathbf{Q})" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": ", which is an " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "n \\times n" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": " Hermitian, i.e., " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} \\in \\mathbb{R}^{n \\times n}" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + 
"content": "-th element of a binary vector " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": " with a length of " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in [0,1^n]" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "Q_{i,j}" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": " is often referred to as a linear coefficient for " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "i = j" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": " and a quadratic interaction coefficient for " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "i \\neq j" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": ". The objective of QA is to identify the optimal binary vector of a given QUBO, which minimizes the scalar output " + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 495, + 544, + 568 + ], + "type": "text", + "content": " as29:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 256, + 582, + 529, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 582, + 529, + 604 + ], + "spans": [ + { + "bbox": [ + 256, + 582, + 529, + 604 + ], + "type": "interline_equation", + "content": "\\boldsymbol {x} ^ {*} = \\underset {x} {\\operatorname {a r g m i n}} y \\tag {2}", + "image_path": "8943641fd2b17bb66d8fcdc431898fc2377b4501654faa139b8dc2fecaed8fea.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 616, + 543, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 616, + 543, + 700 + ], + "spans": [ + { + "bbox": [ + 67, + 616, + 543, + 700 + ], + "type": "text", + "content": "In optimization problems, the linear coefficients correspond to cost or benefit terms associated with individual variables, while the quadratic coefficients represent interaction terms or dependencies between pairs of variables. These coefficients can be learned using machine learning models, such as the factorization machine (FM), trained on datasets containing input structures and their corresponding performance metrics. 
By mapping these learned coefficients into a QUBO formulation, we effectively represent an energy function of a material system or other real-world" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "spans": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 100 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 100 + ], + "type": "text", + "content": "optimization problem. This QUBO then describes the optimization space, enabling the identification of the optimal state with the best performance" + }, + { + "bbox": [ + 67, + 72, + 542, + 100 + ], + "type": "inline_equation", + "content": "^{30,31}" + }, + { + "bbox": [ + 67, + 72, + 542, + 100 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 113, + 208, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 113, + 208, + 126 + ], + "spans": [ + { + "bbox": [ + 69, + 113, + 208, + 126 + ], + "type": "text", + "content": "Methods to Solve a QUBO" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 127, + 543, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 127, + 543, + 196 + ], + "spans": [ + { + "bbox": [ + 67, + 127, + 543, + 196 + ], + "type": "text", + "content": "Various methods have been proposed to solve QUBO problems. For our benchmarking study, we consider seven representative methods: QA, hybrid QA (HQA), integer programming (IP), simulated annealing (SA), steepest descent (SD), tabu search (TS), parallel tempering with isoenergetic cluster moves (PT-ICM). Below, we provide a brief introduction to each of the solvers used in solving combinatorial optimization problems:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 210, + 328, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 210, + 328, + 224 + ], + "spans": [ + { + "bbox": [ + 67, + 210, + 328, + 224 + ], + "type": "text", + "content": "Quantum Annealing and Hybrid Quantum Annealing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "spans": [ + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "type": "text", + "content": "QA starts with a superposition state for all qubits, which has the lowest energy state of the initial Hamiltonian " + }, + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "type": "inline_equation", + "content": "(H_0)" + }, + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "type": "text", + "content": ". In the annealing process, the system evolves toward the lowest energy state of the final Hamiltonian (also called a problem Hamiltonian, " + }, + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "type": "inline_equation", + "content": "H_{p}" + }, + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "type": "text", + "content": ") by minimizing the influence of the initial Hamiltonian. 
The measured state at the end of the annealing is supposed to be the ground state of " + }, + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "type": "inline_equation", + "content": "H_{p}" + }, + { + "bbox": [ + 67, + 224, + 544, + 297 + ], + "type": "text", + "content": ", which can be expressed as the following equation32,33:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 201, + 310, + 529, + 326 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 310, + 529, + 326 + ], + "spans": [ + { + "bbox": [ + 201, + 310, + 529, + 326 + ], + "type": "interline_equation", + "content": "H (t / t _ {a}) = A (t / t _ {a}) H _ {0} + B (t / t _ {a}) H _ {p} \\tag {3}", + "image_path": "75fdef3b52ce20c9f00a46dd1bec1956d3e2715545f4da4161b0157e1ccf1f0b.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "spans": [ + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": " is the elapsed annealing time, and " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "t_a" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": " is the total annealing time. Equation (3) evolves from " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "A(t / t_a) = 1" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "B(t / t_a) \\approx 0" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": " at the beginning of the annealing " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "(t / t_a = 0)" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "A(t / t_a) \\approx 0" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "B(t / t_a) = 1" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": " at the end of the annealing " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "(t / t_a = 1)" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": ". Sufficiently slow evolution from " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "H_0" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "H_p" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": " enables the quantum system to stay at the ground state, which leads to the identification of the optimal solution of a given combinatorial optimization problem3,34. 
We use D-Wave Systems' quantum annealer (Advantage 4.1) to solve the problems using QA, and we set the number of reads for QA to 1,000 with a total annealing time of " + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "inline_equation", + "content": "20~\\mu s" + }, + { + "bbox": [ + 67, + 339, + 543, + 453 + ], + "type": "text", + "content": ". We select the best solution corresponding to the lowest energy state found among 1,000 reads." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "spans": [ + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "type": "text", + "content": "The D-Wave Ocean software development kit (SDK, ver. 3.3.0) provides many useful libraries, which include quantum or classical samplers such as the QA, HQA, SA, SD, and TS. They allow us to solve QUBO problems" + }, + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "type": "inline_equation", + "content": "^{22,35,36}" + }, + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "type": "text", + "content": ". We employ these samplers, which are implemented in the D-wave Ocean SDK, for the benchmarking study. Classical or QA solvers often benefit from decomposition algorithms to identify a high-quality solution (i.e., an optimal solution or a good solution close to the global optimum) for large QUBO problems. Hence, the decomposition of a QUBO matrix into sub-QUBOs is very useful when the size of QUBO matrix is larger than the physical volume of a sampler (i.e., QUBO size > physical number of qubits in QA or memory capacity of a classical computer). We employ the QBSolv package implemented in D-wave Ocean SDK for QUBO decomposition. The QBSolv splits a QUBO matrix into smaller QUBO matrices, and each of them is sequentially solved by classical or QA solvers. This algorithm enables us to handle a wide range of complex real-world problems" + }, + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "type": "inline_equation", + "content": "^{21,22,37}" + }, + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "type": "text", + "content": ". The size of the decomposed QUBOs is set to 30 unless otherwise specified. HQA (Leap Hybrid solver), developed by D-Wave systems, also decomposes large QUBO into smaller subproblems well-suited for QA's QPU, and then aggregates the results" + }, + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "type": "inline_equation", + "content": "^{27,38}" + }, + { + "bbox": [ + 67, + 466, + 543, + 716 + ], + "type": "text", + "content": ". The detailed algorithm of HQA, however, is not publicly released. We utilize a D-Wave sampler (dwave-system 1.4.0) for SA, SD, and TS with a specified number of reads (1,000) and default settings for other parameters. Furthermore, we employ D-Wave hybrid framework for PT-ICM." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "spans": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 86, + 178, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 86, + 178, + 99 + ], + "spans": [ + { + "bbox": [ + 69, + 86, + 178, + 99 + ], + "type": "text", + "content": "Integer Programming" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 100, + 541, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 100, + 541, + 196 + ], + "spans": [ + { + "bbox": [ + 67, + 100, + 541, + 196 + ], + "type": "text", + "content": "IP uses branch-and-bound, cutting planes, and other methods to search the solution space for optimal integer decisions and prove global optimality within a tolerance (gap). We use Gurobi (version 10.0.2)39 for benchmarking with the default settings (0.1% global optimality gap) plus a two-hour time limit and 240 GB software memory limit per optimization problem. The benchmark QUBO problem is implemented in the Pyomo modeling environment (version 6.6.2)40. We also experimented with a large gap and observed the first identified integer solution often had a poor objective function value. These results are not further reported for brevity." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 210, + 173, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 210, + 173, + 224 + ], + "spans": [ + { + "bbox": [ + 69, + 210, + 173, + 224 + ], + "type": "text", + "content": "Simulated Annealing" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 224, + 541, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 224, + 541, + 335 + ], + "spans": [ + { + "bbox": [ + 67, + 224, + 541, + 335 + ], + "type": "text", + "content": "SA, which is inspired by the annealing process in metallurgy, is a probabilistic optimization algorithm designed to approximate a global optimum of a given objective function. It is considered a metaheuristic method, which can be applied to a wide range of optimization problems" + }, + { + "bbox": [ + 67, + 224, + 541, + 335 + ], + "type": "inline_equation", + "content": "^{41,42}" + }, + { + "bbox": [ + 67, + 224, + 541, + 335 + ], + "type": "text", + "content": ". In SA, temperature and cooling schedule are major factors that determine how extensively the algorithm explores the solution space" + }, + { + "bbox": [ + 67, + 224, + 541, + 335 + ], + "type": "inline_equation", + "content": "^{43}" + }, + { + "bbox": [ + 67, + 224, + 541, + 335 + ], + "type": "text", + "content": ". This algorithm often identifies near-optimal solutions but cannot guarantee that local or global optimality conditions are satisfied. For SA, the hyperparameters are configured as follows: 1,000 reads, 1,000 sweeps, a 'random' initial state generation, and a 'geometric' temperature schedule." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 348, + 154, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 348, + 154, + 361 + ], + "spans": [ + { + "bbox": [ + 69, + 348, + 154, + 361 + ], + "type": "text", + "content": "Steepest Descent" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 361, + 541, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 361, + 541, + 431 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 541, + 431 + ], + "type": "text", + "content": "SD operates by employing variable flips to reduce the energy of a given QUBO through local minimization computations rather than relying on a calculated gradient in a traditional gradient descent algorithm" + }, + { + "bbox": [ + 67, + 361, + 541, + 431 + ], + "type": "inline_equation", + "content": "^{44}" + }, + { + "bbox": [ + 67, + 361, + 541, + 431 + ], + "type": "text", + "content": ". This algorithm is computationally inexpensive and beneficial for local refinement; thus, it can be used to search for local optima. In our benchmarking study, SD utilizes hyperparameters set to 1,000 reads and a 'random' strategy for initial state generation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 444, + 134, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 444, + 134, + 456 + ], + "spans": [ + { + "bbox": [ + 69, + 444, + 134, + 456 + ], + "type": "text", + "content": "Tabu Search" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 458, + 539, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 458, + 539, + 528 + ], + "spans": [ + { + "bbox": [ + 67, + 458, + 539, + 528 + ], + "type": "text", + "content": "TS is designed to solve combinatorial and discrete optimization problems by using memory to guide the search for better solutions, as introduced by Glover" + }, + { + "bbox": [ + 67, + 458, + 539, + 528 + ], + "type": "inline_equation", + "content": "^{45}" + }, + { + "bbox": [ + 67, + 458, + 539, + 528 + ], + "type": "text", + "content": ". This algorithm can escape already visited local minima by remembering those points (called 'Tabu List' to keep track of moves during the search), aiming to identify high-quality solutions in a large solution space. This algorithm works well for combinatorial optimization problems with small search spaces." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 528, + 529, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 528, + 529, + 568 + ], + "spans": [ + { + "bbox": [ + 67, + 528, + 529, + 568 + ], + "type": "text", + "content": "However, it can be hard to evaluate neighboring solutions and to maintain and update the Tabu List with increasing problem sizes. The hyperparameter settings for TS are as follows: 1,000 reads, a timeout of " + }, + { + "bbox": [ + 67, + 528, + 529, + 568 + ], + "type": "inline_equation", + "content": "100\\mathrm{ms}" + }, + { + "bbox": [ + 67, + 528, + 529, + 568 + ], + "type": "text", + "content": ", and 'random' initial state generation." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 582, + 376, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 582, + 376, + 596 + ], + "spans": [ + { + "bbox": [ + 67, + 582, + 376, + 596 + ], + "type": "text", + "content": "Parallel Tempering with Isoenergetic Cluster Moves (PT-ICM)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 596, + 541, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 596, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 67, + 596, + 541, + 693 + ], + "type": "text", + "content": "PT-ICM is an advanced Monte Carlo method designed to navigate optimization space, such as QUBO problems" + }, + { + "bbox": [ + 67, + 596, + 541, + 693 + ], + "type": "inline_equation", + "content": "^{46-48}" + }, + { + "bbox": [ + 67, + 596, + 541, + 693 + ], + "type": "text", + "content": ". PT operates by maintaining multiple replicas of the system at different temperatures and allowing exchanges between replicas based on a Metropolis criterion. This approach helps lower-temperature replicas escape local minima with the aid of higher-temperature replicas. ICM identifies clusters of variables that can flip without changing the system's energy" + }, + { + "bbox": [ + 67, + 596, + 541, + 693 + ], + "type": "inline_equation", + "content": "^{46}" + }, + { + "bbox": [ + 67, + 596, + 541, + 693 + ], + "type": "text", + "content": ". In this study, the hyperparameters for PT-ICM are set as follows: the number of sweeps is 1,000, the number of replicas is 10, and the number of iterations is 10." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "spans": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 200, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 200, + 85 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 200, + 85 + ], + "type": "text", + "content": "Benchmarking Problems" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 87, + 174, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 87, + 174, + 99 + ], + "spans": [ + { + "bbox": [ + 69, + 87, + 174, + 99 + ], + "type": "text", + "content": "Real-world problems" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 99, + 541, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 99, + 541, + 321 + ], + "spans": [ + { + "bbox": [ + 67, + 99, + 541, + 321 + ], + "type": "text", + "content": "Material optimization is selected to represent real-world problems, with the design of planar multilayers (PMLs) optical film as a testbed for benchmarking. PMLs can be seen in many applications. For example, they have been explored for transparent radiative cooling windows to address global warming by emitting thermal radiation through the atmospheric window (" + }, + { + "bbox": [ + 67, + 99, + 541, + 321 + ], + "type": "inline_equation", + "content": "8\\mu \\mathrm{m} < \\lambda < 13\\mu \\mathrm{m}" + }, + { + "bbox": [ + 67, + 99, + 541, + 321 + ], + "type": "text", + "content": ")4, while transmitting visible photons. 
PMLs consist of layers with one of four dielectric materials: silicon dioxide, silicon nitride, aluminum oxide, and titanium dioxide. The configuration of these layers can be expressed as a binary vector, where each layer is assigned a two-digit binary label. Optical characteristics and corresponding figure-of-merit (FOM) of the PML can be calculated by solving Maxwell's equations using the transfer matrix method (TMM). To formulate QUBOs, layer configurations (input binary vectors) and their FOMs (outputs) are used to train the FM model. FM learns the linear and quadratic coefficients, effectively modeling the optimization landscape of the material system. QUBO matrices are then generated using these coefficients30,31. PML configurations are randomly generated for training datasets, and their FOMs are calculated using TMM. The resulting QUBO matrices represent real-world materials optimization problems, characterized by highly dense (fully connected) configurations (Fig. S1), which are used for the benchmarking study in Fig. 1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 334, + 190, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 334, + 190, + 347 + ], + "spans": [ + { + "bbox": [ + 69, + 334, + 190, + 347 + ], + "type": "text", + "content": "Benchmarking problems" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "spans": [ + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "content": "We formulate QUBO matrices with random elements to further systematically explore scalability (Fig. 2 and Fig. 3), following the characteristics of QUBOs from real-world problems, for the benchmarking study as the following:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 390, + 541, + 640 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 69, + 390, + 541, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 390, + 541, + 431 + ], + "spans": [ + { + "bbox": [ + 69, + 390, + 541, + 431 + ], + "type": "text", + "content": "- Problem size: The problem size, corresponding to the length of a binary vector " + }, + { + "bbox": [ + 69, + 390, + 541, + 431 + ], + "type": "inline_equation", + "content": "(n)" + }, + { + "bbox": [ + 69, + 390, + 541, + 431 + ], + "type": "text", + "content": ", varies from 120 to 10,000 (120, 200, 500, 1,000, 1,500, 2,000, 2,500, 3,000, 4,000, 5,000, 6,000, 7,000, 8,000, 9,000 and 10,000)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 433, + 541, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 433, + 541, + 516 + ], + "spans": [ + { + "bbox": [ + 67, + 433, + 541, + 516 + ], + "type": "text", + "content": "- Distribution of elements: For each problem size, four QUBO matrices with different distributions of elements are studied. These elements are random numbers with a mean value of 0 and standard deviations of 0.001, 0.01, 0.1, or 1. These distributions reflect the variability observed in QUBO coefficients derived from real-world problems (Table S1). A QUBO configured with elements having a large deviation yields a significant variation in the energy landscape, potentially resulting in high energy barriers that must be overcome to find the ground state." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 517, + 535, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 517, + 535, + 640 + ], + "spans": [ + { + "bbox": [ + 67, + 517, + 535, + 640 + ], + "type": "text", + "content": "- Density of matrices: The density of QUBO matrices reflects the proportion of pairwise interactions among variables relative to the maximum possible interactions. Fully connected QUBOs, such as those derived from real-world problems, represent cases where all variables interact with each other. For example, in layered photonic structures, each layer interacts with every other layer, influencing optical responses, which leads to a fully connected QUBO. In contrast, Max-Cut problems typically result in sparse QUBOs, where only a subset of variables (nodes) interact through edges. The maximum number of interaction coefficients (i.e., the number of edges in Max-Cut problems) is " + }, + { + "bbox": [ + 67, + 517, + 535, + 640 + ], + "type": "inline_equation", + "content": "nC_2" + }, + { + "bbox": [ + 67, + 517, + 535, + 640 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 517, + 535, + 640 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 517, + 535, + 640 + ], + "type": "text", + "content": " denotes the problem size. The density of a QUBO can be calculated as:" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 148, + 653, + 529, + 684 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 653, + 529, + 684 + ], + "spans": [ + { + "bbox": [ + 148, + 653, + 529, + 684 + ], + "type": "interline_equation", + "content": "\\text {d e n s i t y} = \\frac {\\text {n u m b e r o f i n t e r a c t i o n c o e f f i c i e n t s}}{\\text {m a x i m u m n u m b e r o f i n t e r a c t i o n c o e f f i c i e n t s}} \\tag {4}", + "image_path": "e3cfbe667099ce38f9968b4d6af5d3b563ff2a89cc053dae585ccfa165f4c2a7.jpg" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "spans": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "text", + "content": "For example, a benchmark problem instance (G10) with 800 nodes and 19,176 edges has a density of " + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "text", + "content": ", calculated as: density " + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "inline_equation", + "content": "= 19,176 / 319,600 = 0.06" + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "text", + "content": ". 
The density of Max-Cut problems can be adjusted by changing the number of edges, with typical instances having densities ranging from " + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "inline_equation", + "content": "0.02\\%" + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "text", + "content": " (Fig. S1, Table S2). In contrast, real-world problems feature fully connected configurations, corresponding to a density of " + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 67, + 72, + 542, + 185 + ], + "type": "text", + "content": ". QUBOs for this benchmarking study have dense matrices fully filled with real-number elements in the upper triangular part (i.e., fully connected graph nodes, Fig. S2). This configuration aims to approximate real-world optimization problems, which usually requires a dense QUBO matrix4,28." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 196, + 415, + 209 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 196, + 415, + 209 + ], + "spans": [ + { + "bbox": [ + 67, + 196, + 415, + 209 + ], + "type": "text", + "content": "Performance Metrics: Relative Accuracy and Computational Time" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 210, + 160, + 223 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 210, + 160, + 223 + ], + "spans": [ + { + "bbox": [ + 69, + 210, + 160, + 223 + ], + "type": "text", + "content": "Relative Accuracy" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 224, + 543, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 224, + 543, + 350 + ], + "spans": [ + { + "bbox": [ + 67, + 224, + 543, + 350 + ], + "type": "text", + "content": "For small-scale problems, brute-force search guarantees the identification of the global optimum by evaluating all possible solutions. However, this approach becomes infeasible for large-scale problems due to the exponential growth of the search space. The IP solver, such as Gurobi, utilizes the branch-and-bound method to efficiently explore the solution space and prove global optimality within an optimality gap. However, due to computational limitations or time constraints, IP may struggle to find the global optimum for large-scale problems. To address this challenge in our benchmarking study, we employ a 'Relative Accuracy' metric to compare the relative performance of different solvers. 
Relative accuracy is defined as the ratio of a solver's objective value to the best objective found across all solvers:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 167, + 361, + 529, + 378 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 361, + 529, + 378 + ], + "spans": [ + { + "bbox": [ + 167, + 361, + 529, + 378 + ], + "type": "interline_equation", + "content": "\\text {R e l a t i v e A c c u r a c y} = \\text {S o l u t i o n} _ {\\text {s o l v e r}} / \\text {S o l u t i o n} _ {\\text {b e s t}} \\tag {5}", + "image_path": "7ecbacb2f7527e7f764b668782c06e35fcdde7c952d5ef5c6fdc96ac74ec4d52.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 389, + 544, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 389, + 544, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 389, + 544, + 544 + ], + "type": "text", + "content": "This metric provides a way to evaluate the solution quality when the global optimum cannot be definitively found or proven for large-scale problem instances. Note that the best solution is the lowest value among the solutions obtained from all solvers since the solvers are designed to find the lowest energy state (generally negative values for the QUBOs used in this study). The relative accuracies of the solvers are plotted as a function of problem sizes. In Fig. 1, the relative accuracy represents the average value calculated from three different QUBOs that represent material optimization, and in Fig. 2, it represents the average from four different QUBOs with varying standard deviations for each problem size (ranging from 120 to 10,000). Error bars on the plot represent the standard deviation of accuracies calculated from the four different QUBOs for each problem size, relative to the average values. By definition, the relative accuracy is 1.0 when the solver finds a solution with the best-known objective function value (equation 5)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 555, + 171, + 568 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 555, + 171, + 568 + ], + "spans": [ + { + "bbox": [ + 69, + 555, + 171, + 568 + ], + "type": "text", + "content": "Computational Time" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 569, + 542, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 569, + 542, + 708 + ], + "spans": [ + { + "bbox": [ + 67, + 569, + 542, + 708 + ], + "type": "text", + "content": "Computational time is another important factor in determining the solvers' performance. Combinatorial optimization problems are considered NP-hard, so increasing problem sizes can lead to an explosion of search space, posing challenges in optimization processes. We measure the computational time dedicated solely to solving given problems, excluding problem reading time, queue time, or communication time between the local computer and quantum annealer. This is consistent with other benchmarking studies[17,18]. For problems solved on D-Wave systems' QPU for QA, the execution time includes programming and sampling times (anneal, readout, and delay time). QPU access time is calculated for all of them after programmed anneal-read cycles, corresponding to the time charged to users in their allocations, which is used as the computational time for QA and HQA. 
Classical solvers (SA, SD, TS, and PT-ICM) run on a workstation (AMD" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "spans": [ + { + "bbox": [ + 531, + 742, + 541, + 753 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": "Ryzen Threadripper PRO 3975WX @ 3.5 GHz processor with 32 cores and 32GB of RAM), and IP (Gurobi) run on a cluster node (an Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz processor with 24 cores and 256 GB of RAM). Problem reading time can be significant when the problem size is large, but it is excluded from the computational time consideration. We measure the time solely taken to solve given problems with classical solvers. In Fig. 1b and Fig. 3, the solution time for classical and quantum solvers is presented as a function of problem sizes. Note that a QUBO problem is NP-hard" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "^{49}" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": ". Evaluating the energy of a given solution has a computational cost of " + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "O(n^{2})" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": " (= problem size) is the number of variables. The number of reads or sweeps does not scale with " + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": ", but the cost for each sweep scales as " + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "O(n)" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": " for SA. 
Consequently, the theoretical time complexities of the classical solvers are known as " + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "O(n^{3})" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": " for SA" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "^{50}" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "O(n^{2})" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": " for SD" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "^{51}" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "O(n^{2})" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": " for TS" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "inline_equation", + "content": "^{52}" + }, + { + "bbox": [ + 67, + 72, + 542, + 239 + ], + "type": "text", + "content": ". On the other hand, the theoretical time complexity of the quantum solvers can be considered constant." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 268, + 186, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 268, + 186, + 285 + ], + "spans": [ + { + "bbox": [ + 69, + 268, + 186, + 285 + ], + "type": "text", + "content": "Data availability" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 285, + 541, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 541, + 313 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 541, + 313 + ], + "type": "text", + "content": "All data generated and analyzed during the study are available from the corresponding author upon reasonable request." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 342, + 189, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 342, + 189, + 358 + ], + "spans": [ + { + "bbox": [ + 69, + 342, + 189, + 358 + ], + "type": "text", + "content": "Code availability" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 358, + 541, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 358, + 541, + 387 + ], + "spans": [ + { + "bbox": [ + 67, + 358, + 541, + 387 + ], + "type": "text", + "content": "The codes used for generating and analyzing data are available from the corresponding author upon reasonable request." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 415, + 205, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 415, + 205, + 432 + ], + "spans": [ + { + "bbox": [ + 69, + 415, + 205, + 432 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 432, + 544, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 432, + 544, + 515 + ], + "spans": [ + { + "bbox": [ + 67, + 432, + 544, + 515 + ], + "type": "text", + "content": "This research used resources of the Oak Ridge Leadership Computing Facility at the Oak Ridge National Laboratory, which is supported by the Office of Science of the U.S. Department of Energy under Contract No. DE-AC05-00OR22725. 
This research was supported by the Quantum Computing Based on Quantum Advantage Challenge Research (RS-2023-00255442) through the National Research Foundation of Korea (NRF) funded by the Korean Government (Ministry of Science and ICT(MSIT))." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 544, + 208, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 544, + 208, + 559 + ], + "spans": [ + { + "bbox": [ + 69, + 544, + 208, + 559 + ], + "type": "text", + "content": "Author information" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 561, + 190, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 561, + 190, + 573 + ], + "spans": [ + { + "bbox": [ + 69, + 561, + 190, + 573 + ], + "type": "text", + "content": "Authors and Affiliations" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 574, + 541, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 574, + 541, + 601 + ], + "spans": [ + { + "bbox": [ + 69, + 574, + 541, + 601 + ], + "type": "text", + "content": "Department of Aerospace and Mechanical Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 602, + 220, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 602, + 220, + 616 + ], + "spans": [ + { + "bbox": [ + 69, + 602, + 220, + 616 + ], + "type": "text", + "content": "Seongmin Kim & Tengfei Luo" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 629, + 541, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 629, + 541, + 658 + ], + "spans": [ + { + "bbox": [ + 68, + 629, + 541, + 658 + ], + "type": "text", + "content": "Department of Electronic Engineering, Kyung Hee University; Yongin-Si, Gyeonggi-do 17104, Republic of Korea." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 658, + 220, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 658, + 220, + 672 + ], + "spans": [ + { + "bbox": [ + 69, + 658, + 220, + 672 + ], + "type": "text", + "content": "Sangwoo Ahn & Eungkyu Lee" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 685, + 541, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 685, + 541, + 713 + ], + "spans": [ + { + "bbox": [ + 68, + 685, + 541, + 713 + ], + "type": "text", + "content": "Department of Chemical and Biomolecular Engineering, University of Notre Dame; Notre Dame, Indiana 46556, United States." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 526, + 742, + 541, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 526, + 742, + 541, + 753 + ], + "spans": [ + { + "bbox": [ + 526, + 742, + 541, + 753 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 168, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 168, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 168, + 87 + ], + "type": "text", + "content": "Alexander Dowling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 99, + 541, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 99, + 541, + 126 + ], + "spans": [ + { + "bbox": [ + 70, + 99, + 541, + 126 + ], + "type": "text", + "content": "National Center for Computational Sciences, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37830, United States." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 128, + 226, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 128, + 226, + 140 + ], + "spans": [ + { + "bbox": [ + 70, + 128, + 226, + 140 + ], + "type": "text", + "content": "Seongmin Kim & In-Saeng Suh" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 155, + 144, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 155, + 144, + 167 + ], + "spans": [ + { + "bbox": [ + 69, + 155, + 144, + 167 + ], + "type": "text", + "content": "Contributions" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 168, + 541, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 168, + 541, + 224 + ], + "spans": [ + { + "bbox": [ + 69, + 168, + 541, + 224 + ], + "type": "text", + "content": "S.K., A.D., E.L., and T.L. conceived the idea. S.K. and S.A. performed benchmarking studies to generate data. A.D. and S.K. implemented the IP benchmark. S.K. analyzed the data with advice from I.S., A.D., E.L., and T.L. All authors discussed the results and contributed to the writing of the manuscript." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 238, + 192, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 238, + 192, + 251 + ], + "spans": [ + { + "bbox": [ + 69, + 238, + 192, + 251 + ], + "type": "text", + "content": "Corresponding authors" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 251, + 429, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 251, + 429, + 265 + ], + "spans": [ + { + "bbox": [ + 69, + 251, + 429, + 265 + ], + "type": "text", + "content": "Correspondence to Alexander W. Dowling, Eungkyu Lee, or Tengfei Luo." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 294, + 202, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 294, + 202, + 309 + ], + "spans": [ + { + "bbox": [ + 69, + 294, + 202, + 309 + ], + "type": "text", + "content": "Ethics declarations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 312, + 177, + 325 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 312, + 177, + 325 + ], + "spans": [ + { + "bbox": [ + 69, + 312, + 177, + 325 + ], + "type": "text", + "content": "Competing Interests" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 325, + 280, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 325, + 280, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 325, + 280, + 338 + ], + "type": "text", + "content": "The authors declare no competing interests." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 367, + 141, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 367, + 141, + 383 + ], + "spans": [ + { + "bbox": [ + 70, + 367, + 141, + 383 + ], + "type": "text", + "content": "Reference" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 384, + 541, + 702 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 69, + 384, + 541, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 384, + 541, + 411 + ], + "spans": [ + { + "bbox": [ + 69, + 384, + 541, + 411 + ], + "type": "text", + "content": "1 Arute, F. et al. Quantum supremacy using a programmable superconducting processor. Nature 574, 505-510 (2019)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 412, + 541, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 412, + 541, + 438 + ], + "spans": [ + { + "bbox": [ + 69, + 412, + 541, + 438 + ], + "type": "text", + "content": "2 Daley, A. J. et al. Practical quantum advantage in quantum simulation. Nature 607, 667-676 (2022)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 440, + 541, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 440, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 70, + 440, + 541, + 466 + ], + "type": "text", + "content": "3 Johnson, M. W. et al. Quantum annealing with manufactured spins. Nature 473, 194-198 (2011)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 467, + 541, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 467, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 70, + 467, + 541, + 495 + ], + "type": "text", + "content": "4 Kim, S. et al. High-Performance Transparent Radiative Cooler Designed by Quantum Computing. ACS Energy Lett 7, 4134-4141 (2022)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 495, + 541, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 495, + 541, + 536 + ], + "spans": [ + { + "bbox": [ + 70, + 495, + 541, + 536 + ], + "type": "text", + "content": "5 Kim, S., Jung, S., Bobbitt, A., Lee, E. & Luo, T. Wide-angle spectral filter for energy-saving windows designed by quantum annealing-enhanced active learning. Cell Rep Phys Sci (2024)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 537, + 541, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 537, + 541, + 577 + ], + "spans": [ + { + "bbox": [ + 69, + 537, + 541, + 577 + ], + "type": "text", + "content": "6 Li, R. 
Y., Di Felice, R., Rohs, R. & Lidar, D. A. Quantum annealing versus classical machine learning applied to a simplified computational biology problem. npj Quantum Inf 4 (2018)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 578, + 541, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 578, + 541, + 605 + ], + "spans": [ + { + "bbox": [ + 70, + 578, + 541, + 605 + ], + "type": "text", + "content": "7 Vinci, W., Albash, T. & Lidar, D. A. Nested quantum annealing correction. npj Quantum Inf 2 (2016)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 606, + 541, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 606, + 541, + 632 + ], + "spans": [ + { + "bbox": [ + 70, + 606, + 541, + 632 + ], + "type": "text", + "content": "8 Santoro, G. E. & Tosatti, E. Optimization using quantum mechanics: quantum annealing through adiabatic evolution. J Phys A: Math Gen 39, R393-R431 (2006)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 633, + 541, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 633, + 541, + 673 + ], + "spans": [ + { + "bbox": [ + 69, + 633, + 541, + 673 + ], + "type": "text", + "content": "9 Mandra, S., Zhu, Z. & Katzgraber, H. G. Exponentially Biased Ground-State Sampling of Quantum Annealing Machines with Transverse-Field Driving Hamiltonians. Phys Rev Lett 118, 070502 (2017)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 674, + 541, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 674, + 541, + 702 + ], + "spans": [ + { + "bbox": [ + 70, + 674, + 541, + 702 + ], + "type": "text", + "content": "10 Kitai, K. et al. Designing metamaterials with quantum annealing and factorization machines. Phys Rev Res 2, 013319 (2020)." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 71, + 541, + 693 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 70, + 71, + 541, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 71, + 541, + 99 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 541, + 99 + ], + "type": "text", + "content": "11 Santoro, G. E., Marton a'k, R., Tosatti, E. & Car, R. Theory of Quantum Annealing of an Ising Spin Glass. Science 295, 2427-2430 (2002)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 100, + 541, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 100, + 541, + 126 + ], + "spans": [ + { + "bbox": [ + 70, + 100, + 541, + 126 + ], + "type": "text", + "content": "12 Hen, I. & Spedalieri, F. M. Quantum Annealing for Constrained Optimization. Phys Rev Appl 5 (2016)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 127, + 541, + 154 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 127, + 541, + 154 + ], + "spans": [ + { + "bbox": [ + 70, + 127, + 541, + 154 + ], + "type": "text", + "content": "13 Kadowaki, T. & Nishimori, H. Quantum annealing in the transverse Ising model. Phys Rev E 58, 5355-5363 (1998)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 155, + 541, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 155, + 541, + 182 + ], + "spans": [ + { + "bbox": [ + 70, + 155, + 541, + 182 + ], + "type": "text", + "content": "14 Morita, S. & Nishimori, H. Mathematical foundation of quantum annealing J Math Phys 49, 125210 (2008)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 182, + 541, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 182, + 541, + 209 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 541, + 209 + ], + "type": "text", + "content": "15 Wilson, B. A. et al. Machine learning framework for quantum sampling of highly constrained, continuous optimization problems. Appl Phys Rev 8, 041418 (2021)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 211, + 541, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 211, + 541, + 251 + ], + "spans": [ + { + "bbox": [ + 70, + 211, + 541, + 251 + ], + "type": "text", + "content": "16 Kim, S., Wu, S., Jian, R., Xiong, G. & Luo, T. Design of a High-Performance Titanium Nitride Metastructure-Based Solar Absorber Using Quantum Computing-Assisted Optimization. ACS Appl Mater Interfaces 15, 40606-40613 (2023)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 251, + 541, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 251, + 541, + 293 + ], + "spans": [ + { + "bbox": [ + 70, + 251, + 541, + 293 + ], + "type": "text", + "content": "17 O'Malley, D., Vesselinov, V. V., Alexandrov, B. S. & Alexandrov, L. B. Nonnegative/Binary matrix factorization with a D-Wave quantum annealer. PLoS One 13, e0206653 (2018)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 293, + 541, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 293, + 541, + 319 + ], + "spans": [ + { + "bbox": [ + 70, + 293, + 541, + 319 + ], + "type": "text", + "content": "18 Tasseff, B. et al. On the Emerging Potential of Quantum Annealing Hardware for Combinatorial Optimization. arXiv:2210.04291 (2022)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 321, + 541, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 321, + 541, + 348 + ], + "spans": [ + { + "bbox": [ + 70, + 321, + 541, + 348 + ], + "type": "text", + "content": "19 Hab, R., Ohzeki, M. & Tanaka, K. Travel time optimization on multi-AGV routing by reverse annealing. Sci Rep 12, 17753 (2022)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 349, + 541, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 349, + 541, + 374 + ], + "spans": [ + { + "bbox": [ + 70, + 349, + 541, + 374 + ], + "type": "text", + "content": "20 Kim, S. et al. Quantum annealing-aided design of an ultrathin-metamaterial optical diode. Nano Converg 11, 16 (2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 376, + 541, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 376, + 541, + 403 + ], + "spans": [ + { + "bbox": [ + 70, + 376, + 541, + 403 + ], + "type": "text", + "content": "21 Pelofske, E., Hahn, G. & Djidjev, H. N. Noise dynamics of quantum annealers: estimating the effective noise using idle qubits. Quantum Sci Technol 8 (2023)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 403, + 541, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 403, + 541, + 445 + ], + "spans": [ + { + "bbox": [ + 70, + 403, + 541, + 445 + ], + "type": "text", + "content": "22 Yoneda, Y., Shimada, M., Yoshida, A. & Shirakashi, J.-i. Searching for optimal experimental parameters with D-Wave quantum annealer for fabrication of Au atomic junctions. Appl Phys Exp 16 (2023)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 445, + 541, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 445, + 541, + 472 + ], + "spans": [ + { + "bbox": [ + 70, + 445, + 541, + 472 + ], + "type": "text", + "content": "23 Willsch, D. et al. Benchmarking Advantage and D-Wave 2000Q quantum annealers with exact cover problems. Quantum Inf Process 21 (2022)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 473, + 541, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 473, + 541, + 500 + ], + "spans": [ + { + "bbox": [ + 70, + 473, + 541, + 500 + ], + "type": "text", + "content": "24 Yarkoni, S., Raponi, E., Back, T. & Schmitt, S. Quantum annealing for industry applications: introduction and review. Rep Prog Phys 85 (2022)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 501, + 541, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 501, + 541, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 501, + 541, + 540 + ], + "type": "text", + "content": "25 Kasi, S., Warburton, P., Kaewell, J. & Jamieson, K. A Cost and Power Feasibility Analysis of Quantum Annealing for NextG Cellular Wireless Networks. IEEE Transactions on Quantum Engineering 4, 1-17 (2023)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 541, + 541, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 541, + 541, + 582 + ], + "spans": [ + { + "bbox": [ + 70, + 541, + 541, + 582 + ], + "type": "text", + "content": "26 Teplukhin, A., Kendrick, B. K. & Babikov, D. Solving complex eigenvalue problems on a quantum annealer with applications to quantum scattering resonances. Phys Chem Chem Phys 22, 26136-26144 (2020)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 583, + 541, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 583, + 541, + 624 + ], + "spans": [ + { + "bbox": [ + 70, + 583, + 541, + 624 + ], + "type": "text", + "content": "27 Atobe, Y., Tawada, M. & Togawa, N. Hybrid Annealing Method Based on subQUBO Model Extraction With Multiple Solution Instances. IEEE Trans Comput 71, 2606-2619 (2022)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 624, + 541, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 624, + 541, + 665 + ], + "spans": [ + { + "bbox": [ + 70, + 624, + 541, + 665 + ], + "type": "text", + "content": "28 Zaman, M., Tanahashi, K. & Tanaka, S. PyQUBO: Python Library for Mapping Combinatorial Optimization Problems to QUBO Form. IEEE Trans Comput 71, 838-850 (2022)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 666, + 541, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 666, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 70, + 666, + 541, + 693 + ], + "type": "text", + "content": "29 Tao, M. et al. in IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW) 557-566 (2020)." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 541, + 693 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 69, + 71, + 541, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 541, + 99 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 541, + 99 + ], + "type": "text", + "content": "30 Kim, S. et al. A review on machine learning-guided design of energy materials. Progress in Energy 6 (2024)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 100, + 541, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 100, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 67, + 100, + 541, + 140 + ], + "type": "text", + "content": "31 Kim, S., Luo, T., Lee, E. & Suh, I.-S. Distributed Quantum Approximate Optimization Algorithm on Integrated High-Performance Computing and Quantum Computing Systems for Large-Scale Optimization. arXiv:2407.20212 (2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 141, + 541, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 141, + 541, + 182 + ], + "spans": [ + { + "bbox": [ + 69, + 141, + 541, + 182 + ], + "type": "text", + "content": "32 Gemeinhardt, F., Garmendia, A., Wimmer, M., Weder, B. & Leymann, F. Quantum Combinatorial Optimization in the NISQ Era: A Systematic Mapping Study. ACM Comput Surv 56, 1-36 (2023)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 182, + 541, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 182, + 541, + 210 + ], + "spans": [ + { + "bbox": [ + 69, + 182, + 541, + 210 + ], + "type": "text", + "content": "33 Willsch, M., Willsch, D., Jin, F., De Raedt, H. & Michielsen, K. Benchmarking the quantum approximate optimization algorithm. Quantum Inf Process 19 (2020)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 211, + 541, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 211, + 541, + 237 + ], + "spans": [ + { + "bbox": [ + 70, + 211, + 541, + 237 + ], + "type": "text", + "content": "34 Hauke, P., Katzgraber, H. G., Lechner, W., Nishimori, H. & Oliver, W. D. Perspectives of quantum annealing: methods and implementations. Rep Prog Phys 83, 054401 (2020)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 238, + 541, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 238, + 541, + 266 + ], + "spans": [ + { + "bbox": [ + 70, + 238, + 541, + 266 + ], + "type": "text", + "content": "35 Carugno, C., Ferrari Dacrema, M. & Cremonesi, P. Evaluating the job shop scheduling problem on a D-wave quantum annealer. Sci Rep 12, 6539 (2022)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 266, + 541, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 266, + 541, + 293 + ], + "spans": [ + { + "bbox": [ + 70, + 266, + 541, + 293 + ], + "type": "text", + "content": "36 Irie, H., Liang, H., Doi, T., Gongyo, S. & Hatsuda, T. Hybrid quantum annealing via molecular dynamics. 
Sci Rep 11, 8426 (2021)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 293, + 541, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 293, + 541, + 320 + ], + "spans": [ + { + "bbox": [ + 70, + 293, + 541, + 320 + ], + "type": "text", + "content": "37 Raymond, J. et al. Hybrid Quantum Annealing for Larger-than-QPU Lattice-structured Problems. ACM Transactions on Quantum Computing 4, 1-30 (2023)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 321, + 541, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 321, + 541, + 348 + ], + "spans": [ + { + "bbox": [ + 70, + 321, + 541, + 348 + ], + "type": "text", + "content": "38 Ceselli, A. & Premoli, M. On good encodings for quantum annealer and digital optimization solvers. Sci Rep 13, 5628 (2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 349, + 541, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 349, + 541, + 389 + ], + "spans": [ + { + "bbox": [ + 70, + 349, + 541, + 389 + ], + "type": "text", + "content": "39 Song, J., Lanka, R., Yue, Y. & Dilkina, B. A General Large Neighborhood Search Framework for Solving Integer Linear Programs. 34th Conference on Neural Information Processing Systems (NeurIPS 2020) (2020)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 390, + 541, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 390, + 541, + 418 + ], + "spans": [ + { + "bbox": [ + 70, + 390, + 541, + 418 + ], + "type": "text", + "content": "40 Bynum, M. L. et al. Pyomo — Optimization Modeling in Python, 3rd edition. Springer Optimization and Its Applications 67 (2021)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 418, + 541, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 418, + 541, + 458 + ], + "spans": [ + { + "bbox": [ + 70, + 418, + 541, + 458 + ], + "type": "text", + "content": "41 Alnowibet, K. A., Mahdi, S., El-Alem, M., Abdelawwad, M. & Mohamed, A. W. Guided Hybrid Modified Simulated Annealing Algorithm for Solving Constrained Global Optimization Problems. Mathematics 10 (2022)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 459, + 541, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 459, + 541, + 487 + ], + "spans": [ + { + "bbox": [ + 70, + 459, + 541, + 487 + ], + "type": "text", + "content": "42 Rere, L. M. R., Fanany, M. I. & Arymurthy, A. M. Simulated Annealing Algorithm for Deep Learning. Procedia Comput Sci 72, 137-144 (2015)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 487, + 541, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 487, + 541, + 528 + ], + "spans": [ + { + "bbox": [ + 70, + 487, + 541, + 528 + ], + "type": "text", + "content": "43 Gonzales, G. V. et al. A comparison of simulated annealing schedules for constructable design of complex cavities intruded into conductive walls with internal heat generation. Energy 93, 372-382 (2015)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 528, + 541, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 528, + 541, + 555 + ], + "spans": [ + { + "bbox": [ + 70, + 528, + 541, + 555 + ], + "type": "text", + "content": "44 Wadayama, T. et al. Gradient descent bit flipping algorithms for decoding LDPC codes. IEEE Trans Communi 58, 1610-1614 (2010)." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 555, + 541, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 555, + 541, + 583 + ], + "spans": [ + { + "bbox": [ + 70, + 555, + 541, + 583 + ], + "type": "text", + "content": "45 Glover, F., Laguna, M. & Marti', R. Principles of Tabu Search. Handbook of Approximation Algorithms and Metaheuristics 23 (2007)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 583, + 541, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 583, + 541, + 610 + ], + "spans": [ + { + "bbox": [ + 70, + 583, + 541, + 610 + ], + "type": "text", + "content": "46 Aramon, M. et al. Physics-Inspired Optimization for Quadratic Unconstrained Problems Using a Digital Annealer. Frontiers in Physics 7 (2019)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 611, + 541, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 611, + 541, + 638 + ], + "spans": [ + { + "bbox": [ + 70, + 611, + 541, + 638 + ], + "type": "text", + "content": "47 Zhu, Z., Ochoa, A. J. & Katzgraber, H. G. Fair sampling of ground-state configurations of binary optimization problems. arXiv:1903.07600 (2019)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 638, + 541, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 638, + 541, + 666 + ], + "spans": [ + { + "bbox": [ + 70, + 638, + 541, + 666 + ], + "type": "text", + "content": "48 Mandrà, S. & Katzgraber, H. G. A deceptive step towards quantum speedup detection. Quantum Science and Technology 3 (2018)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 666, + 541, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 666, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 70, + 666, + 541, + 693 + ], + "type": "text", + "content": "49 Yasuoka, H. Computational Complexity of Quadratic Unconstrained Binary Optimization. arXiv:2109.10048 (2022)." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 542, + 224 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 69, + 71, + 542, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 542, + 99 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 542, + 99 + ], + "type": "text", + "content": "50 Hansen, P. B. Simulated Annealing. Electrical Engineering and Computer Science Technical Reports 170 (1992)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 100, + 542, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 100, + 542, + 140 + ], + "spans": [ + { + "bbox": [ + 69, + 100, + 542, + 140 + ], + "type": "text", + "content": "51 Dupin, N., Nielsen, F. & Talbi, E. Dynamic Programming heuristic for k-means Clustering among a 2-dimensional Pareto Frontier. 7th Internat. Conf. on Metaheuristics and Nature Inspired Computing (2018)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 141, + 542, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 141, + 542, + 168 + ], + "spans": [ + { + "bbox": [ + 69, + 141, + 542, + 168 + ], + "type": "text", + "content": "52 Sakabe, M. & Yagiura, M. An efficient tabu search algorithm for the linear ordering problem. J Adv Mech Des Syst Manuf 16, JAMDSM0041-JAMDSM0041 (2022)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 169, + 542, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 169, + 542, + 195 + ], + "spans": [ + { + "bbox": [ + 69, + 169, + 542, + 195 + ], + "type": "text", + "content": "53 Delgado, A. & Thaler, J. Quantum annealing for jet clustering with thrust. Phys Rev D 106 (2022)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 197, + 542, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 197, + 542, + 224 + ], + "spans": [ + { + "bbox": [ + 69, + 197, + 542, + 224 + ], + "type": "text", + "content": "54 Mao, Z., Matsuda, Y., Tamura, R. & Tsuda, K. Chemical design with GPU-based Ising machines. Digit Discov 2, 1098-1103 (2023)." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 525, + 742, + 539, + 753 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 79, + 113, + 531, + 335 + ], + "blocks": [ + { + "bbox": [ + 70, + 72, + 124, + 91 + ], + "lines": [ + { + "bbox": [ + 70, + 72, + 124, + 91 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 124, + 91 + ], + "type": "text", + "content": "Figures" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 79, + 113, + 531, + 335 + ], + "lines": [ + { + "bbox": [ + 79, + 113, + 531, + 335 + ], + "spans": [ + { + "bbox": [ + 79, + 113, + 531, + 335 + ], + "type": "image", + "image_path": "a8cb85a9316795dc6f82454dfcbe57b04e7e91a649d20ea1acb66f8e645c0d90.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 80, + 344, + 529, + 564 + ], + "blocks": [ + { + "bbox": [ + 80, + 344, + 529, + 564 + ], + "lines": [ + { + "bbox": [ + 80, + 344, + 529, + 564 + ], + "spans": [ + { + "bbox": [ + 80, + 344, + 529, + 564 + ], + "type": "image", + "image_path": "691843c0ba4b3d84d880dd78aac48d6de4dedbd2f73f05e2d97a542a22d9e6ad.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 566, + 541, + 622 + ], + "lines": [ + { + "bbox": [ + 67, + 566, + 541, + 622 + ], + "spans": [ + { + "bbox": [ + 67, + 566, + 541, + 622 + ], + "type": "text", + "content": "Fig. 1. Performance analysis of classical (IP, SA, SD, TS, PT-ICM, SA-QBSolv, and PT-ICM-QBSolv) and quantum (QA-QBSolv, and HQA) solvers on QUBO problems representing real-world optimization tasks in material science. (a) Relative accuracy and (b) solving time of the solvers." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 526, + 742, + 540, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 526, + 742, + 540, + 753 + ], + "spans": [ + { + "bbox": [ + 526, + 742, + 540, + 753 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 72, + 541, + 305 + ], + "blocks": [ + { + "bbox": [ + 72, + 72, + 541, + 305 + ], + "lines": [ + { + "bbox": [ + 72, + 72, + 541, + 305 + ], + "spans": [ + { + "bbox": [ + 72, + 72, + 541, + 305 + ], + "type": "image", + "image_path": "3d096df4f9d88e5730c1ff85f4fc8195cc0ced5d1e2af08b268e6fae176f0453.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 308, + 541, + 352 + ], + "lines": [ + { + "bbox": [ + 67, + 308, + 541, + 352 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 541, + 352 + ], + "type": "text", + "content": "Fig. 2. The relative accuracy of the classical (IP, SA, SD, TS, and SA-QBSolv) and quantum (QA-QBSolv, and HQA) solvers for given QUBO problems. HQA is the best solver for finding the highest-quality solution for all problem sizes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 76, + 404, + 306, + 566 + ], + "blocks": [ + { + "bbox": [ + 76, + 404, + 306, + 566 + ], + "lines": [ + { + "bbox": [ + 76, + 404, + 306, + 566 + ], + "spans": [ + { + "bbox": [ + 76, + 404, + 306, + 566 + ], + "type": "image", + "image_path": "b2225fd80582c1a6d58446009e52cb7580120fa78d8fb8c21a6a179b22e90082.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 570, + 541, + 627 + ], + "lines": [ + { + "bbox": [ + 67, + 570, + 541, + 627 + ], + "spans": [ + { + "bbox": [ + 67, + 570, + 541, + 627 + ], + "type": "text", + "content": "Fig. 3. Solving time of the solvers for given QUBO problems. The solving time of (a) the classical and quantum solvers and (b) the classical solvers (SA, SD, and TS) for small QUBO problems. Quantum solvers do not scale in solving time as the problem size increases, which is a great advantage over classical counterparts." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 309, + 403, + 533, + 566 + ], + "blocks": [ + { + "bbox": [ + 309, + 403, + 533, + 566 + ], + "lines": [ + { + "bbox": [ + 309, + 403, + 533, + 566 + ], + "spans": [ + { + "bbox": [ + 309, + 403, + 533, + 566 + ], + "type": "image", + "image_path": "2fba1a69a62f326d6496fa444805cf7cb9d4f83d11029d8a05f043c24cc1c55c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 526, + 742, + 541, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 526, + 742, + 541, + 753 + ], + "spans": [ + { + "bbox": [ + 526, + 742, + 541, + 753 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 86, + 309, + 242 + ], + "blocks": [ + { + "bbox": [ + 77, + 86, + 309, + 242 + ], + "lines": [ + { + "bbox": [ + 77, + 86, + 309, + 242 + ], + "spans": [ + { + "bbox": [ + 77, + 86, + 309, + 242 + ], + "type": "image", + "image_path": "e8de5aea7294c5fa21c710d033ec625ad4b86fcd3d99b909d3310eb92ec79d99.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 247, + 541, + 288 + ], + "lines": [ + { + "bbox": [ + 67, + 247, + 541, + 288 + ], + "spans": [ + { + "bbox": [ + 67, + 247, + 541, + 288 + ], + "type": "text", + "content": "Fig. 4. Performance of the QA-QBSolv solver with different decomposition sizes. (a) Relative accuracy and (b) Solving time of the QA-QBSolv solver for given QUBO problems with different sub-QUBO sizes." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 313, + 84, + 539, + 242 + ], + "blocks": [ + { + "bbox": [ + 313, + 84, + 539, + 242 + ], + "lines": [ + { + "bbox": [ + 313, + 84, + 539, + 242 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 539, + 242 + ], + "type": "image", + "image_path": "0dc799fad8a578e4fc936a6f60b7cd51a35fb007df006f3fbbccca020db0e587.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 526, + 742, + 539, + 753 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 526, + 742, + 539, + 753 + ], + "spans": [ + { + "bbox": [ + 526, + 742, + 539, + 753 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 122, + 219, + 233 + ], + "blocks": [ + { + "bbox": [ + 76, + 122, + 219, + 233 + ], + "lines": [ + { + "bbox": [ + 76, + 122, + 219, + 233 + ], + "spans": [ + { + "bbox": [ + 76, + 122, + 219, + 233 + ], + "type": "image", + "image_path": "590134f4c031fae88771d2e5aa825341c5ba16298aab8804b412e18b733c476d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 227, + 121, + 376, + 233 + ], + "blocks": [ + { + "bbox": [ + 227, + 121, + 376, + 233 + ], + "lines": [ + { + "bbox": [ + 227, + 121, + 376, + 233 + ], + "spans": [ + { + "bbox": [ + 227, + 121, + 376, + 233 + ], + "type": "image", + "image_path": "ca3232bf1275cd24819fdc30b46c463658d46f4f071687fa7d455c1996177a86.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, 
+ "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 378, + 123, + 522, + 233 + ], + "blocks": [ + { + "bbox": [ + 378, + 123, + 522, + 233 + ], + "lines": [ + { + "bbox": [ + 378, + 123, + 522, + 233 + ], + "spans": [ + { + "bbox": [ + 378, + 123, + 522, + 233 + ], + "type": "image", + "image_path": "fed61778896eedc872a2ee953f0454d1c1d1df2e5d2dd11118b668eedd8f7389.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 77, + 235, + 227, + 346 + ], + "blocks": [ + { + "bbox": [ + 77, + 235, + 227, + 346 + ], + "lines": [ + { + "bbox": [ + 77, + 235, + 227, + 346 + ], + "spans": [ + { + "bbox": [ + 77, + 235, + 227, + 346 + ], + "type": "image", + "image_path": "6deccb0dcac8e41c85f23661aab5cb85956088933112941c178b3170cb344792.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 352, + 541, + 449 + ], + "lines": [ + { + "bbox": [ + 67, + 352, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 67, + 352, + 541, + 449 + ], + "type": "text", + "content": "Fig. S1. Comparison of QUBO matrices for real-world optimization and Max-Cut problems. (a-c) QUBO matrices representing the optimization of planar multilayered structures (PMLs) with problem sizes of (a) 100, (b) 500, and (c) 3,000. The dense configurations of these matrices reflect the fully connected nature of interactions in material optimization problems. (d-f) QUBO matrices derived from Max-Cut problem instances in the G-set" + }, + { + "bbox": [ + 67, + 352, + 541, + 449 + ], + "type": "inline_equation", + "content": "^{S1}" + }, + { + "bbox": [ + 67, + 352, + 541, + 449 + ], + "type": "text", + "content": ": (d) G5, (e) G15, and (f) G40. These matrices exhibit sparse configurations, with relatively few pairwise interactions compared to their maximum possible connections." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 228, + 235, + 376, + 346 + ], + "blocks": [ + { + "bbox": [ + 228, + 235, + 376, + 346 + ], + "lines": [ + { + "bbox": [ + 228, + 235, + 376, + 346 + ], + "spans": [ + { + "bbox": [ + 228, + 235, + 376, + 346 + ], + "type": "image", + "image_path": "1417de3df697e4e053ffea4df6d77a5ebdaefb50b3187f1972f3050156c75f08.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 380, + 235, + 527, + 346 + ], + "blocks": [ + { + "bbox": [ + 380, + 235, + 527, + 346 + ], + "lines": [ + { + "bbox": [ + 380, + 235, + 527, + 346 + ], + "spans": [ + { + "bbox": [ + 380, + 235, + 527, + 346 + ], + "type": "image", + "image_path": "2a620e676a2beae0c82e88cad52c70b73e03b2af6941c4c6ef5de056c159c0c8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 78, + 496, + 301, + 662 + ], + "blocks": [ + { + "bbox": [ + 78, + 496, + 301, + 662 + ], + "lines": [ + { + "bbox": [ + 78, + 496, + 301, + 662 + ], + "spans": [ + { + "bbox": [ + 78, + 496, + 301, + 662 + ], + "type": "image", + "image_path": "1d459dddad5c45442c94fd4248e3d25df27831ab5659e8ee16015f75ac1fbd30.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 69, + 662, + 541, + 691 + ], + "lines": [ + { + "bbox": [ + 69, + 662, + 541, + 691 + ], + "spans": [ + { + "bbox": [ + 69, + 662, + 541, + 691 + ], + "type": "text", + "content": "Fig. S2. 
Example QUBO matrices. The size of the given QUBO problems is (a) 120 and (b) 1,000 with a standard deviation of 0.1." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 318, + 495, + 541, + 662 + ], + "blocks": [ + { + "bbox": [ + 318, + 495, + 541, + 662 + ], + "lines": [ + { + "bbox": [ + 318, + 495, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 318, + 495, + 541, + 662 + ], + "type": "image", + "image_path": "31ba301c7d69a67dd38eb2e4932a89306684577806e4211b5dd4f78757c26c08.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 72, + 217, + 86 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 217, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 217, + 86 + ], + "type": "text", + "content": "Supplementary Information" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 203, + 105, + 424, + 275 + ], + "blocks": [ + { + "bbox": [ + 203, + 105, + 424, + 275 + ], + "lines": [ + { + "bbox": [ + 203, + 105, + 424, + 275 + ], + "spans": [ + { + "bbox": [ + 203, + 105, + 424, + 275 + ], + "type": "image", + "image_path": "bd938d5eeace304163712a0d6f853d54e179edfffc5c73dc6712007366e9be79.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 280, + 541, + 336 + ], + "lines": [ + { + "bbox": [ + 68, + 280, + 541, + 336 + ], + "spans": [ + { + "bbox": [ + 68, + 280, + 541, + 336 + ], + "type": "text", + "content": "Fig. S3. Time complexity of simulated annealing (SA), steepest descent (SD), and tabu search (TS). This plot is from calculation results based on the theoretical time complexity (see 2-4-2. Computational Time in the main text), so it does not have metrics. The plot agrees well with the solving time plot depicted in Fig. 2b." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 152, + 545, + 208 + ], + "blocks": [ + { + "bbox": [ + 70, + 85, + 541, + 140 + ], + "lines": [ + { + "bbox": [ + 70, + 85, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 70, + 85, + 541, + 140 + ], + "type": "text", + "content": "Table S1. Statistical properties of QUBO coefficients for real-world optimization problems. The table summarizes the average (avg) and standard deviation (std) of QUBO coefficients across different problem sizes " + }, + { + "bbox": [ + 70, + 85, + 541, + 140 + ], + "type": "inline_equation", + "content": "(n)" + }, + { + "bbox": [ + 70, + 85, + 541, + 140 + ], + "type": "text", + "content": ". The average values of the coefficients are close to zero, and the standard deviation ranges from 0.2 to 2." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 152, + 545, + 208 + ], + "lines": [ + { + "bbox": [ + 70, + 152, + 545, + 208 + ], + "spans": [ + { + "bbox": [ + 70, + 152, + 545, + 208 + ], + "type": "table", + "html": "
<table><tr><td>n</td><td>50</td><td>100</td><td>200</td><td>500</td><td>1000</td><td>3000</td><td>5000</td><td>10000</td></tr>
<tr><td>avg</td><td>0.0025</td><td>-0.0014</td><td>0.0003</td><td>-0.0004</td><td>0.0001</td><td>0.0016</td><td>0.0012</td><td>0.0008</td></tr>
<tr><td>std</td><td>0.2491</td><td>0.7440</td><td>0.8083</td><td>1.3319</td><td>1.5090</td><td>1.9519</td><td>2.0372</td><td>2.0706</td></tr></table>
", + "image_path": "9537d088a04967bfad78eb524d6fa7fc1eac919745a49fe9f57def7534fd8be3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 79, + 315, + 532, + 517 + ], + "blocks": [ + { + "bbox": [ + 71, + 275, + 541, + 303 + ], + "lines": [ + { + "bbox": [ + 71, + 275, + 541, + 303 + ], + "spans": [ + { + "bbox": [ + 71, + 275, + 541, + 303 + ], + "type": "text", + "content": "Table S2. Density of Max-Cut problem instances. These instances feature sparse QUBO matrices with a density lower than " + }, + { + "bbox": [ + 71, + 275, + 541, + 303 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 71, + 275, + 541, + 303 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 315, + 532, + 517 + ], + "lines": [ + { + "bbox": [ + 79, + 315, + 532, + 517 + ], + "spans": [ + { + "bbox": [ + 79, + 315, + 532, + 517 + ], + "type": "table", + "html": "
<table><tr><td>Instances</td><td># Nodes</td><td># Edges</td><td># Maximum Edges</td><td>Density (%)</td></tr>
<tr><td>G5</td><td>800</td><td>19,176</td><td>319,600</td><td>6.0000</td></tr>
<tr><td>G10</td><td>800</td><td>19,176</td><td>319,600</td><td>6.0000</td></tr>
<tr><td>G15</td><td>800</td><td>4,661</td><td>319,600</td><td>1.4583</td></tr>
<tr><td>G20</td><td>800</td><td>4,672</td><td>319,600</td><td>1.4618</td></tr>
<tr><td>G30</td><td>2,000</td><td>19,900</td><td>1,999,000</td><td>0.9954</td></tr>
<tr><td>G40</td><td>2,000</td><td>11,766</td><td>1,999,000</td><td>0.5885</td></tr>
<tr><td>G50</td><td>3,000</td><td>6,000</td><td>4,498,500</td><td>0.1333</td></tr>
<tr><td>G55</td><td>5,000</td><td>12,498</td><td>12,497,500</td><td>0.1000</td></tr>
<tr><td>G60</td><td>7,000</td><td>17,148</td><td>24,496,500</td><td>0.0700</td></tr>
<tr><td>G70</td><td>10,000</td><td>9,999</td><td>49,995,000</td><td>0.0200</td></tr></table>
", + "image_path": "7e2d928dd61b4f39f2ede4b6fad96500fe72db9030197351e71c02b3d19d4b8d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 71, + 72, + 126, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 72, + 126, + 85 + ], + "spans": [ + { + "bbox": [ + 71, + 72, + 126, + 85 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 99, + 444, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 99, + 444, + 114 + ], + "spans": [ + { + "bbox": [ + 70, + 99, + 444, + 114 + ], + "type": "text", + "content": "S1 Ye, Y. [online] Available: https://web.stanford.edu/~yyyve/yyye/Gset/." + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_content_list.json b/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..dd8ffcc2598c9f5134d22459e25de9161c9cde8c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_content_list.json @@ -0,0 +1,1587 @@ +[ + { + "type": "text", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "text_level": 1, + "bbox": [ + 109, + 109, + 864, + 156 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Biao Zhang* Fedor Moiseev* Joshua Ainslie* Paul Suganthan* Min Ma* Surya Bhupatiraju Fede Lebron Orhan Firat Armand Joulin Zhe Dong*", + "bbox": [ + 94, + 198, + 879, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 241, + 257, + 320, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While decoder-only large language models (LLMs) have shown impressive results, encoder-decoder models are still widely adopted in real-world applications for their inference efficiency and richer encoder representation. In this paper, we study a novel problem: adapting pretrained decoder-only LLMs to encoder-decoder, with the goal of leveraging the strengths of both approaches to achieve a more favorable quality-efficiency trade-off. We argue that adaptation not only enables inheriting the capability of decoder-only LLMs but also reduces the demand for computation compared to pretraining from scratch. We rigorously explore different pretraining objectives and parameter initialization/optimization techniques. Through extensive experiments based on Gemma 2 (2B and 9B) and a suite of newly pretrained mT5-sized models (up to 1.6B), we demonstrate the effectiveness of adaptation and the advantage of encoder-decoder LLMs. Under similar inference budget, encoder-decoder LLMs achieve comparable (often better) pretraining performance but substantially better finetuning performance than their decoder-only counterpart. For example, Gemma 2B-2B outperforms Gemma 2B by $\\sim 7\\%$ after instruction tuning. Encoder-decoder adaptation also allows for flexible combination of different-sized models, where Gemma 9B-2B significantly surpasses Gemma 2B-2B by $>3\\%$ . The adapted encoder representation also yields better results on SuperGLUE. 
We will release our checkpoints to facilitate future research.", + "bbox": [ + 117, + 277, + 444, + 760 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 86, + 787, + 217, + 803 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Neural network architectures are often designed to incorporate certain assumptions or inductive biases regarding the", + "bbox": [ + 84, + 813, + 475, + 844 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "* Core Contributor. Google. Correspondence to: Biao Zhang , Zhe Dong .", + "bbox": [ + 84, + 852, + 477, + 881 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "input data, leading to either improved model performance or better computational efficiency, if not both. Unlike the popular decoder-only architecture used for large language model (LLM) (Brown et al., 2020), the encoder-decoder architecture adopts separate modeling modules – an encoder for input understanding and a decoder for output generation (Vaswani et al., 2017). This separation decouples parameters for different functionalities and thus enjoys higher freedom in handling contextual representation and challenging tasks (Tay et al., 2022; Wang et al., 2022). It also offers high flexibility in changing the encoder and decoder size (e.g., a large encoder paired with a small decoder) to control the quality-efficiency trade-off (Kasai et al., 2020; Zhang et al., 2022), an increasingly important aspect for LLM deployment (Gemini et al., 2024). Despite these benefits, however, the study on encoder-decoder LLMs receive little to no attention nowadays.", + "bbox": [ + 495, + 258, + 887, + 517 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we revisit this classical architecture by exploring the following question: can we get strong(er) encoder-decoder LLMs by adapting from existing pretrained decoder-only LLMs? We consider the adaptation more significantly than pretraining new models from scratch since pretraining is resource-intensive and powerful decoder-only models at different sizes are already widely available (Dubey et al., 2024; Team et al., 2024; Liu et al., 2024a; Yang et al., 2024; Jiang et al., 2024). Our hypothesis is that, by reusing parameters from decoder-only models, we can accelerate training and effectively transfer their internal knowledge to encoder-decoder, preserving (even enhancing) their capabilities. Note adaptation also allows for pairing varying-sized decoder-only models to achieve specific quality-efficiency considerations. Yet, the optimal method for such adaptation and the extent to which performance can be improved remain open questions, which we aim to address rigorously.", + "bbox": [ + 495, + 523, + 888, + 781 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We employ Gemma 2 (Team et al., 2024) as the testbed. As shown in Figure 1, the encoder-decoder architecture follows the original Transformer (Vaswani et al., 2017) but equipped with Gemma 2 modifications. 
The key idea behind the adaptation is to initialize the parameters of the encoder-decoder model from pretrained decoder-only model(s) as a warmup and then pretrain or adapt all parameters with self", + "bbox": [ + 495, + 787, + 888, + 893 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06225v1 [cs.CL] 8 Apr 2025", + "bbox": [ + 22, + 272, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/117636db4067cddc308df04a48879f38f22774c43820412bd49fab3eac458ef7.jpg", + "image_caption": [ + "Figure 1: Overview of our approach. We build encoder-decoder models by adapting from pretrained decoder-only models. Model architecture and parameters are inherited from the decoder-only model except the cross-attention, for which we adopt different initialization methods depending on the encoder and decoder size. \"ROPE\": rotary embedding; \"FFN\": feed-forward layer." + ], + "image_footnote": [], + "bbox": [ + 209, + 80, + 763, + 218 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "supervised learning. Depending on whether the encoder and the decoder share the same configuration, we propose different initialization and optimization strategies for the cross-attention layer. We also compare different pretraining objectives, including prefix language modeling with knowledge distillation (Hinton et al., 2015) and UL2 (Tay et al., 2022). Apart from Gemma 2 2B and 9B, we pretrain a series of small models to better understand the adaptation at different scales.", + "bbox": [ + 84, + 292, + 475, + 429 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To thoroughly evaluate model performance, we adopt different benchmarks for pretrained and instruction-tuned models respectively, each covering a range of established academic evaluations. In addition, we use SuperGLUE (Wang et al., 2019a) to measure the quality of the learned contextual representations. Our main findings are below:", + "bbox": [ + 84, + 436, + 475, + 527 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Leveraging pretrained decoder-only LLMs is an effective way to build powerful encoder-decoder LLMs, which yields substantially improved downstream performance particularly after instruction tuning under similar inference flops.", + "- Our adaptation method is highly flexible, allowing for pairing large encoder with small decoder, such as 9B-2B, with significant quality gains over Gemma 2 2B but similar generation latency.", + "- Adaptation is not only more compute efficient but also more effective than pretraining from scratch.", + "- Pretraining objective matters. Models trained with prefix language modeling and knowledge distillation are generally better at generative tasks, while UL2 models have better encoder representations." + ], + "bbox": [ + 104, + 544, + 473, + 799 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 84, + 819, + 227, + 834 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While the decoder-only architecture has become the de facto standard for LLMs, the debate between encoder-decoder and decoder-only modeling is still not conclusive. 
Many prior studies proposed different approaches to pretrain strong", + "bbox": [ + 84, + 845, + 475, + 906 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "encoder-decoder models, e.g., MASS (Song et al., 2019), T5 (Raffel et al., 2020), mT5 (Xue et al., 2021), byT5 (Xue et al., 2022), BART (Lewis et al., 2020), and OpenBA (Li et al., 2023). Tay et al. (2022) compared different pretraining objectives, highlighting the superiority of UL2 and encoder-decoder modeling. Zhang et al. (2022) systematically examined the scaling behavior of both architectures on machine translation, showing their similarity when adequate objectives are applied. Wang et al. (2022) thoroughly explored different modeling choices and training objectives with a focus on LLM zero-shot generalization. They discovered that encoder-decoder LLMs after instruction tuning achieve the best performance, echoing with our experiments. They also studied adaptation, but it is between different pretraining objectives rather than from decoder-only LLMs to encoder-decoder LLMs.", + "bbox": [ + 495, + 292, + 888, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Leveraging pretrained models for encoder-decoder modeling has been extensively explored. In the BERT era (Devlin et al., 2019), researchers developed different ways of utilizing it to enhance encoder-decoder performance on downstream tasks, such as machine translation (Zhu et al., 2020; Clinchant et al., 2019; Yang et al., 2020), grammatical error correction (Kaneko et al., 2020), summarization (Liu & Lapata, 2019), and text generation (Chen et al., 2019). Our work follows a similar spirit but is based on pretrained decoder-only LLMs and focuses on developing general-purpose encoder-decoder LLMs.", + "bbox": [ + 495, + 542, + 888, + 708 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Another related direction is the development of inference friendly LLMs. Techniques for improving inference efficiency are many, ranging from quantization (Dettmers & Zettlemoyer, 2023), key-value cache optimization (Corallo & Papotti, 2024), recurrent modeling (Gu & Dao, 2023; Botev et al., 2024), to strong small LLMs with improved pretraining (Abdin et al., 2024; Liu et al., 2024b), to name a few. While these techniques offer significant efficiency gains, their focus is fundamentally distinct and complementary to our proposed encoder-decoder adaptation, i.e., both approaches can be used in conjunction to realize greater overall efficiency.", + "bbox": [ + 495, + 715, + 888, + 897 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 70 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Approach: Encoder-Decoder Adaptation", + "text_level": 1, + "bbox": [ + 84, + 83, + 450, + 101 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Architecture", + "text_level": 1, + "bbox": [ + 86, + 109, + 207, + 125 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Pretraining LLMs is both compute and time intensive. To reduce the amount of training required, we propose to adapt existing decoder-only LLMs to encoder-decoder and leverage pretrained decoder-only checkpoints for initialization, as shown in Figure 1. 
Due to this, we keep the encoder-decoder architecture as similar as possible to original decoder-only model, only introducing changes when necessary. This results in the following architecture:", + "bbox": [ + 84, + 132, + 475, + 253 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Encoder has exactly the same architecture as the decoder-only model, but self-attention is switched from causal to bidirectional. We provide ablations in Section 6 that illustrate the critical effect of bidirectional attention on downstream performance.", + "2. In each Decoder block, FFN and self-attention parts are identical to the corresponding parts in decoder-only models, and cross-attention has the same number of heads and head dimension as self-attention, but attends to the whole output of the encoder." + ], + "bbox": [ + 98, + 268, + 475, + 429 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We base our study on Gemma 2 (Team et al., 2024). But note our approach is highly flexible and isn't restricted to specific decoder-only architectures. We can easily apply our method to other model families, such as LLaMA (Dubey et al., 2024), QWen (Yang et al., 2024), and DeepSeek (Liu et al., 2024a). In theory, we can also adapt decoder-only models from different families, such as pairing LLaMA models with QWen models.", + "bbox": [ + 84, + 441, + 475, + 564 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In addition, our approach allows for unbalanced encoder-decoder models, where the decoder is significantly smaller than the encoder. This provides better support for applications where input processing capabilities are more important than generative capacity. For example, for summarization, deep understanding of the input text is often more important than the generation part, as it doesn't need to generate any new information. As a result, generation time is significantly reduced, while providing competitive quality.", + "bbox": [ + 84, + 571, + 475, + 708 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Initialization", + "text_level": 1, + "bbox": [ + 86, + 723, + 210, + 738 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "When initializing an encoder-decoder model from a decoder-only checkpoint, we try to map every layer to the most similar weight in the decoder-only checkpoint. In particular, the encoder is fully initialized from the decoder-only checkpoint, as it doesn't introduce any new weights. In the decoder, FFN and self-attention subblocks are initialized from the FFN and self-attention weights from the corresponding layers in the decoder-only checkpoint.", + "bbox": [ + 84, + 747, + 475, + 868 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Cross-attention is initialized from self-attention weights in the balanced setup where encoder and decoder have the", + "bbox": [ + 84, + 875, + 473, + 905 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "same configuration. Otherwise, we first initialize crossattention from scratch and then finetune it for the first $K$ steps as a warmup while freezing other model parameters. After $K$ steps, all model parameters are tuned.", + "bbox": [ + 496, + 84, + 887, + 146 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Pretraining Objective", + "text_level": 1, + "bbox": [ + 496, + 161, + 684, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Decoder-only pretraining often adopts causal language modeling on a single sequence. 
In contrast, encoder-decoder adaptation requires separate input and target sequences to be fed to the encoder and decoder separately. We explore two classical pretraining objectives for encoder-decoder modeling: prefix language modeling (PrefixLM) and UL2 (Tay et al., 2022; Wang et al., 2022).", + "bbox": [ + 495, + 185, + 887, + 291 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "PrefixLM behaves similar to causal language modeling except for its prefix condition. To simplify the preprocessing, we split a sequence equally into two halves, the first half used as input and the second one as target. This also eases the adoption of knowledge distillation from decoder-only models. UL2 is more complicated. It is composed of several denoising tasks at different levels of complexity. We prepare UL2 data following Tay et al. (2022). We compare their performance in experiments.", + "bbox": [ + 495, + 297, + 888, + 435 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. Setup", + "text_level": 1, + "bbox": [ + 496, + 454, + 571, + 470 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Setting Our data for pretraining and instruction tuning – including supervised finetuning (SFT) and reinforcement learning from human feedback (RLHF) – follow Gemma 2 (Team et al., 2024). For the adaptation, we preprocess the Gemma 2 pretraining data (8 trillion tokens) with PrefixLM and UL2. Note Gemma 2 pretraining data comes with knowledge distillation. We preserve this information for PrefixLM while adopting ground-truth targets for UL2 as mapping the teacher logits to UL2 is non-trivial. The preprocessed data has an input-output sequence length of 4096-4096 and 8192-8192 for PrefixLM and UL2, respectively. We adapt our models on up to 2 trillion tokens.", + "bbox": [ + 495, + 479, + 887, + 661 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Model Setting We use Gemma 2 (2B and 9B) as the base decoder-only LLM. We also pretrain several smaller models (Small, Base, Large, and XL) following mT5 configurations (Xue et al., 2021) under the Gemma 2 framework, and then adapt them to encoder-decoder LLMs. Detailed model configurations are given in Table 1.", + "bbox": [ + 495, + 676, + 887, + 768 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Evaluation We employ diverse academic evaluation datasets to evaluate different capabilities of LLMs. Concretely, we use the following benchmarks:", + "bbox": [ + 495, + 782, + 887, + 829 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Pretraining (PT) benchmark: Boolq (Clark et al., 2019), SIQA (Sap et al., 2019), PIQA (Bisk et al., 2020), ARC-c&ARC-e (Clark et al., 2018), MMLU (Hendrycks et al., 2021), MMLU Pro (Wang et al., 2024), Hel", + "bbox": [ + 514, + 845, + 888, + 906 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 70 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/416bbeebb01547253fc7b5fc27c306c2b693d521b32756f8c4ea25bef1cb7ed1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td></td><td>#Layers</td><td>d_model</td><td>d_ffn</td><td>#heads (q/kv)</td><td>d_head</td><td colspan="2">#Params</td></tr>
<tr><td></td><td></td><td></td><td></td><td></td><td></td><td>Decoder-Only</td><td>Encoder-Decoder</td></tr>
<tr><td>2B</td><td>26</td><td>2304</td><td>18432</td><td>8/4</td><td>256</td><td>2.0B</td><td>4.0B (2B-2B)</td></tr>
<tr><td>9B</td><td>42</td><td>3584</td><td>28672</td><td>16/8</td><td>256</td><td>8.3B</td><td>16.7B (9B-9B)</td></tr>
<tr><td>S (Small)</td><td>8</td><td>512</td><td>1024</td><td>8/8</td><td>64</td><td>14.7M</td><td>29.4M (S-S)</td></tr>
<tr><td>B (Base)</td><td>12</td><td>768</td><td>2048</td><td>12/12</td><td>64</td><td>56.7M</td><td>113.3M (B-B)</td></tr>
<tr><td>L (Large)</td><td>24</td><td>1024</td><td>2816</td><td>16/16</td><td>64</td><td>204.6M</td><td>409.1M (L-L)</td></tr>
<tr><td>XL (Xlarge)</td><td>24</td><td>2048</td><td>5120</td><td>32/32</td><td>64</td><td>780.3M</td><td>1.6B (XL-XL)</td></tr></table>
", + "bbox": [ + 155, + 80, + 816, + 205 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1: Model configurations. #Layers: number of layers; $d_{model/ffn/head}$ : model/feed-forward/head dimension; #heads ( $q/kv$ ): number of query/value heads. #Params: number of model parameters excluding embeddings. For encoder-decoder models, we show the number of parameters for the balanced architecture, e.g. 2B-2B. The 9B-2B model has 10.4B parameters. “B/M”: billion/million.", + "bbox": [ + 83, + 215, + 887, + 256 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "laSwag (Zellers et al., 2019), Winogrande (Sakaguchi et al., 2021), TruthfulQA (Lin et al., 2021), AGIEval (Zhong et al., 2023), BBH (Suzgun et al., 2022), DROP (Dua et al., 2019), GPQA (Rein et al., 2023), GSM8K (Cobbe et al., 2021), HumanEval (Chen et al., 2021), Lambada (Paperno et al., 2016), MATH-500 (Hendrycks et al., 2021), MBPP (Austin et al., 2021), NQ (Kwiatkowski et al., 2019), TriviaQA (Joshi et al., 2017), and WMT23 (Kocmi et al., 2023). We perform zero/few-shot prompting for pretrained LLMs, and report the averaged result as $PT$ score.", + "bbox": [ + 116, + 281, + 475, + 462 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Instruction-tuning (IT) benchmark: GSM8K, MMLU, MMLU Pro, MBPP, HumanEval, MATH-500, BBH, GPQA (Diamond), WMT23, and MGSM (Shi et al., 2022). We perform zero/few-shot prompting with task-specific instruction for instruction-tuned models, and report the averaged result as IT score.", + "- SuperGLUE (Wang et al., 2019b): we use this benchmark to examine the learned contextual representation. We stack a task-specific head on the representation of the last token in the encoder (decoder) of the encoder-decoder (decoder-only) LLM, and finetune all parameters on the training set. Learning rate, batch size, and dropout are grid-searched for each task. We reformulate all tasks as classification tasks and report averaged dev-set accuracy over COPA, WIC, WSC, RTE, MultiRC, CB, and Boolq." + ], + "bbox": [ + 104, + 470, + 475, + 723 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For generative tasks, we always apply greedy sampling. We perform pretraining, SFT, and RLHF based on the Gemma 2 recipe except for the learning rate which we tune empirically for encoder-decoder LLMs. In unbalanced encoder-decoder adaptation, e.g. 9B-2B, we set the cross-attention warmup step $K$ to 1000.", + "bbox": [ + 84, + 739, + 473, + 829 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "5. Results", + "text_level": 1, + "bbox": [ + 84, + 849, + 171, + 864 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The encoder-decoder adaptation converges rapidly, particularly for balanced architectures. While adaptation", + "bbox": [ + 84, + 875, + 475, + 905 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e84af8ed438269fcb4b932d2777badf545e4bc6420d932ca29791018c17890d4.jpg", + "image_caption": [ + "Figure 2: Pretraining performance as a function of the number of pretrained tokens during the adaptation." + ], + "image_footnote": [], + "bbox": [ + 563, + 282, + 821, + 438 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "leverages pretrained parameters for initialization, whether and how this benefits model convergence is still questionable. Figure 2 shows the change of PT performance with respect to the amount of pretrained tokens. 
Obviously, adaptation is very computationally efficient, converging quickly and achieving similar performance to its decoder-only counterpart after only tens of billions of tokens. Balanced architectures (2B-2B and 9B-9B) converge much faster than the unbalanced ones (9B-2B) since all parameters in the former are initialized from pretrained decoder-only models while the cross-attention in the latter is randomly initialized.", + "bbox": [ + 495, + 505, + 887, + 671 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We also notice that additional pretraining improves balanced models a little on average but substantially benefits some tasks, like GSM8K and DROP. Besides, 9B-2B performance increases consistently during the adaptation, quickly surpassing Gemma 2 2B and moving towards Gemma 2 9B. This demonstrates the feasibility of encoder-decoder adaptation from varying-sized decoder-only LLMs, as well as its ability to utilize the knowledge from pretrained models.", + "bbox": [ + 495, + 678, + 888, + 800 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Pretraining objective matters: UL2 and PrefixLM show different characteristics. Previous study reported the superiority of UL2 over PrefixLM (Tay et al., 2022), but PrefixLM in our study is enhanced with knowledge distillation, which often improves small models significantly. We compare these two objectives for the adaptation in Table 2.", + "bbox": [ + 495, + 814, + 888, + 906 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 70 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/4bbb3f398b8b1bb36540721f23580fdcd02028eff36e3066fff913714b0c47e3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
 | PT Score | | | IT Score | |
 | Gemma 2 | + PrefixLM | + UL2 | Gemma 2 | + PrefixLM | + UL2
2B-2B | 47.9 | 49.7 | 50.1 | (39.0) | 46.4 (46.1) | 42.4
9B-2B | - | 55.0 | 52.9 | - | 49.3 (50.6) | 45.7
9B-9B | 61.7 | 63.1 | 63.9 | (59.6) | 62.9 (64.5) | 61.5
S-S | 23.4 | 22.8 | 23.1 | 6.2 | 9.8 | 10.7
B-B | 26.7 | 26.9 | 26.0 | 9.8 | 12.9 | 11.1
L-L | 32.3 | 31.6 | 30.9 | 12.9 | 17.5 | 18.9
XL-XL | 39.7 | 39.5 | 38.5 | 23.5 | 30.7 | 29.2
", + "bbox": [ + 228, + 80, + 743, + 224 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/e342687ba52ca17516eaef964d4960ad15f8aa80f31daad7aea4c345f96fb85e.jpg", + "table_caption": [ + "(a) Results on PT and IT benchmarks." + ], + "table_footnote": [], + "table_body": "
 | PT Models | | | IT Models | |
 | Gemma 2 | + PrefixLM | + UL2 | Gemma 2 | + PrefixLM | + UL2
2B-2B | 75.5 | 88.1 | 88.1 | (86.2) | 88.3 (87.9) | 90.5
9B-2B | - | 90.2 | 90.7 | - | 90.6 (90.3) | 91.3
9B-9B | 82.5 | 91.4 | 91.8 | (89.8) | 91.8 (91.4) | 91.6
S-S | 67.6 | 69.8 | 69.6 | 67.6 | 68.8 | 69.4
B-B | 68.6 | 71.2 | 71.5 | 68.7 | 72.3 | 73.6
L-L | 68.4 | 78.7 | 79.7 | 68.8 | 78.1 | 80.3
XL-XL | 70.7 | 84.4 | 85.4 | 69.2 | 85.7 | 87.0
", + "bbox": [ + 230, + 255, + 745, + 398 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(b) Finetuned performance on SuperGLUE.", + "bbox": [ + 352, + 402, + 617, + 416 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2: Main results on PT, IT, and SuperGLUE benchmarks. \"Gemma 2\": decoder-only models; \"+\"PrefixLM/UL2\": encoder-decoder models adapted via prefix language modeling (with knowledge distillation)/UL2. We put Gemma 2 results into the corresponding encoder-decoder rows to save space, e.g. 2B-2B for Gemma 2 means Gemma 2 2B. Numbers in parentheses are for RLHFed models. Best results are in bold. Note PT and IT scores are not directly comparable since they are averaged over different tasks.", + "bbox": [ + 83, + 428, + 888, + 481 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We find that PrefixLM and UL2 have their own strengths. Specifically, UL2 delivers stronger contextual representations, outweighing PrefixLM on SuperGLUE across most model scales, resonating with previous findings (Tay et al., 2022). In contrast, PrefixLM produces more powerful generative LLMs thanks to its generation nature and the knowledge distillation. It surpasses UL2 on PT and IT benchmarks in most cases. Particularly, it outperforms UL2 at 9B-2B on both PT and IT by up to 3.6, a significant margin. Since generative LLMs have become the mainstream, we base our following analysis on PrefixLM. We discuss our attempts to combine PrefixLM and UL2 in the next section.", + "bbox": [ + 83, + 506, + 475, + 686 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Encoder-decoder LLMs outperform decoder-only LLMs especially after instruction tuning. Table 2 also shows that the adapted encoder-decoder LLMs achieve comparable or slightly better pretraining performance than their decoder-only counterpart but with substantially improved instruction-tuning performance, echoing with the findings of Wang et al. (2022). For example, the 9B-9B encoder-decoder LLM surpasses Gemma 2 9B by 1.4 and 4.9 on PT and IT, respectively. The performance gap further increases to 1.8 and 7.1 at 2B-2B scale. We notice that the adaption performs slightly worse at scales below 2B on PT, but the improvements on IT are still promising, e.g. 7.2 at XL-XL.", + "bbox": [ + 83, + 724, + 475, + 905 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Regardless of PT or IT models, pretraining objectives, and model scales, encoder-decoder LLMs perform consistently better than decoder-only LLMs on SuperGLUE. This suggests that the contextual representation from encoder-decoder LLMs is often of higher quality, likely due to bidirectional self-attention.", + "bbox": [ + 493, + 506, + 888, + 597 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We need to highlight that the above analysis is based on the overall performance, which may not apply when it comes to a specific downstream task. As shown in Table 3, there are some tasks favoring encoder-decoder models while others favoring decoder-only models especially for PT models. For example, after pretraining, Gemma 2 9B surpasses 9B-9B by 4.1 on ARC-C but underperforms it by 4.4 on Winogrande; while encoder-decoder LLM shows more consistent advantage after instruction tuning, 9B-9B still lags behind Gemma 2 9B by 0.9 on WMT23. This illustrates the complexity when evaluating LLM capability as well as the risk of reaching misleading conclusions when adopting biased evaluation tasks. 
We reduce such risk by selecting as diverse and broad tasks as possible for evaluation.", + "bbox": [ + 493, + 604, + 888, + 816 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Encoder-decoder LLMs balance quality and inference efficiency more effectively. We next analyze different models from the perspective of inference efficiency which becomes increasingly crucial for model deployment. Figure", + "bbox": [ + 493, + 845, + 885, + 906 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 70 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/3f839176a76f6ca49b21f2fe1d9927f962024d2444b345aaeefe51e43261e557.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Task | Metric | Gemma 2 | | Encoder-Decoder Adaptation | |
 | | 2B | 9B | 2B-2B | 9B-2B | 9B-9B
MMLU | 5-shot | 51.9 | 71.1 | 46.8 | 60.3 | 71.3
ARC-C | 25-shot | 55.5 | 69.1 | 52.0 | 59.9 | 65.0
GSM8K | 5-shot | 23.7 | 63.2 | 41.7 | 48.7 | 72.8
AGIEval | 3-5-shot | 31.5 | 53.3 | 35.0 | 43.6 | 53.1
DROP | 3-shot, F1 | 53.3 | 71.5 | 61.4 | 66.9 | 75.7
BBH | 3-shot, CoT | 40.2 | 68.9 | 51.9 | 51.6 | 74.7
Winogrande | 5-shot | 65.2 | 74.3 | 69.5 | 68.1 | 78.7
HellaSwag | 10-shot | 72.9 | 81.8 | 74.9 | 75.7 | 81.0
MATH-500 | 4-shot | 17.2 | 33.4 | 24.2 | 23.6 | 37.8
ARC-e | 0-shot | 81.0 | 88.3 | 77.1 | 82.9 | 85.3
PIQA | 0-shot | 78.4 | 81.6 | 79.0 | 78.3 | 81.1
SIQA | 0-shot | 51.7 | 53.6 | 50.1 | 50.1 | 50.5
Boolq | 0-shot | 75.5 | 77.5 | 75.6 | 84.6 | 85.6
TriviaQA | 5-shot | 60.1 | 76.6 | 51.2 | 66.2 | 75.2
NQ | 5-shot | 30.7 | 43.9 | 28.4 | 37.1 | 43.1
HumanEval | pass@1 | 19.5 | 39.0 | 27.4 | 33.5 | 40.2
MBPP | 3-shot | 30.4 | 52.0 | 37.4 | 43.4 | 55.6
Average | | 49.3 | 64.7 | 52.0 | 57.3 | 66.3
", + "bbox": [ + 251, + 80, + 720, + 368 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/c5f30f5d5027dab8614556e9555e0a41a7cbd800c71bc65594505349fcf4bb25.jpg", + "table_caption": [ + "(a) Results for pretrained models." + ], + "table_footnote": [], + "table_body": "
Task | Metric | Gemma 2 | | Encoder-Decoder Adaptation | |
 | | 2B | 9B | 2B-2B | 9B-2B | 9B-9B
GSM8K | 11-shot | 58.0 | 84.3 | 70.7 | 73.8 | 88.6
MMLU | 5-shot | 49.8 | 71.8 | 61.5 | 66.7 | 76.7
MMLU Pro | 5-shot | 27.4 | 49.9 | 36.6 | 43.0 | 55.7
MBPP | 3-shot | 37.8 | 59.2 | 44.0 | 49.8 | 64.8
HumanEval | pass@1 | 43.3 | 65.9 | 47.6 | 55.5 | 72.0
MATH-500 | 0-shot | 24.4 | 45.8 | 28.2 | 30.0 | 47.2
BBH | 3-shot | 44.8 | 72.0 | 57.5 | 57.6 | 76.4
GPQA | 0-shot | 24.8 | 29.9 | 27.5 | 32.6 | 35.7
GPQA Diamond | 0-shot | 27.8 | 29.8 | 26.8 | 29.3 | 40.4
WMT23 | 5-shot, BLEURT | 65.2 | 72.0 | 59.9 | 65.3 | 71.1
MGSM | 8-shot | 26.3 | 74.9 | 46.8 | 53.5 | 80.7
Average | | 39.0 | 59.6 | 46.1 | 50.6 | 64.5
", + "bbox": [ + 225, + 400, + 746, + 604 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(b) Results for RLHFed models.", + "bbox": [ + 385, + 609, + 580, + 623 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3: Detailed results on different tasks for PT and RLHFed models. We compare Gemma 2 and encoder-decoder models adapted via PrefixLM. Best results are in bold.", + "bbox": [ + 84, + 647, + 883, + 674 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 shows that balanced encoder-decoder LLMs have similar inference flops to their decoder-only counterparts, e.g. 2B-2B vs. Gemma 2 2B. As such, encoder-decoder models often dominate the quality-inference efficiency frontier across PT, IT, and SuperGLUE benchmarks.", + "bbox": [ + 84, + 700, + 473, + 777 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We acknowledge that inference flops may not correlate well with actual running speed due to factors like inter-device communication, key-value caching, and autoregressive bottleneck. We then provide the latency results measured on GSM8K for 2B and 9B models in Figure 4, which further verified the above analysis. 9B-9B and 2B-2B show similar latency to Gemma 2 9B and 2B, respectively, but clearly better performance. In particular, 9B-2B, the one pairing large", + "bbox": [ + 84, + 784, + 475, + 905 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "encoder and small decoder, shows similar latency to Gemma 2 2B but significantly better performance than 2B-2B.", + "bbox": [ + 496, + 700, + 883, + 732 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Together, these confirm that encoder-decoder adaptation indeed provides a more flexible way for balancing between quality and inference speed.", + "bbox": [ + 496, + 739, + 883, + 784 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6. Discussion", + "text_level": 1, + "bbox": [ + 496, + 804, + 609, + 819 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Is the improvement after the adaptation simply due to the extra pretraining compute? Not really. We also tried to apply more pretraining compute to Gemma 2 2B by going through another 6 trillion tokens, which leads to a PT score of 48.57, still significantly below the encoder-", + "bbox": [ + 495, + 830, + 885, + 905 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 70 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5bc3292c9075390db3ad037b56681680df820b5ed6361ee93024ceb18506f6da.jpg", + "image_caption": [ + "Figure 3: Comparisons of decoder-only LLMs with adapted encoder-decoder models under inference flops. We show PT, IT, and SuperGLUE performance. Inference flops is estimated with a sequence length of 4096-4096 and 8192 for encoder-decoder and decoder-only LLMs, respectively. Note the upper left corner marks the quality-efficiency frontier." 
+ ], + "image_footnote": [], + "bbox": [ + 114, + 84, + 359, + 233 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/51bbcc46d8fda806b753bf35c24c7ec6a4f852660553929aa38199e5069a40d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 364, + 82, + 609, + 233 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d2f2cfdf0577b291f7b73a7bf7b879e70cfaae71e626c66933421a10b6d03157.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 80, + 859, + 233 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b62435503ae00d84a7a4eecfced3a619d2992128db4f0dea34e324725f8e2c26.jpg", + "image_caption": [ + "Figure 4: GSM8K performance as a function of latency for RL-HFed models. Latency is estimated as milliseconds (ms) per query by answering 200 reasoning questions from GSM8K. Batch size of 1 is used." + ], + "image_footnote": [], + "bbox": [ + 150, + 313, + 408, + 470 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "decoder adaptation, 49.7. This indicates that the additional pretraining compute can't fully explain the improvements from the adaptation and we argue that the inductive bias of encoder-decoder modeling plays a crucial role.", + "bbox": [ + 83, + 564, + 473, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Does cross-attention warmup matter for unbalanced encoder-decoder? Yes. Our preliminary experiments with 9B-2B and UL2 on 800B tokens show that the pretraining performance over Boolq and GSM8K reduces from 62.5 to 61.8 without the warmup. Besides, increasing warmup steps from 1K to 5K further reduces performance to 60.2. An adequate amount of warmup optimization is required to reach the optimal performance.", + "bbox": [ + 83, + 643, + 475, + 765 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Can we switch from grouped-query attention to multi-head self attention for the encoder? Yes but with mixed results. Gemma 2 adopts grouped-query attention (GQA) to improve its decoding efficiency. However, unlike the decoder, the encoder can be fully parallelized during inference, making the use of multi-head attention (MHA) reasonable. We tried to expand GQA in Gemma 2 2B to MHA by replicating head parameters for the encoder self-attention. Under", + "bbox": [ + 83, + 785, + 475, + 905 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/35058235b3a8e97de81c56604e7aa53eee71b1af57cda67fbad7585e55cb8bd2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
 | Adaptation | | | Scratch | |
 | PT | IT | SG | PT | IT | SG
S-S | 22.8 | 9.8 | 68.8 | 24.0 | 9.9 | 70.5
B-B | 26.9 | 12.9 | 72.3 | 28.1 | 11.8 | 75.5
L-L | 31.6 | 17.5 | 78.1 | 30.9 | 17.1 | 78.5
XL-XL | 39.5 | 30.7 | 85.7 | 37.7 | 28.8 | 79.5
2B-2B | 49.7 | 46.4 | 88.3 | 47.1 | 43.9 | 84.5
", + "bbox": [ + 522, + 308, + 861, + 420 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4: Results for encoder-decoder models adapted with PrefixLM (Adaptation) and pretrained from scratch (Scratch). SG: SuperGLUE score for SFTed models.", + "bbox": [ + 496, + 429, + 888, + 469 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "PrefixLM, this improves PT performance to 50.2 by 0.5 at 2B-2B but reduces IT performance to 43.5 by 2.9. We thus still stick to GQA when adapting Gemma 2 2B and 9B for the encoder.", + "bbox": [ + 495, + 494, + 885, + 555 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Does bidirectional self-attention matter for the encoder? Yes. A crucial difference between encoder-decoder and decoder-only LLMs is the use of bidirectional self-attention. We also tested keeping the encoder self-attention causal at 2B-2B, which achieves a PT and IT score of 45.6 and 41.7, lagging behind its bidirectional counterpart substantially by 4.1 and 4.7, respectively. Note, the causal 2B-2B model surpasses Gemma 2 2B on IT by 2.7, although it performs worse on PT. This suggests that bidirectional self-attention contributes greatly to the success of our adaptation, but is not the only factor.", + "bbox": [ + 495, + 571, + 885, + 739 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Would pretraining encoder-decoder LLMs from scratch yield better performance? Not really. Pretraining from scratch is a common method for developing new LLMs. We also pretrained encoder-decoder LLMs from scratch on 8 trillion tokens with PrefixLM. Table 4 summarizes the results. Despite using more pretraining tokens, encoder-decoder LLMs pretrained from scratch only perform better at small scales, such as S-S and B-B, beyond which adaptation shows clear superiority. As such, adaptation is a more computationally efficient way of developing powerful", + "bbox": [ + 495, + 753, + 885, + 905 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 70 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dbcb0e6cce13b037f4c0883ed286cbdc0d95b29ded518c718b721cfc2f428ce9.jpg", + "image_caption": [ + "Figure 5: Quality change for the two-stage optimization. \"UL2-then-PrefixLM\": switch the training objective from UL2 to PrefixLM for the final $10\\%$ tokens; \"PrefixLM-then-UL2\": similar but from PrefixLM to UL2." 
+ ], + "image_footnote": [], + "bbox": [ + 114, + 84, + 359, + 231 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5ec6336377cc82d82a620bc9b2385a17178a9693bcd557a46c65604ff7d76ed2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 364, + 85, + 607, + 229 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bb18af8918cb67a1778002f0f629d0c1e7c9645672e4e9d7249a3689aab6eb52.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 85, + 856, + 229 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/094aec7e87b8d447722242304ed5483546e0e65034b026f8a17558ab2195a3f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 299, + 410, + 455 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4de0f4c3868f0d61d6237f19aeceac00a345c0d159226ff2806bd0f3aed4d81f.jpg", + "image_caption": [ + "Figure 6: Correlation analysis between PT performance and its corresponding IT/SuperGLUE performance." + ], + "image_footnote": [], + "bbox": [ + 151, + 462, + 410, + 618 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "encoder-decoder LLMs.", + "bbox": [ + 84, + 686, + 246, + 699 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Is IT/SuperGLUE score predicable from PT score? Mixed. A general assumption in LLM development is that PT performance can be used as an indicator for downstream applications. We summarize all our ablations and put them in Figure 6. Over all data points and across all model sizes, the correlation is pretty strong: a Spearman's $\\rho$ of 0.97 and 0.89 for IT vs. PT and SuperGLUE vs. PT, respectively. However, when considering data points within each model size separately, the averaged Spearman's $\\rho$ reduces to 0.42 and 0.05, respectively and is not significant anymore.", + "bbox": [ + 84, + 715, + 475, + 867 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In practice, we also noticed that PT checkpoints with weaker performance sometimes yield significantly better IT or Su", + "bbox": [ + 84, + 875, + 475, + 905 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "perGLUE performance. When selecting PT checkpoints for a specific model size, it's better to also examine their IT performance apart from PT results to avoid some biases or overfitting.", + "bbox": [ + 496, + 299, + 885, + 359 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Can we get the best of both worlds from PrefixLM and UL2? This is non-trivial. Our first attempt is to merge checkpoints trained from PrefixLM and UL2 with uniform weighting. Unfortunately, the merged model results in either similar or much worse performance. We argue that PrefixLM and UL2 lead to different training dynamics and converge to very different local minima. Directly merging their weights doesn't work right out of the box.", + "bbox": [ + 496, + 383, + 885, + 505 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We next explore a two-stage optimization, where we first adapt with PrefixLM and then shift to UL2 for the last $10\\%$ of training, and vice versa. Figure 5 shows very mixed results. Switching from PrefixLM to UL2 generally hurts performance. 
In contrast, switching from UL2 to PrefixLM improves IT performance, but suffers from reduction in PT and SuperGLUE performance.", + "bbox": [ + 495, + 513, + 885, + 619 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Another direction is to jointly optimize the model on PrefixLM and UL2, which we leave for future work.", + "bbox": [ + 496, + 626, + 887, + 656 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Conclusion and Future Work", + "text_level": 1, + "bbox": [ + 496, + 675, + 766, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we presented methods for building powerful, general purpose encoder-decoder LLMs by adapting from pretrained decoder-only LLMs. Such adaptation offers high flexibility in leveraging different types/families of pretrained decoder-only models as well as combining different-sized models. Through extensive experiments based on Gemma 2, we demonstrated the feasibility and effectiveness of the adaptation: the adapted encoder-decoder LLMs outperform their decoder-only counterparts substantially after instruction tuning, dominating the quality-inference efficiency frontier. Besides, encoder-decoder LLMs also provide better contextual representations as evaluated on SuperGLUE.", + "bbox": [ + 495, + 700, + 885, + 882 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We hope our findings inspire more researchers from", + "bbox": [ + 496, + 890, + 883, + 905 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 70 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "academia and industry to revisit the encoder-decoder paradigm for LLM development. To facilitate the research, we will release the code and checkpoints at XXX (coming soon).", + "bbox": [ + 84, + 85, + 478, + 145 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our work still suffers from several limitations. Particularly, we only experimented with Gemma 2 models up to 9B, although the proposed approach could apply to other LLM families. In the future, we are interested in scaling the model size (e.g., to 27B), exploring other LLMs (such as LLaMA), examining more unbalanced setups, and testing the combination of dense and MoE LLMs. As mentioned above, we will also investigate better ways to leverage PrefixLM, knowledge distillation, and UL2. Extending our adapted encoder-decoder LLM to cross/multi-modality modeling (e.g., vision-language and speech-language) would be another intriguing direction.", + "bbox": [ + 84, + 152, + 478, + 335 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 86, + 353, + 253, + 371 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We'd like to thank Enrique Alfonseca, Tris Warkentin, Xiaodan Song, Sugato Basu, Inderjit Dhillon, Alexander Grushetsky, Pandu Nayak, Ramakrishnan Srikant, and Slav Petrov for their constructive feedback on the manuscript. 
We are grateful to Srinivasan Venkatachary for supporting this project.", + "bbox": [ + 84, + 378, + 475, + 470 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 86, + 489, + 183, + 505 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Abdin, M., Aneja, J., Awadalla, H., Awadallah, A., Awan, A. A., Bach, N., Bahree, A., Bakhtiari, A., Bao, J., Behl, H., et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024.", + "Austin, J., Odena, A., Nye, M., Bosma, M., Michalewski, H., Dohan, D., Jiang, E., Cai, C., Terry, M., Le, Q., et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021.", + "Bisk, Y., Zellers, R., Gao, J., Choi, Y., et al. Piqa: Reasoning about physical commonsense in natural language. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 7432-7439, 2020.", + "Botev, A., De, S., Smith, S. L., Fernando, A., Muraru, G.-C., Haroun, R., Berrada, L., Pascanu, R., Sessa, P. G., Dadashi, R., et al. Recurrentgemma: Moving past transformers for efficient open language models. arXiv preprint arXiv:2404.07839, 2024.", + "Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems, 33: 1877-1901, 2020." + ], + "bbox": [ + 86, + 513, + 475, + 904 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H. P. D. O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021.", + "Chen, Y.-C., Gan, Z., Cheng, Y., Liu, J., and Liu, J. Distilling knowledge learned in bert for text generation. arXiv preprint arXiv:1911.03829, 2019.", + "Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. Boolq: Exploring the surprising difficulty of natural yes/no questions. In NAACL, 2019.", + "Clark, P., Cowhey, I., Etzioni, O., Khot, T., Sabharwal, A., Schoenick, C., and Tafjord, O. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv:1803.05457v1, 2018.", + "Clinchant, S., Jung, K. W., and Nikoulina, V. On the use of BERT for neural machine translation. In Birch, A., Finch, A., Hayashi, H., Konstas, I., Luong, T., Neubig, G., Oda, Y., and Sudoh, K. (eds.), Proceedings of the 3rd Workshop on Neural Generation and Translation, pp. 108-117, Hong Kong, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-5611. URL https://aclanthology.org/D19-5611/.", + "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "Corallo, G. and Papotti, P. FINCH: Prompt-guided key-value cache compression for large language models. Transactions of the Association for Computational Linguistics, 12:1517-1532, 2024. doi: 10.1162/tacl_a_00716. URL https://aclanthology.org/2024.tacl-1.83/.", + "Dettmers, T. and Zettlemoyer, L. The case for 4-bit precision: k-bit inference scaling laws. In International Conference on Machine Learning, pp. 7750-7774. 
PMLR, 2023.", + "Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. BERT: Pre-training of deep bidirectional transformers for language understanding. In Burstein, J., Doran, C., and Solorio, T. (eds.), Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423." + ], + "bbox": [ + 498, + 84, + 887, + 905 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 71 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Dua, D., Wang, Y., Dasigi, P., Stanovsky, G., Singh, S., and Gardner, M. Drop: A reading comprehension benchmark requiring discrete reasoning over paragraphs. arXiv preprint arXiv:1903.00161, 2019.", + "Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.", + "Gemini, T., Reid, M., Savinov, N., Teplyashin, D., Lepikhin, D., Lillicrap, T., Alayrac, J.-b., Soricut, R., Lazaridou, A., First, O., Schrittwieser, J., et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024.", + "Gu, A. and Dao, T. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023.", + "Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. Measuring massive multi-task language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ.", + "Hinton, G., Vinyals, O., and Dean, J. Distilling the knowledge in a neural network, 2015.", + "Jiang, A. Q., Sablayrolles, A., Roux, A., Mensch, A., Savary, B., Bamford, C., Chaplot, D. S., Casas, D. d. l., Hanna, E. B., Bressand, F., et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024.", + "Joshi, M., Choi, E., Weld, D., and Zettlemoyer, L. triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension. arXiv e-prints, art. arXiv:1705.03551, 2017.", + "Kaneko, M., Mita, M., Kiyono, S., Suzuki, J., and Inui, K. Encoder-decoder models can benefit from pre-trained masked language models in grammatical error correction. arXiv preprint arXiv:2005.00987, 2020.", + "Kasai, J., Pappas, N., Peng, H., Cross, J., and Smith, N. A. Deep encoder, shallow decoder: Reevaluating non-autoregressive machine translation. arXiv preprint arXiv:2006.10369, 2020.", + "Kocmi, T., Avramidis, E., Bawden, R., Bojar, O., Dvorkovich, A., Federmann, C., Fishel, M., Freitag, M., Gowda, T., Grundkiewicz, R., Haddow, B., Koehn, P., Marie, B., Monz, C., Morishita, M., Murray, K., Nagata, M., Nakazawa, T., Popel, M., Popovic, M., and Shmatova, M. Findings of the 2023 conference on machine translation (WMT23): LLMs are here but not quite there yet. In Koehn, P., Haddow, B., Kocmi, T., and Monz, C. 
(eds.), Proceedings of the Eighth Conference on Machine" + ], + "bbox": [ + 86, + 84, + 475, + 906 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Translation, pp. 1-42, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.wmt-1.1. URL https://aclanthology.org/2023.wmt-1.1/.", + "Kwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D., Polosukhin, I., Devlin, J., Lee, K., Toutanova, K., Jones, L., Kelley, M., Chang, M.-W., Dai, A. M., Uszkoreit, J., Le, Q., and Petrov, S. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:452-466, 2019. doi: 10.1162/tacl_a_00276. URL https://aclanthology.org/Q19-1026/.", + "Lewis, M., Liu, Y., Goyal, N., Ghazvininejad, M., Mohamed, A., Levy, O., Stoyanov, V., and Zettlemoyer, L. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871-7880, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.703. URL https://aclanthology.org/2020.acl-main.703.", + "Li, J., Tang, Z., Ding, Y., Wang, P., Guo, P., You, W., Qiao, D., Chen, W., Fu, G., Zhu, Q., et al. Openba: An open-sourced 15b bilingual asymmetric seq2seq model pretrained from scratch. arXiv preprint arXiv:2309.10706, 2023.", + "Lin, S., Hilton, J., and Evans, O. Truthfulqa: Measuring how models mimic human falsehoods. arXiv preprint arXiv:2109.07958, 2021.", + "Liu, A., Feng, B., Xue, B., Wang, B., Wu, B., Lu, C., Zhao, C., Deng, C., Zhang, C., Ruan, C., et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a.", + "Liu, Y. and Lapata, M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345, 2019.", + "Liu, Z., Zhao, C., Iandola, F., Lai, C., Tian, Y., Fedorov, I., Xiong, Y., Chang, E., Shi, Y., Krishnamoorthi, R., et al. Mobilellm: Optimizing sub-billion parameter language models for on-device use cases. arXiv preprint arXiv:2402.14905, 2024b.", + "Paperno, D., Kruszewski, G., Lazaridou, A., Pham, Q. N., Bernardi, R., Pezzelle, S., Baroni, M., Boleda, G., and Fernandez, R. The lambada dataset: Word prediction requiring a broad discourse context. arXiv preprint arXiv:1606.06031, 2016." + ], + "bbox": [ + 498, + 84, + 887, + 906 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 71 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., Zhou, Y., Li, W., and Liu, P. J. Exploring the limits of transfer learning with a unified text-to-text transformer. 21(1), jan 2020. ISSN 1532-4435.", + "Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J., and Bowman, S. R. Gpqa: A graduate-level google-proof q&a benchmark. arXiv preprint arXiv:2311.12022, 2023.", + "Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. Winogrande: An adversarial winograd schema challenge at scale. Communications of the ACM, 64(9):99-106, 2021.", + "Sap, M., Rashkin, H., Chen, D., LeBras, R., and Choi, Y. 
Socialiaq: Commonsense reasoning about social interactions. arXiv preprint arXiv:1904.09728, 2019.", + "Shi, F., Suzgun, M., Freitag, M., Wang, X., Srivats, S., Vosoughi, S., Chung, H. W., Tay, Y., Ruder, S., Zhou, D., et al. Language models are multilingual chain-of-thought reasoners. arXiv preprint arXiv:2210.03057, 2022.", + "Song, K., Tan, X., Qin, T., Lu, J., and Liu, T.-Y. Mass: Masked sequence to sequence pre-training for language generation, 2019.", + "Suzgun, M., Scales, N., Schärli, N., Gehrmann, S., Tay, Y., Chung, H. W., Chowdhery, A., Le, Q. V., Chi, E. H., Zhou, D., et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022.", + "Tay, Y., Dehghani, M., Tran, V. Q., Garcia, X., Wei, J., Wang, X., Chung, H. W., Bahri, D., Schuster, T., Zheng, S., et al. Ul2: Unifying language learning paradigms. In The Eleventh International Conference on Learning Representations, 2022.", + "Team, G., Riviere, M., Pathak, S., Sessa, P. G., Hardin, C., Bhupatiraju, S., Hussenot, L., Mesnard, T., Shahriari, B., Ramé, A., et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024.", + "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L. u., and Polosukhin, I. Attention is all you need. In Guyon, I., Luxburg, U. V., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S., and Garnett, R. (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." + ], + "bbox": [ + 86, + 84, + 483, + 905 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wang, A., Pruksachatkun, Y., Nangia, N., Singh, A., Michael, J., Hill, F., Levy, O., and Bowman, S. Superglue: A stickier benchmark for general-purpose language understanding systems. Advances in neural information processing systems, 32, 2019a.", + "Wang, A., Pruksachatkun, Y., Nangia, N., Singh, A., Michael, J., Hill, F., Levy, O., and Bowman, S. R. SuperGLUE: a stickier benchmark for general-purpose language understanding systems. Curran Associates Inc., Red Hook, NY, USA, 2019b.", + "Wang, T., Roberts, A., Hesslow, D., Scao, T. L., Chung, H. W., Beltagy, I., Launay, J., and Raffel, C. What language model architecture and pretraining objective works best for zero-shot generalization? In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., and Sabato, S. (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 22964-22984. PMLR, 17-23 Jul 2022. URL https://proceedings.mlr.press/v162/wang22u.html.", + "Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. arXiv preprint arXiv:2406.01574, 2024.", + "Xue, L., Constant, N., Roberts, A., Kale, M., Al-Rfou, R., Siddhant, A., Barua, A., and Raffel, C. mT5: A massively multilingual pre-trained text-to-text transformer. In Toutanova, K., Rumshisky, A., Zettlemoyer, L., Hakkani-Tur, D., Beltagy, I., Bethard, S., Cotterell, R., Chakraborty, T., and Zhou, Y. (eds.), Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 483-498, Online, June 2021. 
Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.41. URL https://aclanthology.org/2021.naacl-main.41.", + "Xue, L., Barua, A., Constant, N., Al-Rfou, R., Narang, S., Kale, M., Roberts, A., and Raffel, C. ByT5: Towards a token-free future with pre-trained byte-to-byte models. Transactions of the Association for Computational Linguistics, 10:291-306, 2022. doi: 10.1162/tacl_a_00461. URL https://aclanthology.org/2022.tacl-1.17.", + "Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.", + "Yang, J., Wang, M., Zhou, H., Zhao, C., Zhang, W., Yu, Y., and Li, L. Towards making the most of bert" + ], + "bbox": [ + 500, + 84, + 887, + 905 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 71 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "in neural machine translation. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 9378-9385, 2020.", + "Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. Hellaswag: Can a machine really finish your sentence? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, 2019.", + "Zhang, B., Ghorbani, B., Bapna, A., Cheng, Y., Garcia, X., Shen, J., and First, O. Examining scaling and transfer of language model architectures for machine translation. In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., and Sabato, S. (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 26176-26192. PMLR, 17-23 Jul 2022. URL https://proceedings.mlrpress/v162/zhang22h.html.", + "Zhong, W., Cui, R., Guo, Y., Liang, Y., Lu, S., Wang, Y., Saied, A., Chen, W., and Duan, N. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023.", + "Zhu, J., Xia, Y., Wu, L., He, D., Qin, T., Zhou, W., Li, H., and Liu, T.-Y. Incorporating bert into neural machine translation. arXiv preprint arXiv:2002.06823, 2020." 
+ ], + "bbox": [ + 86, + 84, + 477, + 488 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation", + "bbox": [ + 210, + 56, + 759, + 71 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_model.json b/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_model.json new file mode 100644 index 0000000000000000000000000000000000000000..78290055f55e7b070d70a95c6007d11e2a427390 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_model.json @@ -0,0 +1,2259 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.11, + 0.11, + 0.865, + 0.157 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "text", + "bbox": [ + 0.095, + 0.199, + 0.88, + 0.233 + ], + "angle": 0, + "content": "Biao Zhang* Fedor Moiseev* Joshua Ainslie* Paul Suganthan* Min Ma* Surya Bhupatiraju Fede Lebron Orhan Firat Armand Joulin Zhe Dong*" + }, + { + "type": "title", + "bbox": [ + 0.242, + 0.258, + 0.321, + 0.274 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.278, + 0.445, + 0.761 + ], + "angle": 0, + "content": "While decoder-only large language models (LLMs) have shown impressive results, encoder-decoder models are still widely adopted in real-world applications for their inference efficiency and richer encoder representation. In this paper, we study a novel problem: adapting pretrained decoder-only LLMs to encoder-decoder, with the goal of leveraging the strengths of both approaches to achieve a more favorable quality-efficiency trade-off. We argue that adaptation not only enables inheriting the capability of decoder-only LLMs but also reduces the demand for computation compared to pretraining from scratch. We rigorously explore different pretraining objectives and parameter initialization/optimization techniques. Through extensive experiments based on Gemma 2 (2B and 9B) and a suite of newly pretrained mT5-sized models (up to 1.6B), we demonstrate the effectiveness of adaptation and the advantage of encoder-decoder LLMs. Under similar inference budget, encoder-decoder LLMs achieve comparable (often better) pretraining performance but substantially better finetuning performance than their decoder-only counterpart. For example, Gemma 2B-2B outperforms Gemma 2B by \\(\\sim 7\\%\\) after instruction tuning. Encoder-decoder adaptation also allows for flexible combination of different-sized models, where Gemma 9B-2B significantly surpasses Gemma 2B-2B by \\(>3\\%\\). The adapted encoder representation also yields better results on SuperGLUE. We will release our checkpoints to facilitate future research." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.789, + 0.218, + 0.804 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.814, + 0.477, + 0.845 + ], + "angle": 0, + "content": "Neural network architectures are often designed to incorporate certain assumptions or inductive biases regarding the" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.853, + 0.478, + 0.882 + ], + "angle": 0, + "content": "* Core Contributor. Google. Correspondence to: Biao Zhang , Zhe Dong ." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.259, + 0.888, + 0.518 + ], + "angle": 0, + "content": "input data, leading to either improved model performance or better computational efficiency, if not both. Unlike the popular decoder-only architecture used for large language model (LLM) (Brown et al., 2020), the encoder-decoder architecture adopts separate modeling modules – an encoder for input understanding and a decoder for output generation (Vaswani et al., 2017). This separation decouples parameters for different functionalities and thus enjoys higher freedom in handling contextual representation and challenging tasks (Tay et al., 2022; Wang et al., 2022). It also offers high flexibility in changing the encoder and decoder size (e.g., a large encoder paired with a small decoder) to control the quality-efficiency trade-off (Kasai et al., 2020; Zhang et al., 2022), an increasingly important aspect for LLM deployment (Gemini et al., 2024). Despite these benefits, however, the study on encoder-decoder LLMs receive little to no attention nowadays." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.524, + 0.889, + 0.782 + ], + "angle": 0, + "content": "In this paper, we revisit this classical architecture by exploring the following question: can we get strong(er) encoder-decoder LLMs by adapting from existing pretrained decoder-only LLMs? We consider the adaptation more significantly than pretraining new models from scratch since pretraining is resource-intensive and powerful decoder-only models at different sizes are already widely available (Dubey et al., 2024; Team et al., 2024; Liu et al., 2024a; Yang et al., 2024; Jiang et al., 2024). Our hypothesis is that, by reusing parameters from decoder-only models, we can accelerate training and effectively transfer their internal knowledge to encoder-decoder, preserving (even enhancing) their capabilities. Note adaptation also allows for pairing varying-sized decoder-only models to achieve specific quality-efficiency considerations. Yet, the optimal method for such adaptation and the extent to which performance can be improved remain open questions, which we aim to address rigorously." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.788, + 0.889, + 0.895 + ], + "angle": 0, + "content": "We employ Gemma 2 (Team et al., 2024) as the testbed. As shown in Figure 1, the encoder-decoder architecture follows the original Transformer (Vaswani et al., 2017) but equipped with Gemma 2 modifications. The key idea behind the adaptation is to initialize the parameters of the encoder-decoder model from pretrained decoder-only model(s) as a warmup and then pretrain or adapt all parameters with self" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.273, + 0.061, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.06225v1 [cs.CL] 8 Apr 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.071 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "image", + "bbox": [ + 0.21, + 0.082, + 0.764, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.228, + 0.888, + 0.268 + ], + "angle": 0, + "content": "Figure 1: Overview of our approach. We build encoder-decoder models by adapting from pretrained decoder-only models. 
Model architecture and parameters are inherited from the decoder-only model except the cross-attention, for which we adopt different initialization methods depending on the encoder and decoder size. \"ROPE\": rotary embedding; \"FFN\": feed-forward layer." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.294, + 0.477, + 0.43 + ], + "angle": 0, + "content": "supervised learning. Depending on whether the encoder and the decoder share the same configuration, we propose different initialization and optimization strategies for the cross-attention layer. We also compare different pretraining objectives, including prefix language modeling with knowledge distillation (Hinton et al., 2015) and UL2 (Tay et al., 2022). Apart from Gemma 2 2B and 9B, we pretrain a series of small models to better understand the adaptation at different scales." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.437, + 0.476, + 0.528 + ], + "angle": 0, + "content": "To thoroughly evaluate model performance, we adopt different benchmarks for pretrained and instruction-tuned models respectively, each covering a range of established academic evaluations. In addition, we use SuperGLUE (Wang et al., 2019a) to measure the quality of the learned contextual representations. Our main findings are below:" + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.545, + 0.475, + 0.62 + ], + "angle": 0, + "content": "- Leveraging pretrained decoder-only LLMs is an effective way to build powerful encoder-decoder LLMs, which yields substantially improved downstream performance particularly after instruction tuning under similar inference flops." + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.63, + 0.475, + 0.69 + ], + "angle": 0, + "content": "- Our adaptation method is highly flexible, allowing for pairing large encoder with small decoder, such as 9B-2B, with significant quality gains over Gemma 2 2B but similar generation latency." + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.7, + 0.473, + 0.731 + ], + "angle": 0, + "content": "- Adaptation is not only more compute efficient but also more effective than pretraining from scratch." + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.74, + 0.475, + 0.8 + ], + "angle": 0, + "content": "- Pretraining objective matters. Models trained with prefix language modeling and knowledge distillation are generally better at generative tasks, while UL2 models have better encoder representations." + }, + { + "type": "list", + "bbox": [ + 0.105, + 0.545, + 0.475, + 0.8 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.82, + 0.228, + 0.835 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.846, + 0.476, + 0.907 + ], + "angle": 0, + "content": "While the decoder-only architecture has become the de facto standard for LLMs, the debate between encoder-decoder and decoder-only modeling is still not conclusive. Many prior studies proposed different approaches to pretrain strong" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.294, + 0.889, + 0.534 + ], + "angle": 0, + "content": "encoder-decoder models, e.g., MASS (Song et al., 2019), T5 (Raffel et al., 2020), mT5 (Xue et al., 2021), byT5 (Xue et al., 2022), BART (Lewis et al., 2020), and OpenBA (Li et al., 2023). Tay et al. (2022) compared different pretraining objectives, highlighting the superiority of UL2 and encoder-decoder modeling. Zhang et al. 
(2022) systematically examined the scaling behavior of both architectures on machine translation, showing their similarity when adequate objectives are applied. Wang et al. (2022) thoroughly explored different modeling choices and training objectives with a focus on LLM zero-shot generalization. They discovered that encoder-decoder LLMs after instruction tuning achieve the best performance, echoing with our experiments. They also studied adaptation, but it is between different pretraining objectives rather than from decoder-only LLMs to encoder-decoder LLMs." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.543, + 0.889, + 0.709 + ], + "angle": 0, + "content": "Leveraging pretrained models for encoder-decoder modeling has been extensively explored. In the BERT era (Devlin et al., 2019), researchers developed different ways of utilizing it to enhance encoder-decoder performance on downstream tasks, such as machine translation (Zhu et al., 2020; Clinchant et al., 2019; Yang et al., 2020), grammatical error correction (Kaneko et al., 2020), summarization (Liu & Lapata, 2019), and text generation (Chen et al., 2019). Our work follows a similar spirit but is based on pretrained decoder-only LLMs and focuses on developing general-purpose encoder-decoder LLMs." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.716, + 0.889, + 0.898 + ], + "angle": 0, + "content": "Another related direction is the development of inference friendly LLMs. Techniques for improving inference efficiency are many, ranging from quantization (Dettmers & Zettlemoyer, 2023), key-value cache optimization (Corallo & Papotti, 2024), recurrent modeling (Gu & Dao, 2023; Botev et al., 2024), to strong small LLMs with improved pretraining (Abdin et al., 2024; Liu et al., 2024b), to name a few. While these techniques offer significant efficiency gains, their focus is fundamentally distinct and complementary to our proposed encoder-decoder adaptation, i.e., both approaches can be used in conjunction to realize greater overall efficiency." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.071 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.084, + 0.452, + 0.102 + ], + "angle": 0, + "content": "3. Approach: Encoder-Decoder Adaptation" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.11, + 0.208, + 0.125 + ], + "angle": 0, + "content": "3.1. Architecture" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.133, + 0.477, + 0.255 + ], + "angle": 0, + "content": "Pretraining LLMs is both compute and time intensive. To reduce the amount of training required, we propose to adapt existing decoder-only LLMs to encoder-decoder and leverage pretrained decoder-only checkpoints for initialization, as shown in Figure 1. Due to this, we keep the encoder-decoder architecture as similar as possible to original decoder-only model, only introducing changes when necessary. This results in the following architecture:" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.269, + 0.475, + 0.345 + ], + "angle": 0, + "content": "1. Encoder has exactly the same architecture as the decoder-only model, but self-attention is switched from causal to bidirectional. We provide ablations in Section 6 that illustrate the critical effect of bidirectional attention on downstream performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.099, + 0.353, + 0.476, + 0.43 + ], + "angle": 0, + "content": "2. In each Decoder block, FFN and self-attention parts are identical to the corresponding parts in decoder-only models, and cross-attention has the same number of heads and head dimension as self-attention, but attends to the whole output of the encoder." + }, + { + "type": "list", + "bbox": [ + 0.099, + 0.269, + 0.476, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.443, + 0.476, + 0.565 + ], + "angle": 0, + "content": "We base our study on Gemma 2 (Team et al., 2024). But note our approach is highly flexible and isn't restricted to specific decoder-only architectures. We can easily apply our method to other model families, such as LLaMA (Dubey et al., 2024), QWen (Yang et al., 2024), and DeepSeek (Liu et al., 2024a). In theory, we can also adapt decoder-only models from different families, such as pairing LLaMA models with QWen models." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.572, + 0.476, + 0.709 + ], + "angle": 0, + "content": "In addition, our approach allows for unbalanced encoder-decoder models, where the decoder is significantly smaller than the encoder. This provides better support for applications where input processing capabilities are more important than generative capacity. For example, for summarization, deep understanding of the input text is often more important than the generation part, as it doesn't need to generate any new information. As a result, generation time is significantly reduced, while providing competitive quality." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.724, + 0.212, + 0.739 + ], + "angle": 0, + "content": "3.2. Initialization" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.748, + 0.476, + 0.869 + ], + "angle": 0, + "content": "When initializing an encoder-decoder model from a decoder-only checkpoint, we try to map every layer to the most similar weight in the decoder-only checkpoint. In particular, the encoder is fully initialized from the decoder-only checkpoint, as it doesn't introduce any new weights. In the decoder, FFN and self-attention subblocks are initialized from the FFN and self-attention weights from the corresponding layers in the decoder-only checkpoint." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.876, + 0.475, + 0.906 + ], + "angle": 0, + "content": "Cross-attention is initialized from self-attention weights in the balanced setup where encoder and decoder have the" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.888, + 0.147 + ], + "angle": 0, + "content": "same configuration. Otherwise, we first initialize crossattention from scratch and then finetune it for the first \\(K\\) steps as a warmup while freezing other model parameters. After \\(K\\) steps, all model parameters are tuned." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.162, + 0.685, + 0.178 + ], + "angle": 0, + "content": "3.3. Pretraining Objective" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.186, + 0.888, + 0.292 + ], + "angle": 0, + "content": "Decoder-only pretraining often adopts causal language modeling on a single sequence. In contrast, encoder-decoder adaptation requires separate input and target sequences to be fed to the encoder and decoder separately. We explore two classical pretraining objectives for encoder-decoder modeling: prefix language modeling (PrefixLM) and UL2 (Tay et al., 2022; Wang et al., 2022)." 
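As a concrete illustration of the PrefixLM setup described in the following paragraph, here is a minimal Python sketch of how a single pretraining sequence could be split into an encoder input and a decoder target using the half-and-half rule adopted in this work. Function and variable names are invented for illustration; this is not the authors' preprocessing pipeline.

```python
# Hedged sketch: the half-and-half PrefixLM split used for
# encoder-decoder adaptation. Names are placeholders, not taken
# from the released codebase.

def prefix_lm_split(token_ids):
    """Split one tokenized sequence into (encoder_input, decoder_target)."""
    half = len(token_ids) // 2
    encoder_input = token_ids[:half]    # prefix seen with bidirectional attention
    decoder_target = token_ids[half:]   # suffix predicted autoregressively
    return encoder_input, decoder_target


# Example: an 8192-token sequence yields the 4096-4096 input-output pair
# reported for PrefixLM preprocessing in Section 4.
tokens = list(range(8192))  # stand-in for real token ids
enc_in, dec_tgt = prefix_lm_split(tokens)
assert len(enc_in) == 4096 and len(dec_tgt) == 4096
```

Splitting in the middle keeps preprocessing trivial and, as noted in the text, makes it straightforward to reuse the teacher logits from the decoder-only model as distillation targets for the second half.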
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.299, + 0.889, + 0.436 + ], + "angle": 0, + "content": "PrefixLM behaves similar to causal language modeling except for its prefix condition. To simplify the preprocessing, we split a sequence equally into two halves, the first half used as input and the second one as target. This also eases the adoption of knowledge distillation from decoder-only models. UL2 is more complicated. It is composed of several denoising tasks at different levels of complexity. We prepare UL2 data following Tay et al. (2022). We compare their performance in experiments." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.455, + 0.572, + 0.472 + ], + "angle": 0, + "content": "4. Setup" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.481, + 0.888, + 0.662 + ], + "angle": 0, + "content": "Data Setting Our data for pretraining and instruction tuning – including supervised finetuning (SFT) and reinforcement learning from human feedback (RLHF) – follow Gemma 2 (Team et al., 2024). For the adaptation, we preprocess the Gemma 2 pretraining data (8 trillion tokens) with PrefixLM and UL2. Note Gemma 2 pretraining data comes with knowledge distillation. We preserve this information for PrefixLM while adopting ground-truth targets for UL2 as mapping the teacher logits to UL2 is non-trivial. The preprocessed data has an input-output sequence length of 4096-4096 and 8192-8192 for PrefixLM and UL2, respectively. We adapt our models on up to 2 trillion tokens." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.677, + 0.888, + 0.769 + ], + "angle": 0, + "content": "Model Setting We use Gemma 2 (2B and 9B) as the base decoder-only LLM. We also pretrain several smaller models (Small, Base, Large, and XL) following mT5 configurations (Xue et al., 2021) under the Gemma 2 framework, and then adapt them to encoder-decoder LLMs. Detailed model configurations are given in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.784, + 0.888, + 0.83 + ], + "angle": 0, + "content": "Evaluation We employ diverse academic evaluation datasets to evaluate different capabilities of LLMs. Concretely, we use the following benchmarks:" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.846, + 0.889, + 0.907 + ], + "angle": 0, + "content": "- Pretraining (PT) benchmark: Boolq (Clark et al., 2019), SIQA (Sap et al., 2019), PIQA (Bisk et al., 2020), ARC-c&ARC-e (Clark et al., 2018), MMLU (Hendrycks et al., 2021), MMLU Pro (Wang et al., 2024), Hel" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.071 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "table", + "bbox": [ + 0.156, + 0.082, + 0.817, + 0.206 + ], + "angle": 0, + "content": "
<table>
<tr><th></th><th>#Layers</th><th>d_model</th><th>d_ffn</th><th>#heads (q/kv)</th><th>d_head</th><th>#Params (Decoder-Only)</th><th>#Params (Encoder-Decoder)</th></tr>
<tr><td>2B</td><td>26</td><td>2304</td><td>18432</td><td>8/4</td><td>256</td><td>2.0B</td><td>4.0B (2B-2B)</td></tr>
<tr><td>9B</td><td>42</td><td>3584</td><td>28672</td><td>16/8</td><td>256</td><td>8.3B</td><td>16.7B (9B-9B)</td></tr>
<tr><td>S (Small)</td><td>8</td><td>512</td><td>1024</td><td>8/8</td><td>64</td><td>14.7M</td><td>29.4M (S-S)</td></tr>
<tr><td>B (Base)</td><td>12</td><td>768</td><td>2048</td><td>12/12</td><td>64</td><td>56.7M</td><td>113.3M (B-B)</td></tr>
<tr><td>L (Large)</td><td>24</td><td>1024</td><td>2816</td><td>16/16</td><td>64</td><td>204.6M</td><td>409.1M (L-L)</td></tr>
<tr><td>XL (Xlarge)</td><td>24</td><td>2048</td><td>5120</td><td>32/32</td><td>64</td><td>780.3M</td><td>1.6B (XL-XL)</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.216, + 0.888, + 0.257 + ], + "angle": 0, + "content": "Table 1: Model configurations. #Layers: number of layers; \\(d_{model/ffn/head}\\): model/feed-forward/head dimension; #heads (\\(q/kv\\)): number of query/value heads. #Params: number of model parameters excluding embeddings. For encoder-decoder models, we show the number of parameters for the balanced architecture, e.g. 2B-2B. The 9B-2B model has 10.4B parameters. “B/M”: billion/million." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.282, + 0.477, + 0.463 + ], + "angle": 0, + "content": "laSwag (Zellers et al., 2019), Winogrande (Sakaguchi et al., 2021), TruthfulQA (Lin et al., 2021), AGIEval (Zhong et al., 2023), BBH (Suzgun et al., 2022), DROP (Dua et al., 2019), GPQA (Rein et al., 2023), GSM8K (Cobbe et al., 2021), HumanEval (Chen et al., 2021), Lambada (Paperno et al., 2016), MATH-500 (Hendrycks et al., 2021), MBPP (Austin et al., 2021), NQ (Kwiatkowski et al., 2019), TriviaQA (Joshi et al., 2017), and WMT23 (Kocmi et al., 2023). We perform zero/few-shot prompting for pretrained LLMs, and report the averaged result as \\( PT \\) score." + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.472, + 0.475, + 0.563 + ], + "angle": 0, + "content": "- Instruction-tuning (IT) benchmark: GSM8K, MMLU, MMLU Pro, MBPP, HumanEval, MATH-500, BBH, GPQA (Diamond), WMT23, and MGSM (Shi et al., 2022). We perform zero/few-shot prompting with task-specific instruction for instruction-tuned models, and report the averaged result as IT score." + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.573, + 0.477, + 0.724 + ], + "angle": 0, + "content": "- SuperGLUE (Wang et al., 2019b): we use this benchmark to examine the learned contextual representation. We stack a task-specific head on the representation of the last token in the encoder (decoder) of the encoder-decoder (decoder-only) LLM, and finetune all parameters on the training set. Learning rate, batch size, and dropout are grid-searched for each task. We reformulate all tasks as classification tasks and report averaged dev-set accuracy over COPA, WIC, WSC, RTE, MultiRC, CB, and Boolq." + }, + { + "type": "list", + "bbox": [ + 0.105, + 0.472, + 0.477, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.74, + 0.475, + 0.83 + ], + "angle": 0, + "content": "For generative tasks, we always apply greedy sampling. We perform pretraining, SFT, and RLHF based on the Gemma 2 recipe except for the learning rate which we tune empirically for encoder-decoder LLMs. In unbalanced encoder-decoder adaptation, e.g. 9B-2B, we set the cross-attention warmup step \\( K \\) to 1000." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.85, + 0.173, + 0.866 + ], + "angle": 0, + "content": "5. Results" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.876, + 0.476, + 0.906 + ], + "angle": 0, + "content": "The encoder-decoder adaptation converges rapidly, particularly for balanced architectures. While adaptation" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.283, + 0.822, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.453, + 0.887, + 0.48 + ], + "angle": 0, + "content": "Figure 2: Pretraining performance as a function of the number of pretrained tokens during the adaptation." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.506, + 0.888, + 0.672 + ], + "angle": 0, + "content": "leverages pretrained parameters for initialization, whether and how this benefits model convergence is still questionable. Figure 2 shows the change of PT performance with respect to the amount of pretrained tokens. Obviously, adaptation is very computationally efficient, converging quickly and achieving similar performance to its decoder-only counterpart after only tens of billions of tokens. Balanced architectures (2B-2B and 9B-9B) converge much faster than the unbalanced ones (9B-2B) since all parameters in the former are initialized from pretrained decoder-only models while the cross-attention in the latter is randomly initialized." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.679, + 0.889, + 0.801 + ], + "angle": 0, + "content": "We also notice that additional pretraining improves balanced models a little on average but substantially benefits some tasks, like GSM8K and DROP. Besides, 9B-2B performance increases consistently during the adaptation, quickly surpassing Gemma 2 2B and moving towards Gemma 2 9B. This demonstrates the feasibility of encoder-decoder adaptation from varying-sized decoder-only LLMs, as well as its ability to utilize the knowledge from pretrained models." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.815, + 0.889, + 0.907 + ], + "angle": 0, + "content": "Pretraining objective matters: UL2 and PrefixLM show different characteristics. Previous study reported the superiority of UL2 over PrefixLM (Tay et al., 2022), but PrefixLM in our study is enhanced with knowledge distillation, which often improves small models significantly. We compare these two objectives for the adaptation in Table 2." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.071 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "table", + "bbox": [ + 0.229, + 0.082, + 0.744, + 0.226 + ], + "angle": 0, + "content": "
<table>
<tr><th rowspan=2></th><th colspan=3>PT Score</th><th colspan=3>IT Score</th></tr>
<tr><th>Gemma 2</th><th>+ PrefixLM</th><th>+ UL2</th><th>Gemma 2</th><th>+ PrefixLM</th><th>+ UL2</th></tr>
<tr><td>2B-2B</td><td>47.9</td><td>49.7</td><td>50.1</td><td>(39.0)</td><td>46.4 (46.1)</td><td>42.4</td></tr>
<tr><td>9B-2B</td><td>-</td><td>55.0</td><td>52.9</td><td>-</td><td>49.3 (50.6)</td><td>45.7</td></tr>
<tr><td>9B-9B</td><td>61.7</td><td>63.1</td><td>63.9</td><td>(59.6)</td><td>62.9 (64.5)</td><td>61.5</td></tr>
<tr><td>S-S</td><td>23.4</td><td>22.8</td><td>23.1</td><td>6.2</td><td>9.8</td><td>10.7</td></tr>
<tr><td>B-B</td><td>26.7</td><td>26.9</td><td>26.0</td><td>9.8</td><td>12.9</td><td>11.1</td></tr>
<tr><td>L-L</td><td>32.3</td><td>31.6</td><td>30.9</td><td>12.9</td><td>17.5</td><td>18.9</td></tr>
<tr><td>XL-XL</td><td>39.7</td><td>39.5</td><td>38.5</td><td>23.5</td><td>30.7</td><td>29.2</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.37, + 0.23, + 0.599, + 0.243 + ], + "angle": 0, + "content": "(a) Results on PT and IT benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.231, + 0.256, + 0.746, + 0.399 + ], + "angle": 0, + "content": "
<table>
<tr><th rowspan=2></th><th colspan=3>PT Models</th><th colspan=3>IT Models</th></tr>
<tr><th>Gemma 2</th><th>+ PrefixLM</th><th>+ UL2</th><th>Gemma 2</th><th>+ PrefixLM</th><th>+ UL2</th></tr>
<tr><td>2B-2B</td><td>75.5</td><td>88.1</td><td>88.1</td><td>(86.2)</td><td>88.3 (87.9)</td><td>90.5</td></tr>
<tr><td>9B-2B</td><td>-</td><td>90.2</td><td>90.7</td><td>-</td><td>90.6 (90.3)</td><td>91.3</td></tr>
<tr><td>9B-9B</td><td>82.5</td><td>91.4</td><td>91.8</td><td>(89.8)</td><td>91.8 (91.4)</td><td>91.6</td></tr>
<tr><td>S-S</td><td>67.6</td><td>69.8</td><td>69.6</td><td>67.6</td><td>68.8</td><td>69.4</td></tr>
<tr><td>B-B</td><td>68.6</td><td>71.2</td><td>71.5</td><td>68.7</td><td>72.3</td><td>73.6</td></tr>
<tr><td>L-L</td><td>68.4</td><td>78.7</td><td>79.7</td><td>68.8</td><td>78.1</td><td>80.3</td></tr>
<tr><td>XL-XL</td><td>70.7</td><td>84.4</td><td>85.4</td><td>69.2</td><td>85.7</td><td>87.0</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.354, + 0.403, + 0.618, + 0.417 + ], + "angle": 0, + "content": "(b) Finetuned performance on SuperGLUE." + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.429, + 0.889, + 0.482 + ], + "angle": 0, + "content": "Table 2: Main results on PT, IT, and SuperGLUE benchmarks. \"Gemma 2\": decoder-only models; \"+\"PrefixLM/UL2\": encoder-decoder models adapted via prefix language modeling (with knowledge distillation)/UL2. We put Gemma 2 results into the corresponding encoder-decoder rows to save space, e.g. 2B-2B for Gemma 2 means Gemma 2 2B. Numbers in parentheses are for RLHFed models. Best results are in bold. Note PT and IT scores are not directly comparable since they are averaged over different tasks." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.507, + 0.477, + 0.688 + ], + "angle": 0, + "content": "We find that PrefixLM and UL2 have their own strengths. Specifically, UL2 delivers stronger contextual representations, outweighing PrefixLM on SuperGLUE across most model scales, resonating with previous findings (Tay et al., 2022). In contrast, PrefixLM produces more powerful generative LLMs thanks to its generation nature and the knowledge distillation. It surpasses UL2 on PT and IT benchmarks in most cases. Particularly, it outperforms UL2 at 9B-2B on both PT and IT by up to 3.6, a significant margin. Since generative LLMs have become the mainstream, we base our following analysis on PrefixLM. We discuss our attempts to combine PrefixLM and UL2 in the next section." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.725, + 0.476, + 0.906 + ], + "angle": 0, + "content": "Encoder-decoder LLMs outperform decoder-only LLMs especially after instruction tuning. Table 2 also shows that the adapted encoder-decoder LLMs achieve comparable or slightly better pretraining performance than their decoder-only counterpart but with substantially improved instruction-tuning performance, echoing with the findings of Wang et al. (2022). For example, the 9B-9B encoder-decoder LLM surpasses Gemma 2 9B by 1.4 and 4.9 on PT and IT, respectively. The performance gap further increases to 1.8 and 7.1 at 2B-2B scale. We notice that the adaption performs slightly worse at scales below 2B on PT, but the improvements on IT are still promising, e.g. 7.2 at XL-XL." + }, + { + "type": "text", + "bbox": [ + 0.495, + 0.507, + 0.889, + 0.598 + ], + "angle": 0, + "content": "Regardless of PT or IT models, pretraining objectives, and model scales, encoder-decoder LLMs perform consistently better than decoder-only LLMs on SuperGLUE. This suggests that the contextual representation from encoder-decoder LLMs is often of higher quality, likely due to bidirectional self-attention." + }, + { + "type": "text", + "bbox": [ + 0.495, + 0.605, + 0.889, + 0.817 + ], + "angle": 0, + "content": "We need to highlight that the above analysis is based on the overall performance, which may not apply when it comes to a specific downstream task. As shown in Table 3, there are some tasks favoring encoder-decoder models while others favoring decoder-only models especially for PT models. For example, after pretraining, Gemma 2 9B surpasses 9B-9B by 4.1 on ARC-C but underperforms it by 4.4 on Winogrande; while encoder-decoder LLM shows more consistent advantage after instruction tuning, 9B-9B still lags behind Gemma 2 9B by 0.9 on WMT23. 
This illustrates the complexity when evaluating LLM capability as well as the risk of reaching misleading conclusions when adopting biased evaluation tasks. We reduce such risk by selecting as diverse and broad tasks as possible for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.495, + 0.846, + 0.887, + 0.907 + ], + "angle": 0, + "content": "Encoder-decoder LLMs balance quality and inference efficiency more effectively. We next analyze different models from the perspective of inference efficiency which becomes increasingly crucial for model deployment. Figure" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.071 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "table", + "bbox": [ + 0.253, + 0.082, + 0.721, + 0.369 + ], + "angle": 0, + "content": "
<table>
<tr><th rowspan=2>Task</th><th rowspan=2>Metric</th><th colspan=2>Gemma 2</th><th colspan=3>Encoder-Decoder Adaptation</th></tr>
<tr><th>2B</th><th>9B</th><th>2B-2B</th><th>9B-2B</th><th>9B-9B</th></tr>
<tr><td>MMLU</td><td>5-shot</td><td>51.9</td><td>71.1</td><td>46.8</td><td>60.3</td><td>71.3</td></tr>
<tr><td>ARC-C</td><td>25-shot</td><td>55.5</td><td>69.1</td><td>52.0</td><td>59.9</td><td>65.0</td></tr>
<tr><td>GSM8K</td><td>5-shot</td><td>23.7</td><td>63.2</td><td>41.7</td><td>48.7</td><td>72.8</td></tr>
<tr><td>AGIEval</td><td>3-5-shot</td><td>31.5</td><td>53.3</td><td>35.0</td><td>43.6</td><td>53.1</td></tr>
<tr><td>DROP</td><td>3-shot, F1</td><td>53.3</td><td>71.5</td><td>61.4</td><td>66.9</td><td>75.7</td></tr>
<tr><td>BBH</td><td>3-shot, CoT</td><td>40.2</td><td>68.9</td><td>51.9</td><td>51.6</td><td>74.7</td></tr>
<tr><td>Winogrande</td><td>5-shot</td><td>65.2</td><td>74.3</td><td>69.5</td><td>68.1</td><td>78.7</td></tr>
<tr><td>HellaSwag</td><td>10-shot</td><td>72.9</td><td>81.8</td><td>74.9</td><td>75.7</td><td>81.0</td></tr>
<tr><td>MATH-500</td><td>4-shot</td><td>17.2</td><td>33.4</td><td>24.2</td><td>23.6</td><td>37.8</td></tr>
<tr><td>ARC-e</td><td>0-shot</td><td>81.0</td><td>88.3</td><td>77.1</td><td>82.9</td><td>85.3</td></tr>
<tr><td>PIQA</td><td>0-shot</td><td>78.4</td><td>81.6</td><td>79.0</td><td>78.3</td><td>81.1</td></tr>
<tr><td>SIQA</td><td>0-shot</td><td>51.7</td><td>53.6</td><td>50.1</td><td>50.1</td><td>50.5</td></tr>
<tr><td>Boolq</td><td>0-shot</td><td>75.5</td><td>77.5</td><td>75.6</td><td>84.6</td><td>85.6</td></tr>
<tr><td>TriviaQA</td><td>5-shot</td><td>60.1</td><td>76.6</td><td>51.2</td><td>66.2</td><td>75.2</td></tr>
<tr><td>NQ</td><td>5-shot</td><td>30.7</td><td>43.9</td><td>28.4</td><td>37.1</td><td>43.1</td></tr>
<tr><td>HumanEval</td><td>pass@1</td><td>19.5</td><td>39.0</td><td>27.4</td><td>33.5</td><td>40.2</td></tr>
<tr><td>MBPP</td><td>3-shot</td><td>30.4</td><td>52.0</td><td>37.4</td><td>43.4</td><td>55.6</td></tr>
<tr><td colspan=2>Average</td><td>49.3</td><td>64.7</td><td>52.0</td><td>57.3</td><td>66.3</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.383, + 0.375, + 0.586, + 0.388 + ], + "angle": 0, + "content": "(a) Results for pretrained models." + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.401, + 0.747, + 0.606 + ], + "angle": 0, + "content": "
<table>
<tr><th rowspan=2>Task</th><th rowspan=2>Metric</th><th colspan=2>Gemma 2</th><th colspan=3>Encoder-Decoder Adaptation</th></tr>
<tr><th>2B</th><th>9B</th><th>2B-2B</th><th>9B-2B</th><th>9B-9B</th></tr>
<tr><td>GSM8K</td><td>11-shot</td><td>58.0</td><td>84.3</td><td>70.7</td><td>73.8</td><td>88.6</td></tr>
<tr><td>MMLU</td><td>5-shot</td><td>49.8</td><td>71.8</td><td>61.5</td><td>66.7</td><td>76.7</td></tr>
<tr><td>MMLU Pro</td><td>5-shot</td><td>27.4</td><td>49.9</td><td>36.6</td><td>43.0</td><td>55.7</td></tr>
<tr><td>MBPP</td><td>3-shot</td><td>37.8</td><td>59.2</td><td>44.0</td><td>49.8</td><td>64.8</td></tr>
<tr><td>HumanEval</td><td>pass@1</td><td>43.3</td><td>65.9</td><td>47.6</td><td>55.5</td><td>72.0</td></tr>
<tr><td>MATH-500</td><td>0-shot</td><td>24.4</td><td>45.8</td><td>28.2</td><td>30.0</td><td>47.2</td></tr>
<tr><td>BBH</td><td>3-shot</td><td>44.8</td><td>72.0</td><td>57.5</td><td>57.6</td><td>76.4</td></tr>
<tr><td>GPQA</td><td>0-shot</td><td>24.8</td><td>29.9</td><td>27.5</td><td>32.6</td><td>35.7</td></tr>
<tr><td>GPQA Diamond</td><td>0-shot</td><td>27.8</td><td>29.8</td><td>26.8</td><td>29.3</td><td>40.4</td></tr>
<tr><td>WMT23</td><td>5-shot, BLEURT</td><td>65.2</td><td>72.0</td><td>59.9</td><td>65.3</td><td>71.1</td></tr>
<tr><td>MGSM</td><td>8-shot</td><td>26.3</td><td>74.9</td><td>46.8</td><td>53.5</td><td>80.7</td></tr>
<tr><td colspan=2>Average</td><td>39.0</td><td>59.6</td><td>46.1</td><td>50.6</td><td>64.5</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.386, + 0.611, + 0.581, + 0.624 + ], + "angle": 0, + "content": "(b) Results for RLHFed models." + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.648, + 0.885, + 0.675 + ], + "angle": 0, + "content": "Table 3: Detailed results on different tasks for PT and RLHFed models. We compare Gemma 2 and encoder-decoder models adapted via PrefixLM. Best results are in bold." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.702, + 0.475, + 0.778 + ], + "angle": 0, + "content": "3 shows that balanced encoder-decoder LLMs have similar inference flops to their decoder-only counterparts, e.g. 2B-2B vs. Gemma 2 2B. As such, encoder-decoder models often dominate the quality-inference efficiency frontier across PT, IT, and SuperGLUE benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.785, + 0.476, + 0.906 + ], + "angle": 0, + "content": "We acknowledge that inference flops may not correlate well with actual running speed due to factors like inter-device communication, key-value caching, and autoregressive bottleneck. We then provide the latency results measured on GSM8K for 2B and 9B models in Figure 4, which further verified the above analysis. 9B-9B and 2B-2B show similar latency to Gemma 2 9B and 2B, respectively, but clearly better performance. In particular, 9B-2B, the one pairing large" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.702, + 0.885, + 0.733 + ], + "angle": 0, + "content": "encoder and small decoder, shows similar latency to Gemma 2 2B but significantly better performance than 2B-2B." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.74, + 0.885, + 0.785 + ], + "angle": 0, + "content": "Together, these confirm that encoder-decoder adaptation indeed provides a more flexible way for balancing between quality and inference speed." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.805, + 0.611, + 0.82 + ], + "angle": 0, + "content": "6. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.831, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Is the improvement after the adaptation simply due to the extra pretraining compute? Not really. We also tried to apply more pretraining compute to Gemma 2 2B by going through another 6 trillion tokens, which leads to a PT score of 48.57, still significantly below the encoder-" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.071 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.085, + 0.36, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.366, + 0.083, + 0.61, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.082, + 0.861, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.246, + 0.889, + 0.287 + ], + "angle": 0, + "content": "Figure 3: Comparisons of decoder-only LLMs with adapted encoder-decoder models under inference flops. We show PT, IT, and SuperGLUE performance. Inference flops is estimated with a sequence length of 4096-4096 and 8192 for encoder-decoder and decoder-only LLMs, respectively. Note the upper left corner marks the quality-efficiency frontier." 
+ }, + { + "type": "image", + "bbox": [ + 0.151, + 0.314, + 0.41, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.483, + 0.477, + 0.534 + ], + "angle": 0, + "content": "Figure 4: GSM8K performance as a function of latency for RL-HFed models. Latency is estimated as milliseconds (ms) per query by answering 200 reasoning questions from GSM8K. Batch size of 1 is used." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.565, + 0.475, + 0.626 + ], + "angle": 0, + "content": "decoder adaptation, 49.7. This indicates that the additional pretraining compute can't fully explain the improvements from the adaptation and we argue that the inductive bias of encoder-decoder modeling plays a crucial role." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.645, + 0.476, + 0.766 + ], + "angle": 0, + "content": "Does cross-attention warmup matter for unbalanced encoder-decoder? Yes. Our preliminary experiments with 9B-2B and UL2 on 800B tokens show that the pretraining performance over Boolq and GSM8K reduces from 62.5 to 61.8 without the warmup. Besides, increasing warmup steps from 1K to 5K further reduces performance to 60.2. An adequate amount of warmup optimization is required to reach the optimal performance." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.786, + 0.476, + 0.906 + ], + "angle": 0, + "content": "Can we switch from grouped-query attention to multi-head self attention for the encoder? Yes but with mixed results. Gemma 2 adopts grouped-query attention (GQA) to improve its decoding efficiency. However, unlike the decoder, the encoder can be fully parallelized during inference, making the use of multi-head attention (MHA) reasonable. We tried to expand GQA in Gemma 2 2B to MHA by replicating head parameters for the encoder self-attention. Under" + }, + { + "type": "table", + "bbox": [ + 0.523, + 0.309, + 0.862, + 0.421 + ], + "angle": 0, + "content": "
<table>
<tr><th rowspan=2></th><th colspan=3>Adaptation</th><th colspan=3>Scratch</th></tr>
<tr><th>PT</th><th>IT</th><th>SG</th><th>PT</th><th>IT</th><th>SG</th></tr>
<tr><td>S-S</td><td>22.8</td><td>9.8</td><td>68.8</td><td>24.0</td><td>9.9</td><td>70.5</td></tr>
<tr><td>B-B</td><td>26.9</td><td>12.9</td><td>72.3</td><td>28.1</td><td>11.8</td><td>75.5</td></tr>
<tr><td>L-L</td><td>31.6</td><td>17.5</td><td>78.1</td><td>30.9</td><td>17.1</td><td>78.5</td></tr>
<tr><td>XL-XL</td><td>39.5</td><td>30.7</td><td>85.7</td><td>37.7</td><td>28.8</td><td>79.5</td></tr>
<tr><td>2B-2B</td><td>49.7</td><td>46.4</td><td>88.3</td><td>47.1</td><td>43.9</td><td>84.5</td></tr>
</table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.497, + 0.43, + 0.889, + 0.47 + ], + "angle": 0, + "content": "Table 4: Results for encoder-decoder models adapted with PrefixLM (Adaptation) and pretrained from scratch (Scratch). SG: SuperGLUE score for SFTed models." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.496, + 0.887, + 0.556 + ], + "angle": 0, + "content": "PrefixLM, this improves PT performance to 50.2 by 0.5 at 2B-2B but reduces IT performance to 43.5 by 2.9. We thus still stick to GQA when adapting Gemma 2 2B and 9B for the encoder." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.573, + 0.887, + 0.74 + ], + "angle": 0, + "content": "Does bidirectional self-attention matter for the encoder? Yes. A crucial difference between encoder-decoder and decoder-only LLMs is the use of bidirectional self-attention. We also tested keeping the encoder self-attention causal at 2B-2B, which achieves a PT and IT score of 45.6 and 41.7, lagging behind its bidirectional counterpart substantially by 4.1 and 4.7, respectively. Note, the causal 2B-2B model surpasses Gemma 2 2B on IT by 2.7, although it performs worse on PT. This suggests that bidirectional self-attention contributes greatly to the success of our adaptation, but is not the only factor." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.755, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Would pretraining encoder-decoder LLMs from scratch yield better performance? Not really. Pretraining from scratch is a common method for developing new LLMs. We also pretrained encoder-decoder LLMs from scratch on 8 trillion tokens with PrefixLM. Table 4 summarizes the results. Despite using more pretraining tokens, encoder-decoder LLMs pretrained from scratch only perform better at small scales, such as S-S and B-B, beyond which adaptation shows clear superiority. As such, adaptation is a more computationally efficient way of developing powerful" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.923, + 0.492, + 0.935 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.071 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.085, + 0.36, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.366, + 0.086, + 0.609, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.086, + 0.857, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.246, + 0.888, + 0.273 + ], + "angle": 0, + "content": "Figure 5: Quality change for the two-stage optimization. \"UL2-then-PrefixLM\": switch the training objective from UL2 to PrefixLM for the final \\(10\\%\\) tokens; \"PrefixLM-then-UL2\": similar but from PrefixLM to UL2." + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.3, + 0.411, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.463, + 0.411, + 0.619 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.633, + 0.475, + 0.66 + ], + "angle": 0, + "content": "Figure 6: Correlation analysis between PT performance and its corresponding IT/SuperGLUE performance." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.687, + 0.248, + 0.7 + ], + "angle": 0, + "content": "encoder-decoder LLMs." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.717, + 0.476, + 0.868 + ], + "angle": 0, + "content": "Is IT/SuperGLUE score predicable from PT score? Mixed. A general assumption in LLM development is that PT performance can be used as an indicator for downstream applications. We summarize all our ablations and put them in Figure 6. Over all data points and across all model sizes, the correlation is pretty strong: a Spearman's \\(\\rho\\) of 0.97 and 0.89 for IT vs. PT and SuperGLUE vs. PT, respectively. However, when considering data points within each model size separately, the averaged Spearman's \\(\\rho\\) reduces to 0.42 and 0.05, respectively and is not significant anymore." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.876, + 0.476, + 0.906 + ], + "angle": 0, + "content": "In practice, we also noticed that PT checkpoints with weaker performance sometimes yield significantly better IT or Su" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.3, + 0.887, + 0.36 + ], + "angle": 0, + "content": "perGLUE performance. When selecting PT checkpoints for a specific model size, it's better to also examine their IT performance apart from PT results to avoid some biases or overfitting." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.385, + 0.887, + 0.506 + ], + "angle": 0, + "content": "Can we get the best of both worlds from PrefixLM and UL2? This is non-trivial. Our first attempt is to merge checkpoints trained from PrefixLM and UL2 with uniform weighting. Unfortunately, the merged model results in either similar or much worse performance. We argue that PrefixLM and UL2 lead to different training dynamics and converge to very different local minima. Directly merging their weights doesn't work right out of the box." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.514, + 0.887, + 0.62 + ], + "angle": 0, + "content": "We next explore a two-stage optimization, where we first adapt with PrefixLM and then shift to UL2 for the last \\(10\\%\\) of training, and vice versa. Figure 5 shows very mixed results. Switching from PrefixLM to UL2 generally hurts performance. In contrast, switching from UL2 to PrefixLM improves IT performance, but suffers from reduction in PT and SuperGLUE performance." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.627, + 0.888, + 0.657 + ], + "angle": 0, + "content": "Another direction is to jointly optimize the model on PrefixLM and UL2, which we leave for future work." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.676, + 0.767, + 0.692 + ], + "angle": 0, + "content": "7. Conclusion and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.702, + 0.887, + 0.883 + ], + "angle": 0, + "content": "In this paper, we presented methods for building powerful, general purpose encoder-decoder LLMs by adapting from pretrained decoder-only LLMs. Such adaptation offers high flexibility in leveraging different types/families of pretrained decoder-only models as well as combining different-sized models. Through extensive experiments based on Gemma 2, we demonstrated the feasibility and effectiveness of the adaptation: the adapted encoder-decoder LLMs outperform their decoder-only counterparts substantially after instruction tuning, dominating the quality-inference efficiency frontier. Besides, encoder-decoder LLMs also provide better contextual representations as evaluated on SuperGLUE." 
+ }, + { + "type": "text", + "bbox": [ + 0.497, + 0.891, + 0.885, + 0.906 + ], + "angle": 0, + "content": "We hope our findings inspire more researchers from" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.072 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.086, + 0.479, + 0.146 + ], + "angle": 0, + "content": "academia and industry to revisit the encoder-decoder paradigm for LLM development. To facilitate the research, we will release the code and checkpoints at XXX (coming soon)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.153, + 0.479, + 0.336 + ], + "angle": 0, + "content": "Our work still suffers from several limitations. Particularly, we only experimented with Gemma 2 models up to 9B, although the proposed approach could apply to other LLM families. In the future, we are interested in scaling the model size (e.g., to 27B), exploring other LLMs (such as LLaMA), examining more unbalanced setups, and testing the combination of dense and MoE LLMs. As mentioned above, we will also investigate better ways to leverage PrefixLM, knowledge distillation, and UL2. Extending our adapted encoder-decoder LLM to cross/multi-modality modeling (e.g., vision-language and speech-language) would be another intriguing direction." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.354, + 0.254, + 0.372 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.38, + 0.476, + 0.472 + ], + "angle": 0, + "content": "We'd like to thank Enrique Alfonseca, Tris Warkentin, Xiaodan Song, Sugato Basu, Inderjit Dhillon, Alexander Grushetsky, Pandu Nayak, Ramakrishnan Srikant, and Slav Petrov for their constructive feedback on the manuscript. We are grateful to Srinivasan Venkatachary for supporting this project." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.49, + 0.184, + 0.506 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.514, + 0.476, + 0.59 + ], + "angle": 0, + "content": "Abdin, M., Aneja, J., Awadalla, H., Awadallah, A., Awan, A. A., Bach, N., Bahree, A., Bakhtiari, A., Bao, J., Behl, H., et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.601, + 0.476, + 0.661 + ], + "angle": 0, + "content": "Austin, J., Odena, A., Nye, M., Bosma, M., Michalewski, H., Dohan, D., Jiang, E., Cai, C., Terry, M., Le, Q., et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.672, + 0.476, + 0.734 + ], + "angle": 0, + "content": "Bisk, Y., Zellers, R., Gao, J., Choi, Y., et al. Piqa: Reasoning about physical commonsense in natural language. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 7432-7439, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.744, + 0.476, + 0.82 + ], + "angle": 0, + "content": "Botev, A., De, S., Smith, S. L., Fernando, A., Muraru, G.-C., Haroun, R., Berrada, L., Pascanu, R., Sessa, P. G., Dadashi, R., et al. Recurrentgemma: Moving past transformers for efficient open language models. arXiv preprint arXiv:2404.07839, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.83, + 0.476, + 0.905 + ], + "angle": 0, + "content": "Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems, 33: 1877-1901, 2020." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.514, + 0.476, + 0.905 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.085, + 0.888, + 0.146 + ], + "angle": 0, + "content": "Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H. P. D. O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.155, + 0.888, + 0.202 + ], + "angle": 0, + "content": "Chen, Y.-C., Gan, Z., Cheng, Y., Liu, J., and Liu, J. Distilling knowledge learned in bert for text generation. arXiv preprint arXiv:1911.03829, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.211, + 0.887, + 0.258 + ], + "angle": 0, + "content": "Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. Boolq: Exploring the surprising difficulty of natural yes/no questions. In NAACL, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.267, + 0.887, + 0.327 + ], + "angle": 0, + "content": "Clark, P., Cowhey, I., Etzioni, O., Khot, T., Sabharwal, A., Schoenick, C., and Tafjord, O. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv:1803.05457v1, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.337, + 0.888, + 0.472 + ], + "angle": 0, + "content": "Clinchant, S., Jung, K. W., and Nikoulina, V. On the use of BERT for neural machine translation. In Birch, A., Finch, A., Hayashi, H., Konstas, I., Luong, T., Neubig, G., Oda, Y., and Sudoh, K. (eds.), Proceedings of the 3rd Workshop on Neural Generation and Translation, pp. 108-117, Hong Kong, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-5611. URL https://aclanthology.org/D19-5611/." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.483, + 0.888, + 0.558 + ], + "angle": 0, + "content": "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.569, + 0.888, + 0.659 + ], + "angle": 0, + "content": "Corallo, G. and Papotti, P. FINCH: Prompt-guided key-value cache compression for large language models. Transactions of the Association for Computational Linguistics, 12:1517-1532, 2024. doi: 10.1162/tacl_a_00716. URL https://aclanthology.org/2024.tacl-1.83/." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.67, + 0.888, + 0.73 + ], + "angle": 0, + "content": "Dettmers, T. and Zettlemoyer, L. The case for 4-bit precision: k-bit inference scaling laws. In International Conference on Machine Learning, pp. 7750-7774. PMLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.74, + 0.888, + 0.906 + ], + "angle": 0, + "content": "Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. BERT: Pre-training of deep bidirectional transformers for language understanding. In Burstein, J., Doran, C., and Solorio, T. 
(eds.), Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423." + }, + { + "type": "list", + "bbox": [ + 0.499, + 0.085, + 0.888, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.493, + 0.935 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.072 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.147 + ], + "angle": 0, + "content": "Dua, D., Wang, Y., Dasigi, P., Stanovsky, G., Singh, S., and Gardner, M. Drop: A reading comprehension benchmark requiring discrete reasoning over paragraphs. arXiv preprint arXiv:1903.00161, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.155, + 0.476, + 0.217 + ], + "angle": 0, + "content": "Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.225, + 0.476, + 0.302 + ], + "angle": 0, + "content": "Gemini, T., Reid, M., Savinov, N., Teplyashin, D., Lepikhin, D., Lillicrap, T., Alayrac, J.-b., Soricut, R., Lazaridou, A., First, O., Schrittwieser, J., et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.31, + 0.476, + 0.357 + ], + "angle": 0, + "content": "Gu, A. and Dao, T. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.365, + 0.476, + 0.442 + ], + "angle": 0, + "content": "Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. Measuring massive multi-task language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.45, + 0.476, + 0.481 + ], + "angle": 0, + "content": "Hinton, G., Vinyals, O., and Dean, J. Distilling the knowledge in a neural network, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.49, + 0.476, + 0.552 + ], + "angle": 0, + "content": "Jiang, A. Q., Sablayrolles, A., Roux, A., Mensch, A., Savary, B., Bamford, C., Chaplot, D. S., Casas, D. d. l., Hanna, E. B., Bressand, F., et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.56, + 0.476, + 0.62 + ], + "angle": 0, + "content": "Joshi, M., Choi, E., Weld, D., and Zettlemoyer, L. triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension. arXiv e-prints, art. arXiv:1705.03551, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.63, + 0.476, + 0.692 + ], + "angle": 0, + "content": "Kaneko, M., Mita, M., Kiyono, S., Suzuki, J., and Inui, K. Encoder-decoder models can benefit from pre-trained masked language models in grammatical error correction. arXiv preprint arXiv:2005.00987, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.7, + 0.476, + 0.762 + ], + "angle": 0, + "content": "Kasai, J., Pappas, N., Peng, H., Cross, J., and Smith, N. A. Deep encoder, shallow decoder: Reevaluating non-autoregressive machine translation. arXiv preprint arXiv:2006.10369, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.77, + 0.476, + 0.907 + ], + "angle": 0, + "content": "Kocmi, T., Avramidis, E., Bawden, R., Bojar, O., Dvorkovich, A., Federmann, C., Fishel, M., Freitag, M., Gowda, T., Grundkiewicz, R., Haddow, B., Koehn, P., Marie, B., Monz, C., Morishita, M., Murray, K., Nagata, M., Nakazawa, T., Popel, M., Popovic, M., and Shmatova, M. Findings of the 2023 conference on machine translation (WMT23): LLMs are here but not quite there yet. In Koehn, P., Haddow, B., Kocmi, T., and Monz, C. (eds.), Proceedings of the Eighth Conference on Machine" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.085, + 0.888, + 0.147 + ], + "angle": 0, + "content": "Translation, pp. 1-42, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.wmt-1.1. URL https://aclanthology.org/2023.wmt-1.1/." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.159, + 0.888, + 0.296 + ], + "angle": 0, + "content": "Kwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D., Polosukhin, I., Devlin, J., Lee, K., Toutanova, K., Jones, L., Kelley, M., Chang, M.-W., Dai, A. M., Uszkoreit, J., Le, Q., and Petrov, S. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:452-466, 2019. doi: 10.1162/tacl_a_00276. URL https://aclanthology.org/Q19-1026/." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.309, + 0.888, + 0.46 + ], + "angle": 0, + "content": "Lewis, M., Liu, Y., Goyal, N., Ghazvininejad, M., Mohamed, A., Levy, O., Stoyanov, V., and Zettlemoyer, L. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871-7880, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.703. URL https://aclanthology.org/2020.acl-main.703." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.474, + 0.888, + 0.549 + ], + "angle": 0, + "content": "Li, J., Tang, Z., Ding, Y., Wang, P., Guo, P., You, W., Qiao, D., Chen, W., Fu, G., Zhu, Q., et al. Openba: An open-sourced 15b bilingual asymmetric seq2seq model pretrained from scratch. arXiv preprint arXiv:2309.10706, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.563, + 0.885, + 0.61 + ], + "angle": 0, + "content": "Lin, S., Hilton, J., and Evans, O. Truthfulqa: Measuring how models mimic human falsehoods. arXiv preprint arXiv:2109.07958, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.623, + 0.888, + 0.682 + ], + "angle": 0, + "content": "Liu, A., Feng, B., Xue, B., Wang, B., Wu, B., Lu, C., Zhao, C., Deng, C., Zhang, C., Ruan, C., et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.697, + 0.885, + 0.728 + ], + "angle": 0, + "content": "Liu, Y. and Lapata, M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.741, + 0.888, + 0.817 + ], + "angle": 0, + "content": "Liu, Z., Zhao, C., Iandola, F., Lai, C., Tian, Y., Fedorov, I., Xiong, Y., Chang, E., Shi, Y., Krishnamoorthi, R., et al. Mobilellm: Optimizing sub-billion parameter language models for on-device use cases. arXiv preprint arXiv:2402.14905, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.499, + 0.83, + 0.888, + 0.907 + ], + "angle": 0, + "content": "Paperno, D., Kruszewski, G., Lazaridou, A., Pham, Q. N., Bernardi, R., Pezzelle, S., Baroni, M., Boleda, G., and Fernandez, R. The lambada dataset: Word prediction requiring a broad discourse context. arXiv preprint arXiv:1606.06031, 2016." + }, + { + "type": "list", + "bbox": [ + 0.499, + 0.085, + 0.888, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.072 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.476, + 0.146 + ], + "angle": 0, + "content": "Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., Zhou, Y., Li, W., and Liu, P. J. Exploring the limits of transfer learning with a unified text-to-text transformer. 21(1), jan 2020. ISSN 1532-4435." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.158, + 0.476, + 0.22 + ], + "angle": 0, + "content": "Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J., and Bowman, S. R. Gpqa: A graduate-level google-proof q&a benchmark. arXiv preprint arXiv:2311.12022, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.231, + 0.476, + 0.29 + ], + "angle": 0, + "content": "Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. Winogrande: An adversarial winograd schema challenge at scale. Communications of the ACM, 64(9):99-106, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.303, + 0.476, + 0.349 + ], + "angle": 0, + "content": "Sap, M., Rashkin, H., Chen, D., LeBras, R., and Choi, Y. Socialiaq: Commonsense reasoning about social interactions. arXiv preprint arXiv:1904.09728, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.361, + 0.476, + 0.422 + ], + "angle": 0, + "content": "Shi, F., Suzgun, M., Freitag, M., Wang, X., Srivats, S., Vosoughi, S., Chung, H. W., Tay, Y., Ruder, S., Zhou, D., et al. Language models are multilingual chain-of-thought reasoners. arXiv preprint arXiv:2210.03057, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.434, + 0.476, + 0.479 + ], + "angle": 0, + "content": "Song, K., Tan, X., Qin, T., Lu, J., and Liu, T.-Y. Mass: Masked sequence to sequence pre-training for language generation, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.491, + 0.476, + 0.567 + ], + "angle": 0, + "content": "Suzgun, M., Scales, N., Schärli, N., Gehrmann, S., Tay, Y., Chung, H. W., Chowdhery, A., Le, Q. V., Chi, E. H., Zhou, D., et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.579, + 0.476, + 0.655 + ], + "angle": 0, + "content": "Tay, Y., Dehghani, M., Tran, V. Q., Garcia, X., Wei, J., Wang, X., Chung, H. W., Bahri, D., Schuster, T., Zheng, S., et al. Ul2: Unifying language learning paradigms. 
In The Eleventh International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.667, + 0.476, + 0.743 + ], + "angle": 0, + "content": "Team, G., Riviere, M., Pathak, S., Sessa, P. G., Hardin, C., Bhupatiraju, S., Hussenot, L., Mesnard, T., Shahriari, B., Ramé, A., et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.755, + 0.485, + 0.906 + ], + "angle": 0, + "content": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L. u., and Polosukhin, I. Attention is all you need. In Guyon, I., Luxburg, U. V., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S., and Garnett, R. (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.485, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.085, + 0.886, + 0.162 + ], + "angle": 0, + "content": "Wang, A., Pruksachatkun, Y., Nangia, N., Singh, A., Michael, J., Hill, F., Levy, O., and Bowman, S. Superglue: A stickier benchmark for general-purpose language understanding systems. Advances in neural information processing systems, 32, 2019a." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.171, + 0.886, + 0.245 + ], + "angle": 0, + "content": "Wang, A., Pruksachatkun, Y., Nangia, N., Singh, A., Michael, J., Hill, F., Levy, O., and Bowman, S. R. SuperGLUE: a stickier benchmark for general-purpose language understanding systems. Curran Associates Inc., Red Hook, NY, USA, 2019b." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.255, + 0.888, + 0.421 + ], + "angle": 0, + "content": "Wang, T., Roberts, A., Hesslow, D., Scao, T. L., Chung, H. W., Beltagy, I., Launay, J., and Raffel, C. What language model architecture and pretraining objective works best for zero-shot generalization? In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., and Sabato, S. (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 22964-22984. PMLR, 17-23 Jul 2022. URL https://proceedings.mlr.press/v162/wang22u.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.431, + 0.886, + 0.505 + ], + "angle": 0, + "content": "Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. arXiv preprint arXiv:2406.01574, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.515, + 0.886, + 0.697 + ], + "angle": 0, + "content": "Xue, L., Constant, N., Roberts, A., Kale, M., Al-Rfou, R., Siddhant, A., Barua, A., and Raffel, C. mT5: A massively multilingual pre-trained text-to-text transformer. In Toutanova, K., Rumshisky, A., Zettlemoyer, L., Hakkani-Tur, D., Beltagy, I., Bethard, S., Cotterell, R., Chakraborty, T., and Zhou, Y. (eds.), Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 483-498, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.41. URL https://aclanthology.org/2021.naacl-main.41." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.706, + 0.886, + 0.811 + ], + "angle": 0, + "content": "Xue, L., Barua, A., Constant, N., Al-Rfou, R., Narang, S., Kale, M., Roberts, A., and Raffel, C. ByT5: Towards a token-free future with pre-trained byte-to-byte models. Transactions of the Association for Computational Linguistics, 10:291-306, 2022. doi: 10.1162/tacl_a_00461. URL https://aclanthology.org/2022.tacl-1.17." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.821, + 0.886, + 0.867 + ], + "angle": 0, + "content": "Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.876, + 0.886, + 0.906 + ], + "angle": 0, + "content": "Yang, J., Wang, M., Zhou, H., Zhao, C., Zhang, W., Yu, Y., and Li, L. Towards making the most of bert" + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.888, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.495, + 0.935 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.212, + 0.057, + 0.761, + 0.072 + ], + "angle": 0, + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + }, + { + "type": "ref_text", + "bbox": [ + 0.104, + 0.085, + 0.477, + 0.13 + ], + "angle": 0, + "content": "in neural machine translation. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 9378-9385, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.141, + 0.476, + 0.201 + ], + "angle": 0, + "content": "Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. Hellaswag: Can a machine really finish your sentence? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.211, + 0.478, + 0.363 + ], + "angle": 0, + "content": "Zhang, B., Ghorbani, B., Bapna, A., Cheng, Y., Garcia, X., Shen, J., and First, O. Examining scaling and transfer of language model architectures for machine translation. In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., and Sabato, S. (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 26176-26192. PMLR, 17-23 Jul 2022. URL https://proceedings.mlrpress/v162/zhang22h.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.372, + 0.476, + 0.433 + ], + "angle": 0, + "content": "Zhong, W., Cui, R., Guo, Y., Liang, Y., Lu, S., Wang, Y., Saied, A., Chen, W., and Duan, N. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.443, + 0.476, + 0.489 + ], + "angle": 0, + "content": "Zhu, J., Xia, Y., Wu, L., He, D., Qin, T., Zhou, W., Li, H., and Liu, T.-Y. Incorporating bert into neural machine translation. arXiv preprint arXiv:2002.06823, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.478, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "12" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_origin.pdf b/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..efc912934ec5f4b57d66c8f5984c42efcdbe2257 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ee9b15ddff43c276773a4c31b8fc7b09cfd398f0b6fa6261ab8b8530dff52be +size 611933 diff --git a/data/2025/2504_06xxx/2504.06225/full.md b/data/2025/2504_06xxx/2504.06225/full.md new file mode 100644 index 0000000000000000000000000000000000000000..473501b0edb2fa79179fb3934d416e41c32b1265 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/full.md @@ -0,0 +1,276 @@ +# Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation + +Biao Zhang* Fedor Moiseev* Joshua Ainslie* Paul Suganthan* Min Ma* Surya Bhupatiraju Fede Lebron Orhan Firat Armand Joulin Zhe Dong* + +# Abstract + +While decoder-only large language models (LLMs) have shown impressive results, encoder-decoder models are still widely adopted in real-world applications for their inference efficiency and richer encoder representation. In this paper, we study a novel problem: adapting pretrained decoder-only LLMs to encoder-decoder, with the goal of leveraging the strengths of both approaches to achieve a more favorable quality-efficiency trade-off. We argue that adaptation not only enables inheriting the capability of decoder-only LLMs but also reduces the demand for computation compared to pretraining from scratch. We rigorously explore different pretraining objectives and parameter initialization/optimization techniques. Through extensive experiments based on Gemma 2 (2B and 9B) and a suite of newly pretrained mT5-sized models (up to 1.6B), we demonstrate the effectiveness of adaptation and the advantage of encoder-decoder LLMs. Under similar inference budget, encoder-decoder LLMs achieve comparable (often better) pretraining performance but substantially better finetuning performance than their decoder-only counterpart. For example, Gemma 2B-2B outperforms Gemma 2B by $\sim 7\%$ after instruction tuning. Encoder-decoder adaptation also allows for flexible combination of different-sized models, where Gemma 9B-2B significantly surpasses Gemma 2B-2B by $>3\%$ . The adapted encoder representation also yields better results on SuperGLUE. We will release our checkpoints to facilitate future research. + +# 1. Introduction + +Neural network architectures are often designed to incorporate certain assumptions or inductive biases regarding the + +* Core Contributor. Google. Correspondence to: Biao Zhang , Zhe Dong . + +input data, leading to either improved model performance or better computational efficiency, if not both. Unlike the popular decoder-only architecture used for large language model (LLM) (Brown et al., 2020), the encoder-decoder architecture adopts separate modeling modules – an encoder for input understanding and a decoder for output generation (Vaswani et al., 2017). 
This separation decouples parameters for different functionalities and thus enjoys higher freedom in handling contextual representation and challenging tasks (Tay et al., 2022; Wang et al., 2022). It also offers high flexibility in changing the encoder and decoder size (e.g., a large encoder paired with a small decoder) to control the quality-efficiency trade-off (Kasai et al., 2020; Zhang et al., 2022), an increasingly important aspect for LLM deployment (Gemini et al., 2024). Despite these benefits, however, the study on encoder-decoder LLMs receive little to no attention nowadays. + +In this paper, we revisit this classical architecture by exploring the following question: can we get strong(er) encoder-decoder LLMs by adapting from existing pretrained decoder-only LLMs? We consider the adaptation more significantly than pretraining new models from scratch since pretraining is resource-intensive and powerful decoder-only models at different sizes are already widely available (Dubey et al., 2024; Team et al., 2024; Liu et al., 2024a; Yang et al., 2024; Jiang et al., 2024). Our hypothesis is that, by reusing parameters from decoder-only models, we can accelerate training and effectively transfer their internal knowledge to encoder-decoder, preserving (even enhancing) their capabilities. Note adaptation also allows for pairing varying-sized decoder-only models to achieve specific quality-efficiency considerations. Yet, the optimal method for such adaptation and the extent to which performance can be improved remain open questions, which we aim to address rigorously. + +We employ Gemma 2 (Team et al., 2024) as the testbed. As shown in Figure 1, the encoder-decoder architecture follows the original Transformer (Vaswani et al., 2017) but equipped with Gemma 2 modifications. The key idea behind the adaptation is to initialize the parameters of the encoder-decoder model from pretrained decoder-only model(s) as a warmup and then pretrain or adapt all parameters with self + +![](images/117636db4067cddc308df04a48879f38f22774c43820412bd49fab3eac458ef7.jpg) +Figure 1: Overview of our approach. We build encoder-decoder models by adapting from pretrained decoder-only models. Model architecture and parameters are inherited from the decoder-only model except the cross-attention, for which we adopt different initialization methods depending on the encoder and decoder size. "ROPE": rotary embedding; "FFN": feed-forward layer. + +supervised learning. Depending on whether the encoder and the decoder share the same configuration, we propose different initialization and optimization strategies for the cross-attention layer. We also compare different pretraining objectives, including prefix language modeling with knowledge distillation (Hinton et al., 2015) and UL2 (Tay et al., 2022). Apart from Gemma 2 2B and 9B, we pretrain a series of small models to better understand the adaptation at different scales. + +To thoroughly evaluate model performance, we adopt different benchmarks for pretrained and instruction-tuned models respectively, each covering a range of established academic evaluations. In addition, we use SuperGLUE (Wang et al., 2019a) to measure the quality of the learned contextual representations. Our main findings are below: + +- Leveraging pretrained decoder-only LLMs is an effective way to build powerful encoder-decoder LLMs, which yields substantially improved downstream performance particularly after instruction tuning under similar inference flops. 
+- Our adaptation method is highly flexible, allowing for pairing large encoder with small decoder, such as 9B-2B, with significant quality gains over Gemma 2 2B but similar generation latency. +- Adaptation is not only more compute efficient but also more effective than pretraining from scratch. +- Pretraining objective matters. Models trained with prefix language modeling and knowledge distillation are generally better at generative tasks, while UL2 models have better encoder representations. + +# 2. Related Work + +While the decoder-only architecture has become the de facto standard for LLMs, the debate between encoder-decoder and decoder-only modeling is still not conclusive. Many prior studies proposed different approaches to pretrain strong + +encoder-decoder models, e.g., MASS (Song et al., 2019), T5 (Raffel et al., 2020), mT5 (Xue et al., 2021), byT5 (Xue et al., 2022), BART (Lewis et al., 2020), and OpenBA (Li et al., 2023). Tay et al. (2022) compared different pretraining objectives, highlighting the superiority of UL2 and encoder-decoder modeling. Zhang et al. (2022) systematically examined the scaling behavior of both architectures on machine translation, showing their similarity when adequate objectives are applied. Wang et al. (2022) thoroughly explored different modeling choices and training objectives with a focus on LLM zero-shot generalization. They discovered that encoder-decoder LLMs after instruction tuning achieve the best performance, echoing with our experiments. They also studied adaptation, but it is between different pretraining objectives rather than from decoder-only LLMs to encoder-decoder LLMs. + +Leveraging pretrained models for encoder-decoder modeling has been extensively explored. In the BERT era (Devlin et al., 2019), researchers developed different ways of utilizing it to enhance encoder-decoder performance on downstream tasks, such as machine translation (Zhu et al., 2020; Clinchant et al., 2019; Yang et al., 2020), grammatical error correction (Kaneko et al., 2020), summarization (Liu & Lapata, 2019), and text generation (Chen et al., 2019). Our work follows a similar spirit but is based on pretrained decoder-only LLMs and focuses on developing general-purpose encoder-decoder LLMs. + +Another related direction is the development of inference friendly LLMs. Techniques for improving inference efficiency are many, ranging from quantization (Dettmers & Zettlemoyer, 2023), key-value cache optimization (Corallo & Papotti, 2024), recurrent modeling (Gu & Dao, 2023; Botev et al., 2024), to strong small LLMs with improved pretraining (Abdin et al., 2024; Liu et al., 2024b), to name a few. While these techniques offer significant efficiency gains, their focus is fundamentally distinct and complementary to our proposed encoder-decoder adaptation, i.e., both approaches can be used in conjunction to realize greater overall efficiency. + +# 3. Approach: Encoder-Decoder Adaptation + +# 3.1. Architecture + +Pretraining LLMs is both compute and time intensive. To reduce the amount of training required, we propose to adapt existing decoder-only LLMs to encoder-decoder and leverage pretrained decoder-only checkpoints for initialization, as shown in Figure 1. Due to this, we keep the encoder-decoder architecture as similar as possible to original decoder-only model, only introducing changes when necessary. This results in the following architecture: + +1. 
Encoder has exactly the same architecture as the decoder-only model, but self-attention is switched from causal to bidirectional. We provide ablations in Section 6 that illustrate the critical effect of bidirectional attention on downstream performance.
2. In each decoder block, the FFN and self-attention parts are identical to the corresponding parts in decoder-only models, and cross-attention has the same number of heads and head dimension as self-attention, but attends to the whole output of the encoder.

We base our study on Gemma 2 (Team et al., 2024), but note that our approach is highly flexible and isn't restricted to specific decoder-only architectures. We can easily apply our method to other model families, such as LLaMA (Dubey et al., 2024), Qwen (Yang et al., 2024), and DeepSeek (Liu et al., 2024a). In theory, we can also adapt decoder-only models from different families, such as pairing LLaMA models with Qwen models.

In addition, our approach allows for unbalanced encoder-decoder models, where the decoder is significantly smaller than the encoder. This provides better support for applications where input processing capabilities are more important than generative capacity. For example, for summarization, deep understanding of the input text is often more important than the generation part, as the model doesn't need to generate much new information. As a result, generation time is significantly reduced while quality remains competitive.

# 3.2. Initialization

When initializing an encoder-decoder model from a decoder-only checkpoint, we map every layer to the most similar weight in the decoder-only checkpoint. In particular, the encoder is fully initialized from the decoder-only checkpoint, as it doesn't introduce any new weights. In the decoder, the FFN and self-attention sub-blocks are initialized from the FFN and self-attention weights of the corresponding layers in the decoder-only checkpoint.

Cross-attention is initialized from self-attention weights in the balanced setup, where the encoder and decoder have the same configuration. Otherwise, we first initialize cross-attention from scratch and then finetune it for the first $K$ steps as a warmup while freezing all other model parameters. After $K$ steps, all model parameters are tuned.
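To make the warm-start concrete, below is a minimal, illustrative sketch of the parameter mapping described in this section. It is not the authors' released code: the flat name-to-array checkpoint layout, the parameter names (`self_attn`, `mlp`, ...), and the function name are assumptions made purely for illustration.

```python
# Illustrative sketch of the Section 3.2 warm-start (assumed checkpoint layout,
# not Gemma internals). A decoder-only checkpoint is modeled as a flat dict
# mapping names such as "layer_0.self_attn.q" or "layer_0.mlp.in" to arrays.

def adapt_decoder_only_to_encoder_decoder(enc_ckpt, dec_ckpt, balanced=True):
    """Assemble encoder-decoder parameters from decoder-only checkpoint(s).

    enc_ckpt: checkpoint used to initialize the encoder (e.g. Gemma 2 9B).
    dec_ckpt: checkpoint used to initialize the decoder (e.g. Gemma 2 2B).
    balanced: True when encoder and decoder share one configuration
              (2B-2B, 9B-9B), so cross-attention can reuse self-attention.
    """
    params = {}

    # Encoder: every weight is copied as-is. Switching self-attention from
    # causal to bidirectional changes the attention mask, not any weight.
    for name, weight in enc_ckpt.items():
        params["encoder." + name] = weight

    # Decoder: FFN and self-attention sub-blocks are copied one-to-one.
    for name, weight in dec_ckpt.items():
        params["decoder." + name] = weight

    # Cross-attention: reuse the self-attention weights in the balanced setup;
    # otherwise leave it for fresh initialization followed by a K-step warmup
    # during which only the cross-attention parameters are trained.
    for name, weight in dec_ckpt.items():
        if ".self_attn." in name:
            cross_name = "decoder." + name.replace(".self_attn.", ".cross_attn.")
            params[cross_name] = weight if balanced else None  # None: random init + warmup

    return params
```

Note that in the unbalanced case the encoder width differs from the decoder width (see Table 1), so the cross-attention projections could not be copied one-to-one in any case; they are freshly initialized and warmed up for $K$ steps as described above.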
# 3.3. Pretraining Objective

Decoder-only pretraining often adopts causal language modeling on a single sequence. In contrast, encoder-decoder adaptation requires separate input and target sequences to be fed to the encoder and the decoder. We explore two classical pretraining objectives for encoder-decoder modeling: prefix language modeling (PrefixLM) and UL2 (Tay et al., 2022; Wang et al., 2022).

PrefixLM behaves similarly to causal language modeling except for its prefix condition. To simplify the preprocessing, we split a sequence equally into two halves, using the first half as input and the second as target. This also eases the adoption of knowledge distillation from decoder-only models. UL2 is more complicated: it is composed of several denoising tasks at different levels of complexity. We prepare UL2 data following Tay et al. (2022). We compare the performance of the two objectives in our experiments.

# 4. Setup

Data Setting Our data for pretraining and instruction tuning – including supervised finetuning (SFT) and reinforcement learning from human feedback (RLHF) – follow Gemma 2 (Team et al., 2024). For the adaptation, we preprocess the Gemma 2 pretraining data (8 trillion tokens) with PrefixLM and UL2. Note that the Gemma 2 pretraining data comes with knowledge distillation. We preserve this information for PrefixLM while adopting ground-truth targets for UL2, as mapping the teacher logits to UL2 is non-trivial. The preprocessed data has an input-output sequence length of 4096-4096 and 8192-8192 for PrefixLM and UL2, respectively. We adapt our models on up to 2 trillion tokens.

Model Setting We use Gemma 2 (2B and 9B) as the base decoder-only LLM. We also pretrain several smaller models (Small, Base, Large, and XL) following mT5 configurations (Xue et al., 2021) under the Gemma 2 framework, and then adapt them to encoder-decoder LLMs. Detailed model configurations are given in Table 1.

Evaluation We employ diverse academic evaluation datasets to evaluate different capabilities of LLMs. Concretely, we use the following benchmarks:

- Pretraining (PT) benchmark: Boolq (Clark et al., 2019), SIQA (Sap et al., 2019), PIQA (Bisk et al., 2020), ARC-c & ARC-e (Clark et al., 2018), MMLU (Hendrycks et al., 2021), MMLU Pro (Wang et al., 2024),
HellaSwag (Zellers et al., 2019), Winogrande (Sakaguchi et al., 2021), TruthfulQA (Lin et al., 2021), AGIEval (Zhong et al., 2023), BBH (Suzgun et al., 2022), DROP (Dua et al., 2019), GPQA (Rein et al., 2023), GSM8K (Cobbe et al., 2021), HumanEval (Chen et al., 2021), Lambada (Paperno et al., 2016), MATH-500 (Hendrycks et al., 2021), MBPP (Austin et al., 2021), NQ (Kwiatkowski et al., 2019), TriviaQA (Joshi et al., 2017), and WMT23 (Kocmi et al., 2023). We perform zero/few-shot prompting for pretrained LLMs, and report the averaged result as the PT score.
- Instruction-tuning (IT) benchmark: GSM8K, MMLU, MMLU Pro, MBPP, HumanEval, MATH-500, BBH, GPQA (Diamond), WMT23, and MGSM (Shi et al., 2022). We perform zero/few-shot prompting with task-specific instructions for instruction-tuned models, and report the averaged result as the IT score.
- SuperGLUE (Wang et al., 2019b): we use this benchmark to examine the learned contextual representation. We stack a task-specific head on the representation of the last token in the encoder (decoder) of the encoder-decoder (decoder-only) LLM, and finetune all parameters on the training set. Learning rate, batch size, and dropout are grid-searched for each task. We reformulate all tasks as classification tasks and report averaged dev-set accuracy over COPA, WIC, WSC, RTE, MultiRC, CB, and Boolq.

For generative tasks, we always apply greedy sampling. We perform pretraining, SFT, and RLHF based on the Gemma 2 recipe except for the learning rate, which we tune empirically for encoder-decoder LLMs. In unbalanced encoder-decoder adaptation, e.g. 9B-2B, we set the cross-attention warmup step $K$ to 1000.

| Model | #Layers | $d_{model}$ | $d_{ffn}$ | #heads (q/kv) | $d_{head}$ | #Params (Decoder-Only) | #Params (Encoder-Decoder) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 2B | 26 | 2304 | 18432 | 8/4 | 256 | 2.0B | 4.0B (2B-2B) |
| 9B | 42 | 3584 | 28672 | 16/8 | 256 | 8.3B | 16.7B (9B-9B) |
| S (Small) | 8 | 512 | 1024 | 8/8 | 64 | 14.7M | 29.4M (S-S) |
| B (Base) | 12 | 768 | 2048 | 12/12 | 64 | 56.7M | 113.3M (B-B) |
| L (Large) | 24 | 1024 | 2816 | 16/16 | 64 | 204.6M | 409.1M (L-L) |
| XL (Xlarge) | 24 | 2048 | 5120 | 32/32 | 64 | 780.3M | 1.6B (XL-XL) |

Table 1: Model configurations. #Layers: number of layers; $d_{model/ffn/head}$: model/feed-forward/head dimension; #heads ($q/kv$): number of query/value heads. #Params: number of model parameters excluding embeddings. For encoder-decoder models, we show the number of parameters for the balanced architecture, e.g. 2B-2B. The 9B-2B model has 10.4B parameters. "B/M": billion/million.

# 5. Results

The encoder-decoder adaptation converges rapidly, particularly for balanced architectures. While adaptation leverages pretrained parameters for initialization, whether and how this benefits model convergence remains an open question. Figure 2 shows the change of PT performance with respect to the amount of pretrained tokens. Clearly, adaptation is very computationally efficient, converging quickly and achieving similar performance to its decoder-only counterpart after only tens of billions of tokens. Balanced architectures (2B-2B and 9B-9B) converge much faster than the unbalanced one (9B-2B), since all parameters in the former are initialized from pretrained decoder-only models while the cross-attention in the latter is randomly initialized.

![](images/e84af8ed438269fcb4b932d2777badf545e4bc6420d932ca29791018c17890d4.jpg)
Figure 2: Pretraining performance as a function of the number of pretrained tokens during the adaptation.

We also notice that additional pretraining improves balanced models a little on average but substantially benefits some tasks, like GSM8K and DROP. Besides, 9B-2B performance increases consistently during the adaptation, quickly surpassing Gemma 2 2B and moving towards Gemma 2 9B. This demonstrates the feasibility of encoder-decoder adaptation from varying-sized decoder-only LLMs, as well as its ability to utilize the knowledge from pretrained models.
Pretraining objective matters: UL2 and PrefixLM show different characteristics. A previous study reported the superiority of UL2 over PrefixLM (Tay et al., 2022), but PrefixLM in our study is enhanced with knowledge distillation, which often improves small models significantly. We compare these two objectives for the adaptation in Table 2.
| Model | PT: Gemma 2 | PT: +PrefixLM | PT: +UL2 | IT: Gemma 2 | IT: +PrefixLM | IT: +UL2 |
| --- | --- | --- | --- | --- | --- | --- |
| 2B-2B | 47.9 | 49.7 | 50.1 | (39.0) | 46.4 (46.1) | 42.4 |
| 9B-2B | - | 55.0 | 52.9 | - | 49.3 (50.6) | 45.7 |
| 9B-9B | 61.7 | 63.1 | 63.9 | (59.6) | 62.9 (64.5) | 61.5 |
| S-S | 23.4 | 22.8 | 23.1 | 6.2 | 9.8 | 10.7 |
| B-B | 26.7 | 26.9 | 26.0 | 9.8 | 12.9 | 11.1 |
| L-L | 32.3 | 31.6 | 30.9 | 12.9 | 17.5 | 18.9 |
| XL-XL | 39.7 | 39.5 | 38.5 | 23.5 | 30.7 | 29.2 |

(a) Results on PT and IT benchmarks.
| Model | PT models: Gemma 2 | PT models: +PrefixLM | PT models: +UL2 | IT models: Gemma 2 | IT models: +PrefixLM | IT models: +UL2 |
| --- | --- | --- | --- | --- | --- | --- |
| 2B-2B | 75.5 | 88.1 | 88.1 | (86.2) | 88.3 (87.9) | 90.5 |
| 9B-2B | - | 90.2 | 90.7 | - | 90.6 (90.3) | 91.3 |
| 9B-9B | 82.5 | 91.4 | 91.8 | (89.8) | 91.8 (91.4) | 91.6 |
| S-S | 67.6 | 69.8 | 69.6 | 67.6 | 68.8 | 69.4 |
| B-B | 68.6 | 71.2 | 71.5 | 68.7 | 72.3 | 73.6 |
| L-L | 68.4 | 78.7 | 79.7 | 68.8 | 78.1 | 80.3 |
| XL-XL | 70.7 | 84.4 | 85.4 | 69.2 | 85.7 | 87.0 |
(b) Finetuned performance on SuperGLUE.

Table 2: Main results on PT, IT, and SuperGLUE benchmarks. "Gemma 2": decoder-only models; "+PrefixLM/UL2": encoder-decoder models adapted via prefix language modeling (with knowledge distillation) or UL2. We put Gemma 2 results into the corresponding encoder-decoder rows to save space, e.g. the 2B-2B row for Gemma 2 means Gemma 2 2B. Numbers in parentheses are for RLHFed models. Best results are in bold. Note that PT and IT scores are not directly comparable since they are averaged over different tasks.

We find that PrefixLM and UL2 have their own strengths. Specifically, UL2 delivers stronger contextual representations, outperforming PrefixLM on SuperGLUE across most model scales and resonating with previous findings (Tay et al., 2022). In contrast, PrefixLM produces more powerful generative LLMs thanks to its generative nature and the knowledge distillation. It surpasses UL2 on the PT and IT benchmarks in most cases. In particular, it outperforms UL2 at 9B-2B on both PT and IT by up to 3.6, a significant margin. Since generative LLMs have become the mainstream, we base our following analysis on PrefixLM. We discuss our attempts to combine PrefixLM and UL2 in the next section.

Encoder-decoder LLMs outperform decoder-only LLMs especially after instruction tuning. Table 2 also shows that the adapted encoder-decoder LLMs achieve comparable or slightly better pretraining performance than their decoder-only counterparts but with substantially improved instruction-tuning performance, echoing the findings of Wang et al. (2022). For example, the 9B-9B encoder-decoder LLM surpasses Gemma 2 9B by 1.4 and 4.9 on PT and IT, respectively. The performance gap further increases to 1.8 and 7.1 at the 2B-2B scale. We notice that the adaptation performs slightly worse at scales below 2B on PT, but the improvements on IT are still promising, e.g. 7.2 at XL-XL.

Regardless of PT or IT models, pretraining objectives, and model scales, encoder-decoder LLMs perform consistently better than decoder-only LLMs on SuperGLUE. This suggests that the contextual representation from encoder-decoder LLMs is often of higher quality, likely due to bidirectional self-attention.

We need to highlight that the above analysis is based on the overall performance, which may not apply when it comes to a specific downstream task. As shown in Table 3, some tasks favor encoder-decoder models while others favor decoder-only models, especially for PT models. For example, after pretraining, Gemma 2 9B surpasses 9B-9B by 4.1 on ARC-C but underperforms it by 4.4 on Winogrande; and while the encoder-decoder LLM shows a more consistent advantage after instruction tuning, 9B-9B still lags behind Gemma 2 9B by 0.9 on WMT23. This illustrates the complexity of evaluating LLM capability as well as the risk of reaching misleading conclusions when adopting biased evaluation tasks. We reduce this risk by selecting as diverse and broad a set of evaluation tasks as possible.

Encoder-decoder LLMs balance quality and inference efficiency more effectively. We next analyze different models from the perspective of inference efficiency, which is becoming increasingly crucial for model deployment.
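As a rough illustration of this trade-off, the following back-of-the-envelope sketch compares inference compute under the common approximation of about 2 FLOPs per parameter per processed token for dense transformers. This is an assumption for illustration only (it ignores the quadratic attention term, KV caching, and hardware effects); the parameter counts are the non-embedding counts from Table 1, and the sequence lengths follow the 4096-4096 vs. 8192 setting used for Figure 3.

```python
def approx_forward_flops(num_params, num_tokens):
    # Rough estimate for dense transformers: ~2 FLOPs per parameter per
    # processed token (ignores attention-score FLOPs and caching effects).
    return 2 * num_params * num_tokens

# Decoder-only Gemma 2 2B over an 8192-token sequence.
dec_only_2b = approx_forward_flops(2.0e9, 8192)

# 2B-2B encoder-decoder: 4096 input tokens through the encoder plus
# 4096 output tokens through the decoder.
enc_dec_2b_2b = approx_forward_flops(2.0e9, 4096) + approx_forward_flops(2.0e9, 4096)

# 9B-2B: a large encoder reads the input once, while the small decoder
# keeps the per-output-token generation cost of a 2B model.
enc_dec_9b_2b = approx_forward_flops(8.3e9, 4096) + approx_forward_flops(2.0e9, 4096)

print(f"Gemma 2 2B : {dec_only_2b:.2e} FLOPs")
print(f"2B-2B      : {enc_dec_2b_2b:.2e} FLOPs")  # matches the decoder-only budget
print(f"9B-2B      : {enc_dec_9b_2b:.2e} FLOPs")
```

Under this approximation, a balanced 2B-2B model reading 4096 tokens and writing 4096 tokens spends about the same compute as Gemma 2 2B processing 8192 tokens, while 9B-2B adds encoder compute but keeps the per-output-token decoding cost of a 2B decoder.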
| Task | Metric | Gemma 2 2B | Gemma 2 9B | 2B-2B | 9B-2B | 9B-9B |
| --- | --- | --- | --- | --- | --- | --- |
| MMLU | 5-shot | 51.9 | 71.1 | 46.8 | 60.3 | 71.3 |
| ARC-C | 25-shot | 55.5 | 69.1 | 52.0 | 59.9 | 65.0 |
| GSM8K | 5-shot | 23.7 | 63.2 | 41.7 | 48.7 | 72.8 |
| AGIEval | 3-5-shot | 31.5 | 53.3 | 35.0 | 43.6 | 53.1 |
| DROP | 3-shot, F1 | 53.3 | 71.5 | 61.4 | 66.9 | 75.7 |
| BBH | 3-shot, CoT | 40.2 | 68.9 | 51.9 | 51.6 | 74.7 |
| Winogrande | 5-shot | 65.2 | 74.3 | 69.5 | 68.1 | 78.7 |
| HellaSwag | 10-shot | 72.9 | 81.8 | 74.9 | 75.7 | 81.0 |
| MATH-500 | 4-shot | 17.2 | 33.4 | 24.2 | 23.6 | 37.8 |
| ARC-e | 0-shot | 81.0 | 88.3 | 77.1 | 82.9 | 85.3 |
| PIQA | 0-shot | 78.4 | 81.6 | 79.0 | 78.3 | 81.1 |
| SIQA | 0-shot | 51.7 | 53.6 | 50.1 | 50.1 | 50.5 |
| Boolq | 0-shot | 75.5 | 77.5 | 75.6 | 84.6 | 85.6 |
| TriviaQA | 5-shot | 60.1 | 76.6 | 51.2 | 66.2 | 75.2 |
| NQ | 5-shot | 30.7 | 43.9 | 28.4 | 37.1 | 43.1 |
| HumanEval | pass@1 | 19.5 | 39.0 | 27.4 | 33.5 | 40.2 |
| MBPP | 3-shot | 30.4 | 52.0 | 37.4 | 43.4 | 55.6 |
| Average | | 49.3 | 64.7 | 52.0 | 57.3 | 66.3 |

(a) Results for pretrained models.
| Task | Metric | Gemma 2 2B | Gemma 2 9B | 2B-2B | 9B-2B | 9B-9B |
| --- | --- | --- | --- | --- | --- | --- |
| GSM8K | 11-shot | 58.0 | 84.3 | 70.7 | 73.8 | 88.6 |
| MMLU | 5-shot | 49.8 | 71.8 | 61.5 | 66.7 | 76.7 |
| MMLU Pro | 5-shot | 27.4 | 49.9 | 36.6 | 43.0 | 55.7 |
| MBPP | 3-shot | 37.8 | 59.2 | 44.0 | 49.8 | 64.8 |
| HumanEval | pass@1 | 43.3 | 65.9 | 47.6 | 55.5 | 72.0 |
| MATH-500 | 0-shot | 24.4 | 45.8 | 28.2 | 30.0 | 47.2 |
| BBH | 3-shot | 44.8 | 72.0 | 57.5 | 57.6 | 76.4 |
| GPQA | 0-shot | 24.8 | 29.9 | 27.5 | 32.6 | 35.7 |
| GPQA Diamond | 0-shot | 27.8 | 29.8 | 26.8 | 29.3 | 40.4 |
| WMT23 | 5-shot, BLEURT | 65.2 | 72.0 | 59.9 | 65.3 | 71.1 |
| MGSM | 8-shot | 26.3 | 74.9 | 46.8 | 53.5 | 80.7 |
| Average | | 39.0 | 59.6 | 46.1 | 50.6 | 64.5 |
(b) Results for RLHFed models.

Table 3: Detailed results on different tasks for PT and RLHFed models. We compare Gemma 2 and encoder-decoder models adapted via PrefixLM. Best results are in bold.

Figure 3 shows that balanced encoder-decoder LLMs have similar inference flops to their decoder-only counterparts, e.g. 2B-2B vs. Gemma 2 2B. As such, encoder-decoder models often dominate the quality-inference efficiency frontier across the PT, IT, and SuperGLUE benchmarks.

![](images/5bc3292c9075390db3ad037b56681680df820b5ed6361ee93024ceb18506f6da.jpg)
Figure 3: Comparisons of decoder-only LLMs with adapted encoder-decoder models under inference flops. We show PT, IT, and SuperGLUE performance. Inference flops are estimated with a sequence length of 4096-4096 and 8192 for encoder-decoder and decoder-only LLMs, respectively. Note the upper left corner marks the quality-efficiency frontier.

We acknowledge that inference flops may not correlate well with actual running speed due to factors like inter-device communication, key-value caching, and the autoregressive bottleneck. We therefore provide latency results measured on GSM8K for the 2B and 9B models in Figure 4, which further verify the above analysis. 9B-9B and 2B-2B show similar latency to Gemma 2 9B and 2B, respectively, but clearly better performance. In particular, 9B-2B, the one pairing a large encoder with a small decoder, shows similar latency to Gemma 2 2B but significantly better performance than 2B-2B.

![](images/51bbcc46d8fda806b753bf35c24c7ec6a4f852660553929aa38199e5069a40d7.jpg)

![](images/d2f2cfdf0577b291f7b73a7bf7b879e70cfaae71e626c66933421a10b6d03157.jpg)

![](images/b62435503ae00d84a7a4eecfced3a619d2992128db4f0dea34e324725f8e2c26.jpg)
Figure 4: GSM8K performance as a function of latency for RLHFed models. Latency is estimated as milliseconds (ms) per query by answering 200 reasoning questions from GSM8K. A batch size of 1 is used.

Together, these confirm that encoder-decoder adaptation indeed provides a more flexible way of balancing quality and inference speed.

# 6. Discussion

Is the improvement after the adaptation simply due to the extra pretraining compute? Not really. We also tried to apply more pretraining compute to Gemma 2 2B by going through another 6 trillion tokens, which leads to a PT score of 48.57, still significantly below the encoder-decoder adaptation, 49.7. This indicates that the additional pretraining compute can't fully explain the improvements from the adaptation, and we argue that the inductive bias of encoder-decoder modeling plays a crucial role.

Does cross-attention warmup matter for unbalanced encoder-decoder? Yes. Our preliminary experiments with 9B-2B and UL2 on 800B tokens show that the pretraining performance over Boolq and GSM8K reduces from 62.5 to 61.8 without the warmup. Besides, increasing warmup steps from 1K to 5K further reduces performance to 60.2. An adequate amount of warmup optimization is required to reach the optimal performance.

Can we switch from grouped-query attention to multi-head self-attention for the encoder? Yes, but with mixed results. Gemma 2 adopts grouped-query attention (GQA) to improve its decoding efficiency. However, unlike the decoder, the encoder can be fully parallelized during inference, making the use of multi-head attention (MHA) reasonable. We tried to expand GQA in Gemma 2 2B to MHA by replicating head parameters for the encoder self-attention.
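The head-replication trick can be written down compactly. The sketch below is purely illustrative (the weight shapes, the contiguous grouping convention, and the function name are assumptions rather than Gemma internals): it expands grouped-query key/value projections so that every query head gets its own key/value head.

```python
import numpy as np

def expand_gqa_to_mha(w_k, w_v, num_q_heads, num_kv_heads, head_dim):
    """Replicate grouped KV projection weights so that every query head gets
    its own key/value head (GQA -> MHA for the encoder self-attention).

    w_k, w_v: projections of shape (d_model, num_kv_heads * head_dim).
    Returns projections of shape (d_model, num_q_heads * head_dim).
    """
    assert num_q_heads % num_kv_heads == 0
    group = num_q_heads // num_kv_heads  # query heads sharing one KV head

    def repeat_heads(w):
        d_model = w.shape[0]
        per_head = w.reshape(d_model, num_kv_heads, head_dim)
        expanded = np.repeat(per_head, group, axis=1)  # copy each KV head `group` times
        return expanded.reshape(d_model, num_q_heads * head_dim)

    return repeat_heads(w_k), repeat_heads(w_v)

# Example with Gemma 2 2B-like shapes (8 query heads, 4 KV heads, head_dim 256).
w_k = np.random.randn(2304, 4 * 256).astype(np.float32)
w_v = np.random.randn(2304, 4 * 256).astype(np.float32)
w_k_mha, w_v_mha = expand_gqa_to_mha(w_k, w_v, num_q_heads=8, num_kv_heads=4, head_dim=256)
print(w_k_mha.shape)  # (2304, 2048)
```

Because each new key/value head starts as an exact copy of its group's original head, the expanded encoder computes the same attention outputs at initialization; the extra capacity only matters once training updates the replicated heads independently.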
| Model | Adaptation: PT | Adaptation: IT | Adaptation: SG | Scratch: PT | Scratch: IT | Scratch: SG |
| --- | --- | --- | --- | --- | --- | --- |
| S-S | 22.8 | 9.8 | 68.8 | 24.0 | 9.9 | 70.5 |
| B-B | 26.9 | 12.9 | 72.3 | 28.1 | 11.8 | 75.5 |
| L-L | 31.6 | 17.5 | 78.1 | 30.9 | 17.1 | 78.5 |
| XL-XL | 39.5 | 30.7 | 85.7 | 37.7 | 28.8 | 79.5 |
| 2B-2B | 49.7 | 46.4 | 88.3 | 47.1 | 43.9 | 84.5 |
Table 4: Results for encoder-decoder models adapted with PrefixLM (Adaptation) and pretrained from scratch (Scratch). SG: SuperGLUE score for SFTed models.

Under PrefixLM, this improves PT performance to 50.2 (by 0.5) at 2B-2B but reduces IT performance to 43.5 (by 2.9). We thus still stick to GQA when adapting Gemma 2 2B and 9B for the encoder.

Does bidirectional self-attention matter for the encoder? Yes. A crucial difference between encoder-decoder and decoder-only LLMs is the use of bidirectional self-attention. We also tested keeping the encoder self-attention causal at 2B-2B, which achieves a PT and IT score of 45.6 and 41.7, lagging behind its bidirectional counterpart substantially by 4.1 and 4.7, respectively. Note that the causal 2B-2B model surpasses Gemma 2 2B on IT by 2.7, although it performs worse on PT. This suggests that bidirectional self-attention contributes greatly to the success of our adaptation, but is not the only factor.

Would pretraining encoder-decoder LLMs from scratch yield better performance? Not really. Pretraining from scratch is a common method for developing new LLMs. We also pretrained encoder-decoder LLMs from scratch on 8 trillion tokens with PrefixLM. Table 4 summarizes the results. Despite using more pretraining tokens, encoder-decoder LLMs pretrained from scratch only perform better at small scales, such as S-S and B-B, beyond which adaptation shows clear superiority. As such, adaptation is a more computationally efficient way of developing powerful encoder-decoder LLMs.

![](images/dbcb0e6cce13b037f4c0883ed286cbdc0d95b29ded518c718b721cfc2f428ce9.jpg)
Figure 5: Quality change for the two-stage optimization. "UL2-then-PrefixLM": switch the training objective from UL2 to PrefixLM for the final $10\%$ of tokens; "PrefixLM-then-UL2": similar but from PrefixLM to UL2.

![](images/5ec6336377cc82d82a620bc9b2385a17178a9693bcd557a46c65604ff7d76ed2.jpg)

![](images/bb18af8918cb67a1778002f0f629d0c1e7c9645672e4e9d7249a3689aab6eb52.jpg)

![](images/094aec7e87b8d447722242304ed5483546e0e65034b026f8a17558ab2195a3f1.jpg)

![](images/4de0f4c3868f0d61d6237f19aeceac00a345c0d159226ff2806bd0f3aed4d81f.jpg)
Figure 6: Correlation analysis between PT performance and its corresponding IT/SuperGLUE performance.

Is IT/SuperGLUE score predictable from PT score? Mixed. A general assumption in LLM development is that PT performance can be used as an indicator for downstream applications. We summarize all our ablations in Figure 6. Over all data points and across all model sizes, the correlation is pretty strong: a Spearman's $\rho$ of 0.97 and 0.89 for IT vs. PT and SuperGLUE vs. PT, respectively. However, when considering data points within each model size separately, the averaged Spearman's $\rho$ reduces to 0.42 and 0.05, respectively, and is no longer significant.

In practice, we also noticed that PT checkpoints with weaker performance sometimes yield significantly better IT or SuperGLUE performance. When selecting PT checkpoints for a specific model size, it is better to also examine their IT performance apart from PT results to avoid biases or overfitting.

Can we get the best of both worlds from PrefixLM and UL2? This is non-trivial. Our first attempt is to merge checkpoints trained with PrefixLM and UL2 using uniform weighting. Unfortunately, the merged model results in either similar or much worse performance. We argue that PrefixLM and UL2 lead to different training dynamics and converge to very different local minima.
Directly merging their weights doesn't work right out of the box. + +We next explore a two-stage optimization, where we first adapt with PrefixLM and then shift to UL2 for the last $10\%$ of training, and vice versa. Figure 5 shows very mixed results. Switching from PrefixLM to UL2 generally hurts performance. In contrast, switching from UL2 to PrefixLM improves IT performance, but suffers from reduction in PT and SuperGLUE performance. + +Another direction is to jointly optimize the model on PrefixLM and UL2, which we leave for future work. + +# 7. Conclusion and Future Work + +In this paper, we presented methods for building powerful, general purpose encoder-decoder LLMs by adapting from pretrained decoder-only LLMs. Such adaptation offers high flexibility in leveraging different types/families of pretrained decoder-only models as well as combining different-sized models. Through extensive experiments based on Gemma 2, we demonstrated the feasibility and effectiveness of the adaptation: the adapted encoder-decoder LLMs outperform their decoder-only counterparts substantially after instruction tuning, dominating the quality-inference efficiency frontier. Besides, encoder-decoder LLMs also provide better contextual representations as evaluated on SuperGLUE. + +We hope our findings inspire more researchers from + +academia and industry to revisit the encoder-decoder paradigm for LLM development. To facilitate the research, we will release the code and checkpoints at XXX (coming soon). + +Our work still suffers from several limitations. Particularly, we only experimented with Gemma 2 models up to 9B, although the proposed approach could apply to other LLM families. In the future, we are interested in scaling the model size (e.g., to 27B), exploring other LLMs (such as LLaMA), examining more unbalanced setups, and testing the combination of dense and MoE LLMs. As mentioned above, we will also investigate better ways to leverage PrefixLM, knowledge distillation, and UL2. Extending our adapted encoder-decoder LLM to cross/multi-modality modeling (e.g., vision-language and speech-language) would be another intriguing direction. + +# Acknowledgements + +We'd like to thank Enrique Alfonseca, Tris Warkentin, Xiaodan Song, Sugato Basu, Inderjit Dhillon, Alexander Grushetsky, Pandu Nayak, Ramakrishnan Srikant, and Slav Petrov for their constructive feedback on the manuscript. We are grateful to Srinivasan Venkatachary for supporting this project. + +# References + +Abdin, M., Aneja, J., Awadalla, H., Awadallah, A., Awan, A. A., Bach, N., Bahree, A., Bakhtiari, A., Bao, J., Behl, H., et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024. +Austin, J., Odena, A., Nye, M., Bosma, M., Michalewski, H., Dohan, D., Jiang, E., Cai, C., Terry, M., Le, Q., et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021. +Bisk, Y., Zellers, R., Gao, J., Choi, Y., et al. Piqa: Reasoning about physical commonsense in natural language. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 7432-7439, 2020. +Botev, A., De, S., Smith, S. L., Fernando, A., Muraru, G.-C., Haroun, R., Berrada, L., Pascanu, R., Sessa, P. G., Dadashi, R., et al. Recurrentgemma: Moving past transformers for efficient open language models. arXiv preprint arXiv:2404.07839, 2024. +Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. 
Language models are few-shot learners. Advances in neural information processing systems, 33: 1877-1901, 2020. + +Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H. P. D. O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021. +Chen, Y.-C., Gan, Z., Cheng, Y., Liu, J., and Liu, J. Distilling knowledge learned in bert for text generation. arXiv preprint arXiv:1911.03829, 2019. +Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. Boolq: Exploring the surprising difficulty of natural yes/no questions. In NAACL, 2019. +Clark, P., Cowhey, I., Etzioni, O., Khot, T., Sabharwal, A., Schoenick, C., and Tafjord, O. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv:1803.05457v1, 2018. +Clinchant, S., Jung, K. W., and Nikoulina, V. On the use of BERT for neural machine translation. In Birch, A., Finch, A., Hayashi, H., Konstas, I., Luong, T., Neubig, G., Oda, Y., and Sudoh, K. (eds.), Proceedings of the 3rd Workshop on Neural Generation and Translation, pp. 108-117, Hong Kong, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-5611. URL https://aclanthology.org/D19-5611/. +Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +Corallo, G. and Papotti, P. FINCH: Prompt-guided key-value cache compression for large language models. Transactions of the Association for Computational Linguistics, 12:1517-1532, 2024. doi: 10.1162/tacl_a_00716. URL https://aclanthology.org/2024.tacl-1.83/. +Dettmers, T. and Zettlemoyer, L. The case for 4-bit precision: k-bit inference scaling laws. In International Conference on Machine Learning, pp. 7750-7774. PMLR, 2023. +Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. BERT: Pre-training of deep bidirectional transformers for language understanding. In Burstein, J., Doran, C., and Solorio, T. (eds.), Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423. + +Dua, D., Wang, Y., Dasigi, P., Stanovsky, G., Singh, S., and Gardner, M. Drop: A reading comprehension benchmark requiring discrete reasoning over paragraphs. arXiv preprint arXiv:1903.00161, 2019. +Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. +Gemini, T., Reid, M., Savinov, N., Teplyashin, D., Lepikhin, D., Lillicrap, T., Alayrac, J.-b., Soricut, R., Lazaridou, A., First, O., Schrittwieser, J., et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. +Gu, A. and Dao, T. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023. +Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. Measuring massive multi-task language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ. 
+Hinton, G., Vinyals, O., and Dean, J. Distilling the knowledge in a neural network, 2015. +Jiang, A. Q., Sablayrolles, A., Roux, A., Mensch, A., Savary, B., Bamford, C., Chaplot, D. S., Casas, D. d. l., Hanna, E. B., Bressand, F., et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. +Joshi, M., Choi, E., Weld, D., and Zettlemoyer, L. triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension. arXiv e-prints, art. arXiv:1705.03551, 2017. +Kaneko, M., Mita, M., Kiyono, S., Suzuki, J., and Inui, K. Encoder-decoder models can benefit from pre-trained masked language models in grammatical error correction. arXiv preprint arXiv:2005.00987, 2020. +Kasai, J., Pappas, N., Peng, H., Cross, J., and Smith, N. A. Deep encoder, shallow decoder: Reevaluating non-autoregressive machine translation. arXiv preprint arXiv:2006.10369, 2020. +Kocmi, T., Avramidis, E., Bawden, R., Bojar, O., Dvorkovich, A., Federmann, C., Fishel, M., Freitag, M., Gowda, T., Grundkiewicz, R., Haddow, B., Koehn, P., Marie, B., Monz, C., Morishita, M., Murray, K., Nagata, M., Nakazawa, T., Popel, M., Popovic, M., and Shmatova, M. Findings of the 2023 conference on machine translation (WMT23): LLMs are here but not quite there yet. In Koehn, P., Haddow, B., Kocmi, T., and Monz, C. (eds.), Proceedings of the Eighth Conference on Machine + +Translation, pp. 1-42, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.wmt-1.1. URL https://aclanthology.org/2023.wmt-1.1/. +Kwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D., Polosukhin, I., Devlin, J., Lee, K., Toutanova, K., Jones, L., Kelley, M., Chang, M.-W., Dai, A. M., Uszkoreit, J., Le, Q., and Petrov, S. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:452-466, 2019. doi: 10.1162/tacl_a_00276. URL https://aclanthology.org/Q19-1026/. +Lewis, M., Liu, Y., Goyal, N., Ghazvininejad, M., Mohamed, A., Levy, O., Stoyanov, V., and Zettlemoyer, L. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871-7880, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.703. URL https://aclanthology.org/2020.acl-main.703. +Li, J., Tang, Z., Ding, Y., Wang, P., Guo, P., You, W., Qiao, D., Chen, W., Fu, G., Zhu, Q., et al. Openba: An open-sourced 15b bilingual asymmetric seq2seq model pretrained from scratch. arXiv preprint arXiv:2309.10706, 2023. +Lin, S., Hilton, J., and Evans, O. Truthfulqa: Measuring how models mimic human falsehoods. arXiv preprint arXiv:2109.07958, 2021. +Liu, A., Feng, B., Xue, B., Wang, B., Wu, B., Lu, C., Zhao, C., Deng, C., Zhang, C., Ruan, C., et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a. +Liu, Y. and Lapata, M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345, 2019. +Liu, Z., Zhao, C., Iandola, F., Lai, C., Tian, Y., Fedorov, I., Xiong, Y., Chang, E., Shi, Y., Krishnamoorthi, R., et al. Mobilellm: Optimizing sub-billion parameter language models for on-device use cases. arXiv preprint arXiv:2402.14905, 2024b. +Paperno, D., Kruszewski, G., Lazaridou, A., Pham, Q. N., Bernardi, R., Pezzelle, S., Baroni, M., Boleda, G., and Fernandez, R. The lambada dataset: Word prediction requiring a broad discourse context. 
arXiv preprint arXiv:1606.06031, 2016. + +Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., Zhou, Y., Li, W., and Liu, P. J. Exploring the limits of transfer learning with a unified text-to-text transformer. 21(1), jan 2020. ISSN 1532-4435. +Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J., and Bowman, S. R. Gpqa: A graduate-level google-proof q&a benchmark. arXiv preprint arXiv:2311.12022, 2023. +Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. Winogrande: An adversarial winograd schema challenge at scale. Communications of the ACM, 64(9):99-106, 2021. +Sap, M., Rashkin, H., Chen, D., LeBras, R., and Choi, Y. Socialiaq: Commonsense reasoning about social interactions. arXiv preprint arXiv:1904.09728, 2019. +Shi, F., Suzgun, M., Freitag, M., Wang, X., Srivats, S., Vosoughi, S., Chung, H. W., Tay, Y., Ruder, S., Zhou, D., et al. Language models are multilingual chain-of-thought reasoners. arXiv preprint arXiv:2210.03057, 2022. +Song, K., Tan, X., Qin, T., Lu, J., and Liu, T.-Y. Mass: Masked sequence to sequence pre-training for language generation, 2019. +Suzgun, M., Scales, N., Schärli, N., Gehrmann, S., Tay, Y., Chung, H. W., Chowdhery, A., Le, Q. V., Chi, E. H., Zhou, D., et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022. +Tay, Y., Dehghani, M., Tran, V. Q., Garcia, X., Wei, J., Wang, X., Chung, H. W., Bahri, D., Schuster, T., Zheng, S., et al. Ul2: Unifying language learning paradigms. In The Eleventh International Conference on Learning Representations, 2022. +Team, G., Riviere, M., Pathak, S., Sessa, P. G., Hardin, C., Bhupatiraju, S., Hussenot, L., Mesnard, T., Shahriari, B., Ramé, A., et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024. +Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L. u., and Polosukhin, I. Attention is all you need. In Guyon, I., Luxburg, U. V., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S., and Garnett, R. (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf. + +Wang, A., Pruksachatkun, Y., Nangia, N., Singh, A., Michael, J., Hill, F., Levy, O., and Bowman, S. Superglue: A stickier benchmark for general-purpose language understanding systems. Advances in neural information processing systems, 32, 2019a. +Wang, A., Pruksachatkun, Y., Nangia, N., Singh, A., Michael, J., Hill, F., Levy, O., and Bowman, S. R. SuperGLUE: a stickier benchmark for general-purpose language understanding systems. Curran Associates Inc., Red Hook, NY, USA, 2019b. +Wang, T., Roberts, A., Hesslow, D., Scao, T. L., Chung, H. W., Beltagy, I., Launay, J., and Raffel, C. What language model architecture and pretraining objective works best for zero-shot generalization? In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., and Sabato, S. (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 22964-22984. PMLR, 17-23 Jul 2022. URL https://proceedings.mlr.press/v162/wang22u.html. +Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. arXiv preprint arXiv:2406.01574, 2024. 
+Xue, L., Constant, N., Roberts, A., Kale, M., Al-Rfou, R., Siddhant, A., Barua, A., and Raffel, C. mT5: A massively multilingual pre-trained text-to-text transformer. In Toutanova, K., Rumshisky, A., Zettlemoyer, L., Hakkani-Tur, D., Beltagy, I., Bethard, S., Cotterell, R., Chakraborty, T., and Zhou, Y. (eds.), Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 483-498, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.41. URL https://aclanthology.org/2021.naacl-main.41. +Xue, L., Barua, A., Constant, N., Al-Rfou, R., Narang, S., Kale, M., Roberts, A., and Raffel, C. ByT5: Towards a token-free future with pre-trained byte-to-byte models. Transactions of the Association for Computational Linguistics, 10:291-306, 2022. doi: 10.1162/tacl_a_00461. URL https://aclanthology.org/2022.tacl-1.17. +Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. +Yang, J., Wang, M., Zhou, H., Zhao, C., Zhang, W., Yu, Y., and Li, L. Towards making the most of bert + +in neural machine translation. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 9378-9385, 2020. +Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. Hellaswag: Can a machine really finish your sentence? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, 2019. +Zhang, B., Ghorbani, B., Bapna, A., Cheng, Y., Garcia, X., Shen, J., and First, O. Examining scaling and transfer of language model architectures for machine translation. In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., and Sabato, S. (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 26176-26192. PMLR, 17-23 Jul 2022. URL https://proceedings.mlrpress/v162/zhang22h.html. +Zhong, W., Cui, R., Guo, Y., Liang, Y., Lu, S., Wang, Y., Saied, A., Chen, W., and Duan, N. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023. +Zhu, J., Xia, Y., Wu, L., He, D., Qin, T., Zhou, W., Li, H., and Liu, T.-Y. Incorporating bert into neural machine translation. arXiv preprint arXiv:2002.06823, 2020. 
\ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06225/images/094aec7e87b8d447722242304ed5483546e0e65034b026f8a17558ab2195a3f1.jpg b/data/2025/2504_06xxx/2504.06225/images/094aec7e87b8d447722242304ed5483546e0e65034b026f8a17558ab2195a3f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b3bd0b2f19ae8f3a2cae67452af50f1a4a137b1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/094aec7e87b8d447722242304ed5483546e0e65034b026f8a17558ab2195a3f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:569b1c861649ea56de712ce0de9c4c8cd9a1a6f792baa919fcabbe0d7c148c80 +size 20308 diff --git a/data/2025/2504_06xxx/2504.06225/images/117636db4067cddc308df04a48879f38f22774c43820412bd49fab3eac458ef7.jpg b/data/2025/2504_06xxx/2504.06225/images/117636db4067cddc308df04a48879f38f22774c43820412bd49fab3eac458ef7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d78e1579f31b63268b28a069f860d60718c08c63 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/117636db4067cddc308df04a48879f38f22774c43820412bd49fab3eac458ef7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:735623b956ceddf0e94629cfa5dfcb5199337371924731f90e6d30db0bd64849 +size 33231 diff --git a/data/2025/2504_06xxx/2504.06225/images/35058235b3a8e97de81c56604e7aa53eee71b1af57cda67fbad7585e55cb8bd2.jpg b/data/2025/2504_06xxx/2504.06225/images/35058235b3a8e97de81c56604e7aa53eee71b1af57cda67fbad7585e55cb8bd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b3972a2a376e17c2b06944c51496ba915cf11cd --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/35058235b3a8e97de81c56604e7aa53eee71b1af57cda67fbad7585e55cb8bd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1c15b12d678e3f73105712e50f9a33ab395ca789a3ce4df105c65e0aa892bc3 +size 27604 diff --git a/data/2025/2504_06xxx/2504.06225/images/3f839176a76f6ca49b21f2fe1d9927f962024d2444b345aaeefe51e43261e557.jpg b/data/2025/2504_06xxx/2504.06225/images/3f839176a76f6ca49b21f2fe1d9927f962024d2444b345aaeefe51e43261e557.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5c4e236239d2ef4d1452d5cc2d0cc0acc2c65ee --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/3f839176a76f6ca49b21f2fe1d9927f962024d2444b345aaeefe51e43261e557.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:240b40b933d879abca2718f4cbebbadcf3df2af69da8e1aacbe1fcddaa42fb9b +size 94420 diff --git a/data/2025/2504_06xxx/2504.06225/images/416bbeebb01547253fc7b5fc27c306c2b693d521b32756f8c4ea25bef1cb7ed1.jpg b/data/2025/2504_06xxx/2504.06225/images/416bbeebb01547253fc7b5fc27c306c2b693d521b32756f8c4ea25bef1cb7ed1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4848f3284abc5e8f8daad8e4dcafc963d5667bfc --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/416bbeebb01547253fc7b5fc27c306c2b693d521b32756f8c4ea25bef1cb7ed1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26bda14c5bec261333da11ef954a872e8e5e91963614f6eb8d6e9e8b8c377e88 +size 54452 diff --git a/data/2025/2504_06xxx/2504.06225/images/4bbb3f398b8b1bb36540721f23580fdcd02028eff36e3066fff913714b0c47e3.jpg b/data/2025/2504_06xxx/2504.06225/images/4bbb3f398b8b1bb36540721f23580fdcd02028eff36e3066fff913714b0c47e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b134169d6cd39fa2e9581239f73293b406c998aa --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06225/images/4bbb3f398b8b1bb36540721f23580fdcd02028eff36e3066fff913714b0c47e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77f47f46aab084e6e3c3b1944d884dcb27cb542c1d8ed937a5af6b6a2a62546d +size 44528 diff --git a/data/2025/2504_06xxx/2504.06225/images/4de0f4c3868f0d61d6237f19aeceac00a345c0d159226ff2806bd0f3aed4d81f.jpg b/data/2025/2504_06xxx/2504.06225/images/4de0f4c3868f0d61d6237f19aeceac00a345c0d159226ff2806bd0f3aed4d81f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f68c208653bc6bcd2b8dfbf27bf7311f8dfa5aeb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/4de0f4c3868f0d61d6237f19aeceac00a345c0d159226ff2806bd0f3aed4d81f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfc94590d7a9eb64e4c6f8996c79a84594e602817c7f0c106842ec95f16c3168 +size 21831 diff --git a/data/2025/2504_06xxx/2504.06225/images/51bbcc46d8fda806b753bf35c24c7ec6a4f852660553929aa38199e5069a40d7.jpg b/data/2025/2504_06xxx/2504.06225/images/51bbcc46d8fda806b753bf35c24c7ec6a4f852660553929aa38199e5069a40d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c09723bbb9f0f766bc25a41fee88b8c880db18d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/51bbcc46d8fda806b753bf35c24c7ec6a4f852660553929aa38199e5069a40d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06574c96658c905149f3c61101f1bc254f264eeff7ac2a5c54ca21bdaa43421b +size 17811 diff --git a/data/2025/2504_06xxx/2504.06225/images/5bc3292c9075390db3ad037b56681680df820b5ed6361ee93024ceb18506f6da.jpg b/data/2025/2504_06xxx/2504.06225/images/5bc3292c9075390db3ad037b56681680df820b5ed6361ee93024ceb18506f6da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98439b20d9fff44efafeb1bc050fa1ccb40c8952 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/5bc3292c9075390db3ad037b56681680df820b5ed6361ee93024ceb18506f6da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dbe034ea1731d07998fed0733e30fc5ae0bd4a34cf45a8fab2aa8d743456ec6 +size 15469 diff --git a/data/2025/2504_06xxx/2504.06225/images/5ec6336377cc82d82a620bc9b2385a17178a9693bcd557a46c65604ff7d76ed2.jpg b/data/2025/2504_06xxx/2504.06225/images/5ec6336377cc82d82a620bc9b2385a17178a9693bcd557a46c65604ff7d76ed2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bb73a3b692c6afd55cad3c7fbd98ae7df7f33c7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/5ec6336377cc82d82a620bc9b2385a17178a9693bcd557a46c65604ff7d76ed2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce843829fc274738186e7b94d5094a112c2787bfd0a1e15f08633eb51f7b15a7 +size 19960 diff --git a/data/2025/2504_06xxx/2504.06225/images/b62435503ae00d84a7a4eecfced3a619d2992128db4f0dea34e324725f8e2c26.jpg b/data/2025/2504_06xxx/2504.06225/images/b62435503ae00d84a7a4eecfced3a619d2992128db4f0dea34e324725f8e2c26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f53e9add98b8060f59fe419ae529626fe2d99876 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/b62435503ae00d84a7a4eecfced3a619d2992128db4f0dea34e324725f8e2c26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20aa9a8251f451623d459ce3e7356dde7c62eed6cc44e81130eac7d220fc6992 +size 18112 diff --git a/data/2025/2504_06xxx/2504.06225/images/bb18af8918cb67a1778002f0f629d0c1e7c9645672e4e9d7249a3689aab6eb52.jpg 
b/data/2025/2504_06xxx/2504.06225/images/bb18af8918cb67a1778002f0f629d0c1e7c9645672e4e9d7249a3689aab6eb52.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c19a8cce458da23cb6bc5e47ef0480e4e60f621f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/bb18af8918cb67a1778002f0f629d0c1e7c9645672e4e9d7249a3689aab6eb52.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a1e05082c809d6d9a0f0d164b929437b04660d7bbfe6a96be363113bf8f867e +size 20800 diff --git a/data/2025/2504_06xxx/2504.06225/images/c5f30f5d5027dab8614556e9555e0a41a7cbd800c71bc65594505349fcf4bb25.jpg b/data/2025/2504_06xxx/2504.06225/images/c5f30f5d5027dab8614556e9555e0a41a7cbd800c71bc65594505349fcf4bb25.jpg new file mode 100644 index 0000000000000000000000000000000000000000..337244eb28e999695fbb02c69ad4820a8243ab45 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/c5f30f5d5027dab8614556e9555e0a41a7cbd800c71bc65594505349fcf4bb25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:064fa3b3b0fb066d9d701a1cf25ad66fff68c022f68b2525f1bccde0ad1cfbb5 +size 72644 diff --git a/data/2025/2504_06xxx/2504.06225/images/d2f2cfdf0577b291f7b73a7bf7b879e70cfaae71e626c66933421a10b6d03157.jpg b/data/2025/2504_06xxx/2504.06225/images/d2f2cfdf0577b291f7b73a7bf7b879e70cfaae71e626c66933421a10b6d03157.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c6b9065d6fefd6e946bb7e3149616a4fdcfae56 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/d2f2cfdf0577b291f7b73a7bf7b879e70cfaae71e626c66933421a10b6d03157.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19dfe13826d9bc5b69a358065fd7aaf85653efe12e801d7b0be1ca3b7b63cd15 +size 17515 diff --git a/data/2025/2504_06xxx/2504.06225/images/dbcb0e6cce13b037f4c0883ed286cbdc0d95b29ded518c718b721cfc2f428ce9.jpg b/data/2025/2504_06xxx/2504.06225/images/dbcb0e6cce13b037f4c0883ed286cbdc0d95b29ded518c718b721cfc2f428ce9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34565b4a9d4e249f902a05842b89c56debbe5ec8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/dbcb0e6cce13b037f4c0883ed286cbdc0d95b29ded518c718b721cfc2f428ce9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ca0470f433eb6a9dd04830460163523c626a517c6a8746b361f84a009af090f +size 20290 diff --git a/data/2025/2504_06xxx/2504.06225/images/e342687ba52ca17516eaef964d4960ad15f8aa80f31daad7aea4c345f96fb85e.jpg b/data/2025/2504_06xxx/2504.06225/images/e342687ba52ca17516eaef964d4960ad15f8aa80f31daad7aea4c345f96fb85e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8610040b8c30d7ae3cc5bf8cdd65ef3233abf525 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/e342687ba52ca17516eaef964d4960ad15f8aa80f31daad7aea4c345f96fb85e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4f6bab306a6063dda77c97a8c8461f89508ae2bb73d28ee2fbf6a39d29d3303 +size 47354 diff --git a/data/2025/2504_06xxx/2504.06225/images/e84af8ed438269fcb4b932d2777badf545e4bc6420d932ca29791018c17890d4.jpg b/data/2025/2504_06xxx/2504.06225/images/e84af8ed438269fcb4b932d2777badf545e4bc6420d932ca29791018c17890d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ac9c048783e002f51bd4f8f9c4189611e3c6933 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/images/e84af8ed438269fcb4b932d2777badf545e4bc6420d932ca29791018c17890d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ff5f46c5fad0aa5c2463a19c4245312da666ff9c48aed162d183f574a6035289 +size 22739 diff --git a/data/2025/2504_06xxx/2504.06225/layout.json b/data/2025/2504_06xxx/2504.06225/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e023ad2c139f577d1d90c26fbe02635e38faa50d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06225/layout.json @@ -0,0 +1,7097 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 67, + 87, + 529, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 87, + 529, + 124 + ], + "spans": [ + { + "bbox": [ + 67, + 87, + 529, + 124 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 58, + 157, + 538, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 157, + 538, + 184 + ], + "spans": [ + { + "bbox": [ + 58, + 157, + 538, + 184 + ], + "type": "text", + "content": "Biao Zhang* Fedor Moiseev* Joshua Ainslie* Paul Suganthan* Min Ma* Surya Bhupatiraju Fede Lebron Orhan Firat Armand Joulin Zhe Dong*" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 148, + 204, + 196, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 204, + 196, + 217 + ], + "spans": [ + { + "bbox": [ + 148, + 204, + 196, + 217 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 220, + 272, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 220, + 272, + 602 + ], + "spans": [ + { + "bbox": [ + 72, + 220, + 272, + 602 + ], + "type": "text", + "content": "While decoder-only large language models (LLMs) have shown impressive results, encoder-decoder models are still widely adopted in real-world applications for their inference efficiency and richer encoder representation. In this paper, we study a novel problem: adapting pretrained decoder-only LLMs to encoder-decoder, with the goal of leveraging the strengths of both approaches to achieve a more favorable quality-efficiency trade-off. We argue that adaptation not only enables inheriting the capability of decoder-only LLMs but also reduces the demand for computation compared to pretraining from scratch. We rigorously explore different pretraining objectives and parameter initialization/optimization techniques. Through extensive experiments based on Gemma 2 (2B and 9B) and a suite of newly pretrained mT5-sized models (up to 1.6B), we demonstrate the effectiveness of adaptation and the advantage of encoder-decoder LLMs. Under similar inference budget, encoder-decoder LLMs achieve comparable (often better) pretraining performance but substantially better finetuning performance than their decoder-only counterpart. For example, Gemma 2B-2B outperforms Gemma 2B by " + }, + { + "bbox": [ + 72, + 220, + 272, + 602 + ], + "type": "inline_equation", + "content": "\\sim 7\\%" + }, + { + "bbox": [ + 72, + 220, + 272, + 602 + ], + "type": "text", + "content": " after instruction tuning. Encoder-decoder adaptation also allows for flexible combination of different-sized models, where Gemma 9B-2B significantly surpasses Gemma 2B-2B by " + }, + { + "bbox": [ + 72, + 220, + 272, + 602 + ], + "type": "inline_equation", + "content": ">3\\%" + }, + { + "bbox": [ + 72, + 220, + 272, + 602 + ], + "type": "text", + "content": ". The adapted encoder representation also yields better results on SuperGLUE. We will release our checkpoints to facilitate future research." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 624, + 133, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 624, + 133, + 636 + ], + "spans": [ + { + "bbox": [ + 53, + 624, + 133, + 636 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 644, + 291, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 644, + 291, + 669 + ], + "spans": [ + { + "bbox": [ + 52, + 644, + 291, + 669 + ], + "type": "text", + "content": "Neural network architectures are often designed to incorporate certain assumptions or inductive biases regarding the" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 675, + 292, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 675, + 292, + 698 + ], + "spans": [ + { + "bbox": [ + 52, + 675, + 292, + 698 + ], + "type": "text", + "content": "* Core Contributor. Google. Correspondence to: Biao Zhang , Zhe Dong ." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 205, + 543, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 205, + 543, + 410 + ], + "spans": [ + { + "bbox": [ + 303, + 205, + 543, + 410 + ], + "type": "text", + "content": "input data, leading to either improved model performance or better computational efficiency, if not both. Unlike the popular decoder-only architecture used for large language model (LLM) (Brown et al., 2020), the encoder-decoder architecture adopts separate modeling modules – an encoder for input understanding and a decoder for output generation (Vaswani et al., 2017). This separation decouples parameters for different functionalities and thus enjoys higher freedom in handling contextual representation and challenging tasks (Tay et al., 2022; Wang et al., 2022). It also offers high flexibility in changing the encoder and decoder size (e.g., a large encoder paired with a small decoder) to control the quality-efficiency trade-off (Kasai et al., 2020; Zhang et al., 2022), an increasingly important aspect for LLM deployment (Gemini et al., 2024). Despite these benefits, however, the study on encoder-decoder LLMs receive little to no attention nowadays." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 415, + 544, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 415, + 544, + 619 + ], + "spans": [ + { + "bbox": [ + 303, + 415, + 544, + 619 + ], + "type": "text", + "content": "In this paper, we revisit this classical architecture by exploring the following question: can we get strong(er) encoder-decoder LLMs by adapting from existing pretrained decoder-only LLMs? We consider the adaptation more significantly than pretraining new models from scratch since pretraining is resource-intensive and powerful decoder-only models at different sizes are already widely available (Dubey et al., 2024; Team et al., 2024; Liu et al., 2024a; Yang et al., 2024; Jiang et al., 2024). Our hypothesis is that, by reusing parameters from decoder-only models, we can accelerate training and effectively transfer their internal knowledge to encoder-decoder, preserving (even enhancing) their capabilities. Note adaptation also allows for pairing varying-sized decoder-only models to achieve specific quality-efficiency considerations. Yet, the optimal method for such adaptation and the extent to which performance can be improved remain open questions, which we aim to address rigorously." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 624, + 544, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 624, + 544, + 708 + ], + "spans": [ + { + "bbox": [ + 303, + 624, + 544, + 708 + ], + "type": "text", + "content": "We employ Gemma 2 (Team et al., 2024) as the testbed. As shown in Figure 1, the encoder-decoder architecture follows the original Transformer (Vaswani et al., 2017) but equipped with Gemma 2 modifications. The key idea behind the adaptation is to initialize the parameters of the encoder-decoder model from pretrained decoder-only model(s) as a warmup and then pretrain or adapt all parameters with self" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 216, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 216, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 216, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.06225v1 [cs.CL] 8 Apr 2025" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 128, + 64, + 467, + 173 + ], + "blocks": [ + { + "bbox": [ + 128, + 64, + 467, + 173 + ], + "lines": [ + { + "bbox": [ + 128, + 64, + 467, + 173 + ], + "spans": [ + { + "bbox": [ + 128, + 64, + 467, + 173 + ], + "type": "image", + "image_path": "117636db4067cddc308df04a48879f38f22774c43820412bd49fab3eac458ef7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 180, + 543, + 212 + ], + "lines": [ + { + "bbox": [ + 51, + 180, + 543, + 212 + ], + "spans": [ + { + "bbox": [ + 51, + 180, + 543, + 212 + ], + "type": "text", + "content": "Figure 1: Overview of our approach. We build encoder-decoder models by adapting from pretrained decoder-only models. Model architecture and parameters are inherited from the decoder-only model except the cross-attention, for which we adopt different initialization methods depending on the encoder and decoder size. \"ROPE\": rotary embedding; \"FFN\": feed-forward layer." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 232, + 291, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 232, + 291, + 340 + ], + "spans": [ + { + "bbox": [ + 52, + 232, + 291, + 340 + ], + "type": "text", + "content": "supervised learning. Depending on whether the encoder and the decoder share the same configuration, we propose different initialization and optimization strategies for the cross-attention layer. We also compare different pretraining objectives, including prefix language modeling with knowledge distillation (Hinton et al., 2015) and UL2 (Tay et al., 2022). Apart from Gemma 2 2B and 9B, we pretrain a series of small models to better understand the adaptation at different scales." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 346, + 291, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 346, + 291, + 418 + ], + "spans": [ + { + "bbox": [ + 52, + 346, + 291, + 418 + ], + "type": "text", + "content": "To thoroughly evaluate model performance, we adopt different benchmarks for pretrained and instruction-tuned models respectively, each covering a range of established academic evaluations. In addition, we use SuperGLUE (Wang et al., 2019a) to measure the quality of the learned contextual representations. Our main findings are below:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 431, + 290, + 633 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 64, + 431, + 290, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 431, + 290, + 491 + ], + "spans": [ + { + "bbox": [ + 64, + 431, + 290, + 491 + ], + "type": "text", + "content": "- Leveraging pretrained decoder-only LLMs is an effective way to build powerful encoder-decoder LLMs, which yields substantially improved downstream performance particularly after instruction tuning under similar inference flops." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 64, + 498, + 290, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 498, + 290, + 546 + ], + "spans": [ + { + "bbox": [ + 64, + 498, + 290, + 546 + ], + "type": "text", + "content": "- Our adaptation method is highly flexible, allowing for pairing large encoder with small decoder, such as 9B-2B, with significant quality gains over Gemma 2 2B but similar generation latency." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 64, + 554, + 289, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 554, + 289, + 578 + ], + "spans": [ + { + "bbox": [ + 64, + 554, + 289, + 578 + ], + "type": "text", + "content": "- Adaptation is not only more compute efficient but also more effective than pretraining from scratch." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 64, + 586, + 290, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 586, + 290, + 633 + ], + "spans": [ + { + "bbox": [ + 64, + 586, + 290, + 633 + ], + "type": "text", + "content": "- Pretraining objective matters. Models trained with prefix language modeling and knowledge distillation are generally better at generative tasks, while UL2 models have better encoder representations." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 52, + 649, + 139, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 649, + 139, + 661 + ], + "spans": [ + { + "bbox": [ + 52, + 649, + 139, + 661 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 670, + 291, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 670, + 291, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 670, + 291, + 718 + ], + "type": "text", + "content": "While the decoder-only architecture has become the de facto standard for LLMs, the debate between encoder-decoder and decoder-only modeling is still not conclusive. 
Many prior studies proposed different approaches to pretrain strong" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 232, + 544, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 232, + 544, + 422 + ], + "spans": [ + { + "bbox": [ + 303, + 232, + 544, + 422 + ], + "type": "text", + "content": "encoder-decoder models, e.g., MASS (Song et al., 2019), T5 (Raffel et al., 2020), mT5 (Xue et al., 2021), byT5 (Xue et al., 2022), BART (Lewis et al., 2020), and OpenBA (Li et al., 2023). Tay et al. (2022) compared different pretraining objectives, highlighting the superiority of UL2 and encoder-decoder modeling. Zhang et al. (2022) systematically examined the scaling behavior of both architectures on machine translation, showing their similarity when adequate objectives are applied. Wang et al. (2022) thoroughly explored different modeling choices and training objectives with a focus on LLM zero-shot generalization. They discovered that encoder-decoder LLMs after instruction tuning achieve the best performance, echoing with our experiments. They also studied adaptation, but it is between different pretraining objectives rather than from decoder-only LLMs to encoder-decoder LLMs." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 430, + 544, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 430, + 544, + 561 + ], + "spans": [ + { + "bbox": [ + 303, + 430, + 544, + 561 + ], + "type": "text", + "content": "Leveraging pretrained models for encoder-decoder modeling has been extensively explored. In the BERT era (Devlin et al., 2019), researchers developed different ways of utilizing it to enhance encoder-decoder performance on downstream tasks, such as machine translation (Zhu et al., 2020; Clinchant et al., 2019; Yang et al., 2020), grammatical error correction (Kaneko et al., 2020), summarization (Liu & Lapata, 2019), and text generation (Chen et al., 2019). Our work follows a similar spirit but is based on pretrained decoder-only LLMs and focuses on developing general-purpose encoder-decoder LLMs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 567, + 544, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 567, + 544, + 711 + ], + "spans": [ + { + "bbox": [ + 303, + 567, + 544, + 711 + ], + "type": "text", + "content": "Another related direction is the development of inference friendly LLMs. Techniques for improving inference efficiency are many, ranging from quantization (Dettmers & Zettlemoyer, 2023), key-value cache optimization (Corallo & Papotti, 2024), recurrent modeling (Gu & Dao, 2023; Botev et al., 2024), to strong small LLMs with improved pretraining (Abdin et al., 2024; Liu et al., 2024b), to name a few. While these techniques offer significant efficiency gains, their focus is fundamentally distinct and complementary to our proposed encoder-decoder adaptation, i.e., both approaches can be used in conjunction to realize greater overall efficiency." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 66, + 276, + 80 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 66, + 276, + 80 + ], + "spans": [ + { + "bbox": [ + 52, + 66, + 276, + 80 + ], + "type": "text", + "content": "3. Approach: Encoder-Decoder Adaptation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 87, + 127, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 87, + 127, + 99 + ], + "spans": [ + { + "bbox": [ + 53, + 87, + 127, + 99 + ], + "type": "text", + "content": "3.1. Architecture" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 105, + 291, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 105, + 291, + 201 + ], + "spans": [ + { + "bbox": [ + 52, + 105, + 291, + 201 + ], + "type": "text", + "content": "Pretraining LLMs is both compute and time intensive. To reduce the amount of training required, we propose to adapt existing decoder-only LLMs to encoder-decoder and leverage pretrained decoder-only checkpoints for initialization, as shown in Figure 1. Due to this, we keep the encoder-decoder architecture as similar as possible to original decoder-only model, only introducing changes when necessary. This results in the following architecture:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 60, + 213, + 291, + 340 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 61, + 213, + 290, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 213, + 290, + 273 + ], + "spans": [ + { + "bbox": [ + 61, + 213, + 290, + 273 + ], + "type": "text", + "content": "1. Encoder has exactly the same architecture as the decoder-only model, but self-attention is switched from causal to bidirectional. We provide ablations in Section 6 that illustrate the critical effect of bidirectional attention on downstream performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 279, + 291, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 279, + 291, + 340 + ], + "spans": [ + { + "bbox": [ + 60, + 279, + 291, + 340 + ], + "type": "text", + "content": "2. In each Decoder block, FFN and self-attention parts are identical to the corresponding parts in decoder-only models, and cross-attention has the same number of heads and head dimension as self-attention, but attends to the whole output of the encoder." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 52, + 350, + 291, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 350, + 291, + 447 + ], + "spans": [ + { + "bbox": [ + 52, + 350, + 291, + 447 + ], + "type": "text", + "content": "We base our study on Gemma 2 (Team et al., 2024). 
But note our approach is highly flexible and isn't restricted to specific decoder-only architectures. We can easily apply our method to other model families, such as LLaMA (Dubey et al., 2024), QWen (Yang et al., 2024), and DeepSeek (Liu et al., 2024a). In theory, we can also adapt decoder-only models from different families, such as pairing LLaMA models with QWen models." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 453, + 291, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 453, + 291, + 561 + ], + "spans": [ + { + "bbox": [ + 52, + 453, + 291, + 561 + ], + "type": "text", + "content": "In addition, our approach allows for unbalanced encoder-decoder models, where the decoder is significantly smaller than the encoder. This provides better support for applications where input processing capabilities are more important than generative capacity. For example, for summarization, deep understanding of the input text is often more important than the generation part, as it doesn't need to generate any new information. As a result, generation time is significantly reduced, while providing competitive quality." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 573, + 129, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 573, + 129, + 585 + ], + "spans": [ + { + "bbox": [ + 53, + 573, + 129, + 585 + ], + "type": "text", + "content": "3.2. Initialization" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 592, + 291, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 592, + 291, + 688 + ], + "spans": [ + { + "bbox": [ + 52, + 592, + 291, + 688 + ], + "type": "text", + "content": "When initializing an encoder-decoder model from a decoder-only checkpoint, we try to map every layer to the most similar weight in the decoder-only checkpoint. In particular, the encoder is fully initialized from the decoder-only checkpoint, as it doesn't introduce any new weights. In the decoder, FFN and self-attention subblocks are initialized from the FFN and self-attention weights from the corresponding layers in the decoder-only checkpoint." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 693, + 290, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 290, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 290, + 717 + ], + "type": "text", + "content": "Cross-attention is initialized from self-attention weights in the balanced setup where encoder and decoder have the" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 67, + 543, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 543, + 116 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 543, + 116 + ], + "type": "text", + "content": "same configuration. Otherwise, we first initialize crossattention from scratch and then finetune it for the first " + }, + { + "bbox": [ + 304, + 67, + 543, + 116 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 67, + 543, + 116 + ], + "type": "text", + "content": " steps as a warmup while freezing other model parameters. After " + }, + { + "bbox": [ + 304, + 67, + 543, + 116 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 67, + 543, + 116 + ], + "type": "text", + "content": " steps, all model parameters are tuned." 
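As a concrete illustration of the initialization scheme described above (encoder and decoder inherited from the decoder-only checkpoint; cross-attention seeded from self-attention in the balanced setup, or randomly initialized and warmed up for the first K steps with all other parameters frozen in the unbalanced setup), here is a minimal sketch. It is not the authors' released code: the parameter-name prefixes (`encoder.`, `decoder.`, `cross_attn`) and the PyTorch state-dict interface are assumptions made only for this example.

```python
# Minimal sketch (not the authors' code) of the adaptation initialization.
# Assumes PyTorch and hypothetical parameter names with "self_attn"/"ffn"
# substrings in the decoder-only checkpoint.
import torch

def init_encdec_from_decoder_only(encdec_state, dec_state, balanced=True):
    """Copy decoder-only weights into an encoder-decoder state dict."""
    for name, tensor in dec_state.items():
        # Encoder reuses every decoder-only weight; only the attention mask
        # changes (causal -> bidirectional), which needs no new parameters.
        enc_name = "encoder." + name
        if enc_name in encdec_state:
            encdec_state[enc_name] = tensor.clone()
        # Decoder FFN and self-attention come from the corresponding layers.
        dec_name = "decoder." + name
        if dec_name in encdec_state:
            encdec_state[dec_name] = tensor.clone()
        # Balanced setup: cross-attention is seeded from self-attention.
        if balanced and ".self_attn." in name:
            x_name = "decoder." + name.replace(".self_attn.", ".cross_attn.")
            if x_name in encdec_state:
                encdec_state[x_name] = tensor.clone()
    return encdec_state

def warmup_cross_attention(model, step, k_warmup=1000):
    """Unbalanced setup: train only cross-attention for the first K steps."""
    for name, p in model.named_parameters():
        p.requires_grad = ("cross_attn" in name) or (step >= k_warmup)
```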
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 128, + 419, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 128, + 419, + 140 + ], + "spans": [ + { + "bbox": [ + 304, + 128, + 419, + 140 + ], + "type": "text", + "content": "3.3. Pretraining Objective" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 147, + 543, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 147, + 543, + 231 + ], + "spans": [ + { + "bbox": [ + 303, + 147, + 543, + 231 + ], + "type": "text", + "content": "Decoder-only pretraining often adopts causal language modeling on a single sequence. In contrast, encoder-decoder adaptation requires separate input and target sequences to be fed to the encoder and decoder separately. We explore two classical pretraining objectives for encoder-decoder modeling: prefix language modeling (PrefixLM) and UL2 (Tay et al., 2022; Wang et al., 2022)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 303, + 236, + 544, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 236, + 544, + 345 + ], + "spans": [ + { + "bbox": [ + 303, + 236, + 544, + 345 + ], + "type": "text", + "content": "PrefixLM behaves similar to causal language modeling except for its prefix condition. To simplify the preprocessing, we split a sequence equally into two halves, the first half used as input and the second one as target. This also eases the adoption of knowledge distillation from decoder-only models. UL2 is more complicated. It is composed of several denoising tasks at different levels of complexity. We prepare UL2 data following Tay et al. (2022). We compare their performance in experiments." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 360, + 350, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 360, + 350, + 373 + ], + "spans": [ + { + "bbox": [ + 304, + 360, + 350, + 373 + ], + "type": "text", + "content": "4. Setup" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 303, + 380, + 543, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 380, + 543, + 524 + ], + "spans": [ + { + "bbox": [ + 303, + 380, + 543, + 524 + ], + "type": "text", + "content": "Data Setting Our data for pretraining and instruction tuning – including supervised finetuning (SFT) and reinforcement learning from human feedback (RLHF) – follow Gemma 2 (Team et al., 2024). For the adaptation, we preprocess the Gemma 2 pretraining data (8 trillion tokens) with PrefixLM and UL2. Note Gemma 2 pretraining data comes with knowledge distillation. We preserve this information for PrefixLM while adopting ground-truth targets for UL2 as mapping the teacher logits to UL2 is non-trivial. The preprocessed data has an input-output sequence length of 4096-4096 and 8192-8192 for PrefixLM and UL2, respectively. We adapt our models on up to 2 trillion tokens." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 303, + 536, + 543, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 536, + 543, + 609 + ], + "spans": [ + { + "bbox": [ + 303, + 536, + 543, + 609 + ], + "type": "text", + "content": "Model Setting We use Gemma 2 (2B and 9B) as the base decoder-only LLM. We also pretrain several smaller models (Small, Base, Large, and XL) following mT5 configurations (Xue et al., 2021) under the Gemma 2 framework, and then adapt them to encoder-decoder LLMs. Detailed model configurations are given in Table 1." 
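To make the PrefixLM preprocessing above concrete (each pretraining sequence is split into two equal halves, the first half fed to the encoder as input and the second half used as the decoder target), a minimal sketch follows; the `inputs`/`targets` field names are assumptions for illustration, not the actual data pipeline.

```python
# Illustrative PrefixLM preprocessing: split each token sequence in half.
def to_prefix_lm_example(token_ids):
    mid = len(token_ids) // 2
    return {"inputs": token_ids[:mid], "targets": token_ids[mid:]}

example = to_prefix_lm_example(list(range(16)))
assert len(example["inputs"]) == len(example["targets"]) == 8
```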
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 303, + 620, + 543, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 620, + 543, + 657 + ], + "spans": [ + { + "bbox": [ + 303, + 620, + 543, + 657 + ], + "type": "text", + "content": "Evaluation We employ diverse academic evaluation datasets to evaluate different capabilities of LLMs. Concretely, we use the following benchmarks:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 670, + 544, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 670, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 315, + 670, + 544, + 718 + ], + "type": "text", + "content": "- Pretraining (PT) benchmark: Boolq (Clark et al., 2019), SIQA (Sap et al., 2019), PIQA (Bisk et al., 2020), ARC-c&ARC-e (Clark et al., 2018), MMLU (Hendrycks et al., 2021), MMLU Pro (Wang et al., 2024), Hel" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 95, + 64, + 500, + 163 + ], + "blocks": [ + { + "bbox": [ + 95, + 64, + 500, + 163 + ], + "lines": [ + { + "bbox": [ + 95, + 64, + 500, + 163 + ], + "spans": [ + { + "bbox": [ + 95, + 64, + 500, + 163 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>#Layers</td><td>d_model</td><td>d_ffn</td><td>#heads (q/kv)</td><td>d_head</td><td colspan=\"2\">#Params</td></tr>
<tr><td></td><td></td><td></td><td></td><td></td><td></td><td>Decoder-Only</td><td>Encoder-Decoder</td></tr>
<tr><td>2B</td><td>26</td><td>2304</td><td>18432</td><td>8/4</td><td>256</td><td>2.0B</td><td>4.0B (2B-2B)</td></tr>
<tr><td>9B</td><td>42</td><td>3584</td><td>28672</td><td>16/8</td><td>256</td><td>8.3B</td><td>16.7B (9B-9B)</td></tr>
<tr><td>S (Small)</td><td>8</td><td>512</td><td>1024</td><td>8/8</td><td>64</td><td>14.7M</td><td>29.4M (S-S)</td></tr>
<tr><td>B (Base)</td><td>12</td><td>768</td><td>2048</td><td>12/12</td><td>64</td><td>56.7M</td><td>113.3M (B-B)</td></tr>
<tr><td>L (Large)</td><td>24</td><td>1024</td><td>2816</td><td>16/16</td><td>64</td><td>204.6M</td><td>409.1M (L-L)</td></tr>
<tr><td>XL (Xlarge)</td><td>24</td><td>2048</td><td>5120</td><td>32/32</td><td>64</td><td>780.3M</td><td>1.6B (XL-XL)</td></tr></table>
", + "image_path": "416bbeebb01547253fc7b5fc27c306c2b693d521b32756f8c4ea25bef1cb7ed1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 171, + 543, + 203 + ], + "lines": [ + { + "bbox": [ + 51, + 171, + 543, + 203 + ], + "spans": [ + { + "bbox": [ + 51, + 171, + 543, + 203 + ], + "type": "text", + "content": "Table 1: Model configurations. #Layers: number of layers; " + }, + { + "bbox": [ + 51, + 171, + 543, + 203 + ], + "type": "inline_equation", + "content": "d_{model/ffn/head}" + }, + { + "bbox": [ + 51, + 171, + 543, + 203 + ], + "type": "text", + "content": ": model/feed-forward/head dimension; #heads (" + }, + { + "bbox": [ + 51, + 171, + 543, + 203 + ], + "type": "inline_equation", + "content": "q/kv" + }, + { + "bbox": [ + 51, + 171, + 543, + 203 + ], + "type": "text", + "content": "): number of query/value heads. #Params: number of model parameters excluding embeddings. For encoder-decoder models, we show the number of parameters for the balanced architecture, e.g. 2B-2B. The 9B-2B model has 10.4B parameters. “B/M”: billion/million." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 71, + 223, + 291, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 223, + 291, + 366 + ], + "spans": [ + { + "bbox": [ + 71, + 223, + 291, + 366 + ], + "type": "text", + "content": "laSwag (Zellers et al., 2019), Winogrande (Sakaguchi et al., 2021), TruthfulQA (Lin et al., 2021), AGIEval (Zhong et al., 2023), BBH (Suzgun et al., 2022), DROP (Dua et al., 2019), GPQA (Rein et al., 2023), GSM8K (Cobbe et al., 2021), HumanEval (Chen et al., 2021), Lambada (Paperno et al., 2016), MATH-500 (Hendrycks et al., 2021), MBPP (Austin et al., 2021), NQ (Kwiatkowski et al., 2019), TriviaQA (Joshi et al., 2017), and WMT23 (Kocmi et al., 2023). We perform zero/few-shot prompting for pretrained LLMs, and report the averaged result as " + }, + { + "bbox": [ + 71, + 223, + 291, + 366 + ], + "type": "inline_equation", + "content": "PT" + }, + { + "bbox": [ + 71, + 223, + 291, + 366 + ], + "type": "text", + "content": " score." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 64, + 373, + 291, + 573 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 64, + 373, + 290, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 373, + 290, + 445 + ], + "spans": [ + { + "bbox": [ + 64, + 373, + 290, + 445 + ], + "type": "text", + "content": "- Instruction-tuning (IT) benchmark: GSM8K, MMLU, MMLU Pro, MBPP, HumanEval, MATH-500, BBH, GPQA (Diamond), WMT23, and MGSM (Shi et al., 2022). We perform zero/few-shot prompting with task-specific instruction for instruction-tuned models, and report the averaged result as IT score." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 453, + 291, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 453, + 291, + 573 + ], + "spans": [ + { + "bbox": [ + 64, + 453, + 291, + 573 + ], + "type": "text", + "content": "- SuperGLUE (Wang et al., 2019b): we use this benchmark to examine the learned contextual representation. We stack a task-specific head on the representation of the last token in the encoder (decoder) of the encoder-decoder (decoder-only) LLM, and finetune all parameters on the training set. Learning rate, batch size, and dropout are grid-searched for each task. 
We reformulate all tasks as classification tasks and report averaged dev-set accuracy over COPA, WIC, WSC, RTE, MultiRC, CB, and Boolq." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 52, + 586, + 290, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 586, + 290, + 657 + ], + "spans": [ + { + "bbox": [ + 52, + 586, + 290, + 657 + ], + "type": "text", + "content": "For generative tasks, we always apply greedy sampling. We perform pretraining, SFT, and RLHF based on the Gemma 2 recipe except for the learning rate which we tune empirically for encoder-decoder LLMs. In unbalanced encoder-decoder adaptation, e.g. 9B-2B, we set the cross-attention warmup step " + }, + { + "bbox": [ + 52, + 586, + 290, + 657 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 52, + 586, + 290, + 657 + ], + "type": "text", + "content": " to 1000." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 673, + 105, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 673, + 105, + 685 + ], + "spans": [ + { + "bbox": [ + 52, + 673, + 105, + 685 + ], + "type": "text", + "content": "5. Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 693, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 291, + 717 + ], + "type": "text", + "content": "The encoder-decoder adaptation converges rapidly, particularly for balanced architectures. While adaptation" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 345, + 224, + 503, + 347 + ], + "blocks": [ + { + "bbox": [ + 345, + 224, + 503, + 347 + ], + "lines": [ + { + "bbox": [ + 345, + 224, + 503, + 347 + ], + "spans": [ + { + "bbox": [ + 345, + 224, + 503, + 347 + ], + "type": "image", + "image_path": "e84af8ed438269fcb4b932d2777badf545e4bc6420d932ca29791018c17890d4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 358, + 542, + 380 + ], + "lines": [ + { + "bbox": [ + 304, + 358, + 542, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 542, + 380 + ], + "type": "text", + "content": "Figure 2: Pretraining performance as a function of the number of pretrained tokens during the adaptation." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 400, + 543, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 400, + 543, + 532 + ], + "spans": [ + { + "bbox": [ + 303, + 400, + 543, + 532 + ], + "type": "text", + "content": "leverages pretrained parameters for initialization, whether and how this benefits model convergence is still questionable. Figure 2 shows the change of PT performance with respect to the amount of pretrained tokens. Obviously, adaptation is very computationally efficient, converging quickly and achieving similar performance to its decoder-only counterpart after only tens of billions of tokens. Balanced architectures (2B-2B and 9B-9B) converge much faster than the unbalanced ones (9B-2B) since all parameters in the former are initialized from pretrained decoder-only models while the cross-attention in the latter is randomly initialized." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 537, + 544, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 537, + 544, + 634 + ], + "spans": [ + { + "bbox": [ + 303, + 537, + 544, + 634 + ], + "type": "text", + "content": "We also notice that additional pretraining improves balanced models a little on average but substantially benefits some tasks, like GSM8K and DROP. Besides, 9B-2B performance increases consistently during the adaptation, quickly surpassing Gemma 2 2B and moving towards Gemma 2 9B. This demonstrates the feasibility of encoder-decoder adaptation from varying-sized decoder-only LLMs, as well as its ability to utilize the knowledge from pretrained models." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "type": "text", + "content": "Pretraining objective matters: UL2 and PrefixLM show different characteristics. Previous study reported the superiority of UL2 over PrefixLM (Tay et al., 2022), but PrefixLM in our study is enhanced with knowledge distillation, which often improves small models significantly. We compare these two objectives for the adaptation in Table 2." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 140, + 64, + 455, + 178 + ], + "blocks": [ + { + "bbox": [ + 140, + 64, + 455, + 178 + ], + "lines": [ + { + "bbox": [ + 140, + 64, + 455, + 178 + ], + "spans": [ + { + "bbox": [ + 140, + 64, + 455, + 178 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan=\"3\">PT Score</td><td colspan=\"3\">IT Score</td></tr>
<tr><td></td><td>Gemma 2</td><td>+ PrefixLM</td><td>+ UL2</td><td>Gemma 2</td><td>+ PrefixLM</td><td>+ UL2</td></tr>
<tr><td>2B-2B</td><td>47.9</td><td>49.7</td><td>50.1</td><td>(39.0)</td><td>46.4 (46.1)</td><td>42.4</td></tr>
<tr><td>9B-2B</td><td>-</td><td>55.0</td><td>52.9</td><td>-</td><td>49.3 (50.6)</td><td>45.7</td></tr>
<tr><td>9B-9B</td><td>61.7</td><td>63.1</td><td>63.9</td><td>(59.6)</td><td>62.9 (64.5)</td><td>61.5</td></tr>
<tr><td>S-S</td><td>23.4</td><td>22.8</td><td>23.1</td><td>6.2</td><td>9.8</td><td>10.7</td></tr>
<tr><td>B-B</td><td>26.7</td><td>26.9</td><td>26.0</td><td>9.8</td><td>12.9</td><td>11.1</td></tr>
<tr><td>L-L</td><td>32.3</td><td>31.6</td><td>30.9</td><td>12.9</td><td>17.5</td><td>18.9</td></tr>
<tr><td>XL-XL</td><td>39.7</td><td>39.5</td><td>38.5</td><td>23.5</td><td>30.7</td><td>29.2</td></tr></table>
", + "image_path": "4bbb3f398b8b1bb36540721f23580fdcd02028eff36e3066fff913714b0c47e3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 141, + 202, + 456, + 316 + ], + "blocks": [ + { + "bbox": [ + 226, + 182, + 366, + 192 + ], + "lines": [ + { + "bbox": [ + 226, + 182, + 366, + 192 + ], + "spans": [ + { + "bbox": [ + 226, + 182, + 366, + 192 + ], + "type": "text", + "content": "(a) Results on PT and IT benchmarks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 141, + 202, + 456, + 316 + ], + "lines": [ + { + "bbox": [ + 141, + 202, + 456, + 316 + ], + "spans": [ + { + "bbox": [ + 141, + 202, + 456, + 316 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan=\"3\">PT Models</td><td colspan=\"3\">IT Models</td></tr>
<tr><td></td><td>Gemma 2</td><td>+ PrefixLM</td><td>+ UL2</td><td>Gemma 2</td><td>+ PrefixLM</td><td>+ UL2</td></tr>
<tr><td>2B-2B</td><td>75.5</td><td>88.1</td><td>88.1</td><td>(86.2)</td><td>88.3 (87.9)</td><td>90.5</td></tr>
<tr><td>9B-2B</td><td>-</td><td>90.2</td><td>90.7</td><td>-</td><td>90.6 (90.3)</td><td>91.3</td></tr>
<tr><td>9B-9B</td><td>82.5</td><td>91.4</td><td>91.8</td><td>(89.8)</td><td>91.8 (91.4)</td><td>91.6</td></tr>
<tr><td>S-S</td><td>67.6</td><td>69.8</td><td>69.6</td><td>67.6</td><td>68.8</td><td>69.4</td></tr>
<tr><td>B-B</td><td>68.6</td><td>71.2</td><td>71.5</td><td>68.7</td><td>72.3</td><td>73.6</td></tr>
<tr><td>L-L</td><td>68.4</td><td>78.7</td><td>79.7</td><td>68.8</td><td>78.1</td><td>80.3</td></tr>
<tr><td>XL-XL</td><td>70.7</td><td>84.4</td><td>85.4</td><td>69.2</td><td>85.7</td><td>87.0</td></tr></table>
", + "image_path": "e342687ba52ca17516eaef964d4960ad15f8aa80f31daad7aea4c345f96fb85e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 216, + 319, + 378, + 330 + ], + "lines": [ + { + "bbox": [ + 216, + 319, + 378, + 330 + ], + "spans": [ + { + "bbox": [ + 216, + 319, + 378, + 330 + ], + "type": "text", + "content": "(b) Finetuned performance on SuperGLUE." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 51, + 339, + 544, + 381 + ], + "lines": [ + { + "bbox": [ + 51, + 339, + 544, + 381 + ], + "spans": [ + { + "bbox": [ + 51, + 339, + 544, + 381 + ], + "type": "text", + "content": "Table 2: Main results on PT, IT, and SuperGLUE benchmarks. \"Gemma 2\": decoder-only models; \"+\"PrefixLM/UL2\": encoder-decoder models adapted via prefix language modeling (with knowledge distillation)/UL2. We put Gemma 2 results into the corresponding encoder-decoder rows to save space, e.g. 2B-2B for Gemma 2 means Gemma 2 2B. Numbers in parentheses are for RLHFed models. Best results are in bold. Note PT and IT scores are not directly comparable since they are averaged over different tasks." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 51, + 401, + 291, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 401, + 291, + 544 + ], + "spans": [ + { + "bbox": [ + 51, + 401, + 291, + 544 + ], + "type": "text", + "content": "We find that PrefixLM and UL2 have their own strengths. Specifically, UL2 delivers stronger contextual representations, outweighing PrefixLM on SuperGLUE across most model scales, resonating with previous findings (Tay et al., 2022). In contrast, PrefixLM produces more powerful generative LLMs thanks to its generation nature and the knowledge distillation. It surpasses UL2 on PT and IT benchmarks in most cases. Particularly, it outperforms UL2 at 9B-2B on both PT and IT by up to 3.6, a significant margin. Since generative LLMs have become the mainstream, we base our following analysis on PrefixLM. We discuss our attempts to combine PrefixLM and UL2 in the next section." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 574, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 574, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 51, + 574, + 291, + 717 + ], + "type": "text", + "content": "Encoder-decoder LLMs outperform decoder-only LLMs especially after instruction tuning. Table 2 also shows that the adapted encoder-decoder LLMs achieve comparable or slightly better pretraining performance than their decoder-only counterpart but with substantially improved instruction-tuning performance, echoing with the findings of Wang et al. (2022). For example, the 9B-9B encoder-decoder LLM surpasses Gemma 2 9B by 1.4 and 4.9 on PT and IT, respectively. The performance gap further increases to 1.8 and 7.1 at 2B-2B scale. We notice that the adaption performs slightly worse at scales below 2B on PT, but the improvements on IT are still promising, e.g. 7.2 at XL-XL." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 401, + 544, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 401, + 544, + 473 + ], + "spans": [ + { + "bbox": [ + 302, + 401, + 544, + 473 + ], + "type": "text", + "content": "Regardless of PT or IT models, pretraining objectives, and model scales, encoder-decoder LLMs perform consistently better than decoder-only LLMs on SuperGLUE. 
This suggests that the contextual representation from encoder-decoder LLMs is often of higher quality, likely due to bidirectional self-attention." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 479, + 544, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 479, + 544, + 647 + ], + "spans": [ + { + "bbox": [ + 302, + 479, + 544, + 647 + ], + "type": "text", + "content": "We need to highlight that the above analysis is based on the overall performance, which may not apply when it comes to a specific downstream task. As shown in Table 3, there are some tasks favoring encoder-decoder models while others favoring decoder-only models especially for PT models. For example, after pretraining, Gemma 2 9B surpasses 9B-9B by 4.1 on ARC-C but underperforms it by 4.4 on Winogrande; while encoder-decoder LLM shows more consistent advantage after instruction tuning, 9B-9B still lags behind Gemma 2 9B by 0.9 on WMT23. This illustrates the complexity when evaluating LLM capability as well as the risk of reaching misleading conclusions when adopting biased evaluation tasks. We reduce such risk by selecting as diverse and broad tasks as possible for evaluation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 670, + 542, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 670, + 542, + 718 + ], + "spans": [ + { + "bbox": [ + 302, + 670, + 542, + 718 + ], + "type": "text", + "content": "Encoder-decoder LLMs balance quality and inference efficiency more effectively. We next analyze different models from the perspective of inference efficiency which becomes increasingly crucial for model deployment. Figure" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 154, + 64, + 441, + 292 + ], + "blocks": [ + { + "bbox": [ + 154, + 64, + 441, + 292 + ], + "lines": [ + { + "bbox": [ + 154, + 64, + 441, + 292 + ], + "spans": [ + { + "bbox": [ + 154, + 64, + 441, + 292 + ], + "type": "table", + "html": "
<table><tr><td>Task</td><td>Metric</td><td colspan=\"2\">Gemma 2</td><td colspan=\"3\">Encoder-Decoder Adaptation</td></tr>
<tr><td></td><td></td><td>2B</td><td>9B</td><td>2B-2B</td><td>9B-2B</td><td>9B-9B</td></tr>
<tr><td>MMLU</td><td>5-shot</td><td>51.9</td><td>71.1</td><td>46.8</td><td>60.3</td><td>71.3</td></tr>
<tr><td>ARC-C</td><td>25-shot</td><td>55.5</td><td>69.1</td><td>52.0</td><td>59.9</td><td>65.0</td></tr>
<tr><td>GSM8K</td><td>5-shot</td><td>23.7</td><td>63.2</td><td>41.7</td><td>48.7</td><td>72.8</td></tr>
<tr><td>AGIEval</td><td>3-5-shot</td><td>31.5</td><td>53.3</td><td>35.0</td><td>43.6</td><td>53.1</td></tr>
<tr><td>DROP</td><td>3-shot, F1</td><td>53.3</td><td>71.5</td><td>61.4</td><td>66.9</td><td>75.7</td></tr>
<tr><td>BBH</td><td>3-shot, CoT</td><td>40.2</td><td>68.9</td><td>51.9</td><td>51.6</td><td>74.7</td></tr>
<tr><td>Winogrande</td><td>5-shot</td><td>65.2</td><td>74.3</td><td>69.5</td><td>68.1</td><td>78.7</td></tr>
<tr><td>HellaSwag</td><td>10-shot</td><td>72.9</td><td>81.8</td><td>74.9</td><td>75.7</td><td>81.0</td></tr>
<tr><td>MATH-500</td><td>4-shot</td><td>17.2</td><td>33.4</td><td>24.2</td><td>23.6</td><td>37.8</td></tr>
<tr><td>ARC-e</td><td>0-shot</td><td>81.0</td><td>88.3</td><td>77.1</td><td>82.9</td><td>85.3</td></tr>
<tr><td>PIQA</td><td>0-shot</td><td>78.4</td><td>81.6</td><td>79.0</td><td>78.3</td><td>81.1</td></tr>
<tr><td>SIQA</td><td>0-shot</td><td>51.7</td><td>53.6</td><td>50.1</td><td>50.1</td><td>50.5</td></tr>
<tr><td>Boolq</td><td>0-shot</td><td>75.5</td><td>77.5</td><td>75.6</td><td>84.6</td><td>85.6</td></tr>
<tr><td>TriviaQA</td><td>5-shot</td><td>60.1</td><td>76.6</td><td>51.2</td><td>66.2</td><td>75.2</td></tr>
<tr><td>NQ</td><td>5-shot</td><td>30.7</td><td>43.9</td><td>28.4</td><td>37.1</td><td>43.1</td></tr>
<tr><td>HumanEval</td><td>pass@1</td><td>19.5</td><td>39.0</td><td>27.4</td><td>33.5</td><td>40.2</td></tr>
<tr><td>MBPP</td><td>3-shot</td><td>30.4</td><td>52.0</td><td>37.4</td><td>43.4</td><td>55.6</td></tr>
<tr><td>Average</td><td></td><td>49.3</td><td>64.7</td><td>52.0</td><td>57.3</td><td>66.3</td></tr></table>
", + "image_path": "3f839176a76f6ca49b21f2fe1d9927f962024d2444b345aaeefe51e43261e557.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 138, + 317, + 457, + 479 + ], + "blocks": [ + { + "bbox": [ + 234, + 297, + 358, + 307 + ], + "lines": [ + { + "bbox": [ + 234, + 297, + 358, + 307 + ], + "spans": [ + { + "bbox": [ + 234, + 297, + 358, + 307 + ], + "type": "text", + "content": "(a) Results for pretrained models." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 317, + 457, + 479 + ], + "lines": [ + { + "bbox": [ + 138, + 317, + 457, + 479 + ], + "spans": [ + { + "bbox": [ + 138, + 317, + 457, + 479 + ], + "type": "table", + "html": "
<table><tr><td>Task</td><td>Metric</td><td colspan=\"2\">Gemma 2</td><td colspan=\"3\">Encoder-Decoder Adaptation</td></tr>
<tr><td></td><td></td><td>2B</td><td>9B</td><td>2B-2B</td><td>9B-2B</td><td>9B-9B</td></tr>
<tr><td>GSM8K</td><td>11-shot</td><td>58.0</td><td>84.3</td><td>70.7</td><td>73.8</td><td>88.6</td></tr>
<tr><td>MMLU</td><td>5-shot</td><td>49.8</td><td>71.8</td><td>61.5</td><td>66.7</td><td>76.7</td></tr>
<tr><td>MMLU Pro</td><td>5-shot</td><td>27.4</td><td>49.9</td><td>36.6</td><td>43.0</td><td>55.7</td></tr>
<tr><td>MBPP</td><td>3-shot</td><td>37.8</td><td>59.2</td><td>44.0</td><td>49.8</td><td>64.8</td></tr>
<tr><td>HumanEval</td><td>pass@1</td><td>43.3</td><td>65.9</td><td>47.6</td><td>55.5</td><td>72.0</td></tr>
<tr><td>MATH-500</td><td>0-shot</td><td>24.4</td><td>45.8</td><td>28.2</td><td>30.0</td><td>47.2</td></tr>
<tr><td>BBH</td><td>3-shot</td><td>44.8</td><td>72.0</td><td>57.5</td><td>57.6</td><td>76.4</td></tr>
<tr><td>GPQA</td><td>0-shot</td><td>24.8</td><td>29.9</td><td>27.5</td><td>32.6</td><td>35.7</td></tr>
<tr><td>GPQA Diamond</td><td>0-shot</td><td>27.8</td><td>29.8</td><td>26.8</td><td>29.3</td><td>40.4</td></tr>
<tr><td>WMT23</td><td>5-shot, BLEURT</td><td>65.2</td><td>72.0</td><td>59.9</td><td>65.3</td><td>71.1</td></tr>
<tr><td>MGSM</td><td>8-shot</td><td>26.3</td><td>74.9</td><td>46.8</td><td>53.5</td><td>80.7</td></tr>
<tr><td>Average</td><td></td><td>39.0</td><td>59.6</td><td>46.1</td><td>50.6</td><td>64.5</td></tr></table>
", + "image_path": "c5f30f5d5027dab8614556e9555e0a41a7cbd800c71bc65594505349fcf4bb25.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 236, + 483, + 355, + 494 + ], + "lines": [ + { + "bbox": [ + 236, + 483, + 355, + 494 + ], + "spans": [ + { + "bbox": [ + 236, + 483, + 355, + 494 + ], + "type": "text", + "content": "(b) Results for RLHFed models." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 513, + 541, + 534 + ], + "lines": [ + { + "bbox": [ + 52, + 513, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 52, + 513, + 541, + 534 + ], + "type": "text", + "content": "Table 3: Detailed results on different tasks for PT and RLHFed models. We compare Gemma 2 and encoder-decoder models adapted via PrefixLM. Best results are in bold." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 555, + 290, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 555, + 290, + 616 + ], + "spans": [ + { + "bbox": [ + 52, + 555, + 290, + 616 + ], + "type": "text", + "content": "3 shows that balanced encoder-decoder LLMs have similar inference flops to their decoder-only counterparts, e.g. 2B-2B vs. Gemma 2 2B. As such, encoder-decoder models often dominate the quality-inference efficiency frontier across PT, IT, and SuperGLUE benchmarks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 621, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 621, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 621, + 291, + 717 + ], + "type": "text", + "content": "We acknowledge that inference flops may not correlate well with actual running speed due to factors like inter-device communication, key-value caching, and autoregressive bottleneck. We then provide the latency results measured on GSM8K for 2B and 9B models in Figure 4, which further verified the above analysis. 9B-9B and 2B-2B show similar latency to Gemma 2 9B and 2B, respectively, but clearly better performance. In particular, 9B-2B, the one pairing large" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 555, + 541, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 541, + 580 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 541, + 580 + ], + "type": "text", + "content": "encoder and small decoder, shows similar latency to Gemma 2 2B but significantly better performance than 2B-2B." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 586, + 541, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 586, + 541, + 621 + ], + "spans": [ + { + "bbox": [ + 304, + 586, + 541, + 621 + ], + "type": "text", + "content": "Together, these confirm that encoder-decoder adaptation indeed provides a more flexible way for balancing between quality and inference speed." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 637, + 373, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 637, + 373, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 637, + 373, + 649 + ], + "type": "text", + "content": "6. 
Discussion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 658, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 658, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 658, + 542, + 717 + ], + "type": "text", + "content": "Is the improvement after the adaptation simply due to the extra pretraining compute? Not really. We also tried to apply more pretraining compute to Gemma 2 2B by going through another 6 trillion tokens, which leads to a PT score of 48.57, still significantly below the encoder-" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 67, + 220, + 185 + ], + "blocks": [ + { + "bbox": [ + 70, + 67, + 220, + 185 + ], + "lines": [ + { + "bbox": [ + 70, + 67, + 220, + 185 + ], + "spans": [ + { + "bbox": [ + 70, + 67, + 220, + 185 + ], + "type": "image", + "image_path": "5bc3292c9075390db3ad037b56681680df820b5ed6361ee93024ceb18506f6da.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 194, + 544, + 227 + ], + "lines": [ + { + "bbox": [ + 51, + 194, + 544, + 227 + ], + "spans": [ + { + "bbox": [ + 51, + 194, + 544, + 227 + ], + "type": "text", + "content": "Figure 3: Comparisons of decoder-only LLMs with adapted encoder-decoder models under inference flops. We show PT, IT, and SuperGLUE performance. Inference flops is estimated with a sequence length of 4096-4096 and 8192 for encoder-decoder and decoder-only LLMs, respectively. Note the upper left corner marks the quality-efficiency frontier." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 223, + 65, + 373, + 185 + ], + "blocks": [ + { + "bbox": [ + 223, + 65, + 373, + 185 + ], + "lines": [ + { + "bbox": [ + 223, + 65, + 373, + 185 + ], + "spans": [ + { + "bbox": [ + 223, + 65, + 373, + 185 + ], + "type": "image", + "image_path": "51bbcc46d8fda806b753bf35c24c7ec6a4f852660553929aa38199e5069a40d7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 376, + 64, + 526, + 185 + ], + "blocks": [ + { + "bbox": [ + 376, + 64, + 526, + 185 + ], + "lines": [ + { + "bbox": [ + 376, + 64, + 526, + 185 + ], + "spans": [ + { + "bbox": [ + 376, + 64, + 526, + 185 + ], + "type": "image", + "image_path": "d2f2cfdf0577b291f7b73a7bf7b879e70cfaae71e626c66933421a10b6d03157.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 92, + 248, + 250, + 373 + ], + "blocks": [ + { + "bbox": [ + 92, + 248, + 250, + 373 + ], + "lines": [ + { + "bbox": [ + 92, + 248, + 250, + 373 + ], + "spans": [ + { + "bbox": [ + 92, + 248, + 250, + 373 + ], + "type": "image", + "image_path": "b62435503ae00d84a7a4eecfced3a619d2992128db4f0dea34e324725f8e2c26.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 382, + 291, + 422 + ], + "lines": [ + { + "bbox": [ + 51, + 382, + 291, + 422 + ], + "spans": [ + { + "bbox": [ + 51, + 382, + 291, + 422 + ], + "type": "text", + "content": "Figure 4: GSM8K performance as a function of latency for RL-HFed models. Latency is estimated as milliseconds (ms) per query by answering 200 reasoning questions from GSM8K. Batch size of 1 is used." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 447, + 290, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 447, + 290, + 495 + ], + "spans": [ + { + "bbox": [ + 51, + 447, + 290, + 495 + ], + "type": "text", + "content": "decoder adaptation, 49.7. This indicates that the additional pretraining compute can't fully explain the improvements from the adaptation and we argue that the inductive bias of encoder-decoder modeling plays a crucial role." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 510, + 291, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 510, + 291, + 606 + ], + "spans": [ + { + "bbox": [ + 51, + 510, + 291, + 606 + ], + "type": "text", + "content": "Does cross-attention warmup matter for unbalanced encoder-decoder? Yes. Our preliminary experiments with 9B-2B and UL2 on 800B tokens show that the pretraining performance over Boolq and GSM8K reduces from 62.5 to 61.8 without the warmup. Besides, increasing warmup steps from 1K to 5K further reduces performance to 60.2. An adequate amount of warmup optimization is required to reach the optimal performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 622, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 622, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 51, + 622, + 291, + 717 + ], + "type": "text", + "content": "Can we switch from grouped-query attention to multi-head self attention for the encoder? Yes but with mixed results. Gemma 2 adopts grouped-query attention (GQA) to improve its decoding efficiency. 
However, unlike the decoder, the encoder can be fully parallelized during inference, making the use of multi-head attention (MHA) reasonable. We tried to expand GQA in Gemma 2 2B to MHA by replicating head parameters for the encoder self-attention. Under" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 320, + 244, + 527, + 333 + ], + "blocks": [ + { + "bbox": [ + 320, + 244, + 527, + 333 + ], + "lines": [ + { + "bbox": [ + 320, + 244, + 527, + 333 + ], + "spans": [ + { + "bbox": [ + 320, + 244, + 527, + 333 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan=\"3\">Adaptation</td><td colspan=\"3\">Scratch</td></tr>
<tr><td></td><td>PT</td><td>IT</td><td>SG</td><td>PT</td><td>IT</td><td>SG</td></tr>
<tr><td>S-S</td><td>22.8</td><td>9.8</td><td>68.8</td><td>24.0</td><td>9.9</td><td>70.5</td></tr>
<tr><td>B-B</td><td>26.9</td><td>12.9</td><td>72.3</td><td>28.1</td><td>11.8</td><td>75.5</td></tr>
<tr><td>L-L</td><td>31.6</td><td>17.5</td><td>78.1</td><td>30.9</td><td>17.1</td><td>78.5</td></tr>
<tr><td>XL-XL</td><td>39.5</td><td>30.7</td><td>85.7</td><td>37.7</td><td>28.8</td><td>79.5</td></tr>
<tr><td>2B-2B</td><td>49.7</td><td>46.4</td><td>88.3</td><td>47.1</td><td>43.9</td><td>84.5</td></tr></table>
", + "image_path": "35058235b3a8e97de81c56604e7aa53eee71b1af57cda67fbad7585e55cb8bd2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 340, + 544, + 372 + ], + "lines": [ + { + "bbox": [ + 304, + 340, + 544, + 372 + ], + "spans": [ + { + "bbox": [ + 304, + 340, + 544, + 372 + ], + "type": "text", + "content": "Table 4: Results for encoder-decoder models adapted with PrefixLM (Adaptation) and pretrained from scratch (Scratch). SG: SuperGLUE score for SFTed models." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 303, + 392, + 542, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 392, + 542, + 440 + ], + "spans": [ + { + "bbox": [ + 303, + 392, + 542, + 440 + ], + "type": "text", + "content": "PrefixLM, this improves PT performance to 50.2 by 0.5 at 2B-2B but reduces IT performance to 43.5 by 2.9. We thus still stick to GQA when adapting Gemma 2 2B and 9B for the encoder." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 453, + 542, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 453, + 542, + 586 + ], + "spans": [ + { + "bbox": [ + 303, + 453, + 542, + 586 + ], + "type": "text", + "content": "Does bidirectional self-attention matter for the encoder? Yes. A crucial difference between encoder-decoder and decoder-only LLMs is the use of bidirectional self-attention. We also tested keeping the encoder self-attention causal at 2B-2B, which achieves a PT and IT score of 45.6 and 41.7, lagging behind its bidirectional counterpart substantially by 4.1 and 4.7, respectively. Note, the causal 2B-2B model surpasses Gemma 2 2B on IT by 2.7, although it performs worse on PT. This suggests that bidirectional self-attention contributes greatly to the success of our adaptation, but is not the only factor." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 597, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 597, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 597, + 542, + 717 + ], + "type": "text", + "content": "Would pretraining encoder-decoder LLMs from scratch yield better performance? Not really. Pretraining from scratch is a common method for developing new LLMs. We also pretrained encoder-decoder LLMs from scratch on 8 trillion tokens with PrefixLM. Table 4 summarizes the results. Despite using more pretraining tokens, encoder-decoder LLMs pretrained from scratch only perform better at small scales, such as S-S and B-B, beyond which adaptation shows clear superiority. 
As such, adaptation is a more computationally efficient way of developing powerful" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 67, + 220, + 183 + ], + "blocks": [ + { + "bbox": [ + 70, + 67, + 220, + 183 + ], + "lines": [ + { + "bbox": [ + 70, + 67, + 220, + 183 + ], + "spans": [ + { + "bbox": [ + 70, + 67, + 220, + 183 + ], + "type": "image", + "image_path": "dbcb0e6cce13b037f4c0883ed286cbdc0d95b29ded518c718b721cfc2f428ce9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 194, + 543, + 216 + ], + "lines": [ + { + "bbox": [ + 51, + 194, + 543, + 216 + ], + "spans": [ + { + "bbox": [ + 51, + 194, + 543, + 216 + ], + "type": "text", + "content": "Figure 5: Quality change for the two-stage optimization. \"UL2-then-PrefixLM\": switch the training objective from UL2 to PrefixLM for the final " + }, + { + "bbox": [ + 51, + 194, + 543, + 216 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 51, + 194, + 543, + 216 + ], + "type": "text", + "content": " tokens; \"PrefixLM-then-UL2\": similar but from PrefixLM to UL2." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 223, + 68, + 372, + 182 + ], + "blocks": [ + { + "bbox": [ + 223, + 68, + 372, + 182 + ], + "lines": [ + { + "bbox": [ + 223, + 68, + 372, + 182 + ], + "spans": [ + { + "bbox": [ + 223, + 68, + 372, + 182 + ], + "type": "image", + "image_path": "5ec6336377cc82d82a620bc9b2385a17178a9693bcd557a46c65604ff7d76ed2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 376, + 68, + 524, + 182 + ], + "blocks": [ + { + "bbox": [ + 376, + 68, + 524, + 182 + ], + "lines": [ + { + "bbox": [ + 376, + 68, + 524, + 182 + ], + "spans": [ + { + "bbox": [ + 376, + 68, + 524, + 182 + ], + "type": "image", + "image_path": "bb18af8918cb67a1778002f0f629d0c1e7c9645672e4e9d7249a3689aab6eb52.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 92, + 237, + 251, + 361 + ], + "blocks": [ + { + "bbox": [ + 92, + 237, + 251, + 361 + ], + "lines": [ + { + "bbox": [ + 92, + 237, + 251, + 361 + ], + "spans": [ + { + "bbox": [ + 92, + 237, + 251, + 361 + ], + "type": "image", + "image_path": "094aec7e87b8d447722242304ed5483546e0e65034b026f8a17558ab2195a3f1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 93, + 366, + 251, + 490 + ], + "blocks": [ + { + "bbox": [ + 93, + 366, + 251, + 490 + ], + "lines": [ + { + "bbox": [ + 93, + 366, + 251, + 490 + ], + "spans": [ + { + "bbox": [ + 93, + 366, + 251, + 490 + ], + "type": "image", + "image_path": "4de0f4c3868f0d61d6237f19aeceac00a345c0d159226ff2806bd0f3aed4d81f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 501, + 290, + 522 + ], + "lines": [ + { + "bbox": [ + 52, + 501, + 290, + 522 + ], + "spans": [ + { + "bbox": [ + 52, + 501, + 290, + 522 + ], + "type": "text", + "content": "Figure 6: Correlation analysis between PT performance and its corresponding IT/SuperGLUE performance." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 544, + 151, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 544, + 151, + 554 + ], + "spans": [ + { + "bbox": [ + 52, + 544, + 151, + 554 + ], + "type": "text", + "content": "encoder-decoder LLMs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 567, + 291, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 567, + 291, + 687 + ], + "spans": [ + { + "bbox": [ + 52, + 567, + 291, + 687 + ], + "type": "text", + "content": "Is IT/SuperGLUE score predicable from PT score? Mixed. A general assumption in LLM development is that PT performance can be used as an indicator for downstream applications. We summarize all our ablations and put them in Figure 6. Over all data points and across all model sizes, the correlation is pretty strong: a Spearman's " + }, + { + "bbox": [ + 52, + 567, + 291, + 687 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 52, + 567, + 291, + 687 + ], + "type": "text", + "content": " of 0.97 and 0.89 for IT vs. PT and SuperGLUE vs. PT, respectively. 
However, when considering data points within each model size separately, the averaged Spearman's " + }, + { + "bbox": [ + 52, + 567, + 291, + 687 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 52, + 567, + 291, + 687 + ], + "type": "text", + "content": " reduces to 0.42 and 0.05, respectively and is not significant anymore." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 693, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 291, + 717 + ], + "type": "text", + "content": "In practice, we also noticed that PT checkpoints with weaker performance sometimes yield significantly better IT or Su" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 237, + 542, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 237, + 542, + 285 + ], + "spans": [ + { + "bbox": [ + 304, + 237, + 542, + 285 + ], + "type": "text", + "content": "perGLUE performance. When selecting PT checkpoints for a specific model size, it's better to also examine their IT performance apart from PT results to avoid some biases or overfitting." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 304, + 542, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 304, + 542, + 400 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 542, + 400 + ], + "type": "text", + "content": "Can we get the best of both worlds from PrefixLM and UL2? This is non-trivial. Our first attempt is to merge checkpoints trained from PrefixLM and UL2 with uniform weighting. Unfortunately, the merged model results in either similar or much worse performance. We argue that PrefixLM and UL2 lead to different training dynamics and converge to very different local minima. Directly merging their weights doesn't work right out of the box." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 407, + 542, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 407, + 542, + 491 + ], + "spans": [ + { + "bbox": [ + 303, + 407, + 542, + 491 + ], + "type": "text", + "content": "We next explore a two-stage optimization, where we first adapt with PrefixLM and then shift to UL2 for the last " + }, + { + "bbox": [ + 303, + 407, + 542, + 491 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 303, + 407, + 542, + 491 + ], + "type": "text", + "content": " of training, and vice versa. Figure 5 shows very mixed results. Switching from PrefixLM to UL2 generally hurts performance. In contrast, switching from UL2 to PrefixLM improves IT performance, but suffers from reduction in PT and SuperGLUE performance." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 496, + 543, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 496, + 543, + 520 + ], + "spans": [ + { + "bbox": [ + 304, + 496, + 543, + 520 + ], + "type": "text", + "content": "Another direction is to jointly optimize the model on PrefixLM and UL2, which we leave for future work." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 535, + 469, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 535, + 469, + 548 + ], + "spans": [ + { + "bbox": [ + 304, + 535, + 469, + 548 + ], + "type": "text", + "content": "7. 
Conclusion and Future Work" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 555, + 542, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 555, + 542, + 699 + ], + "spans": [ + { + "bbox": [ + 303, + 555, + 542, + 699 + ], + "type": "text", + "content": "In this paper, we presented methods for building powerful, general purpose encoder-decoder LLMs by adapting from pretrained decoder-only LLMs. Such adaptation offers high flexibility in leveraging different types/families of pretrained decoder-only models as well as combining different-sized models. Through extensive experiments based on Gemma 2, we demonstrated the feasibility and effectiveness of the adaptation: the adapted encoder-decoder LLMs outperform their decoder-only counterparts substantially after instruction tuning, dominating the quality-inference efficiency frontier. Besides, encoder-decoder LLMs also provide better contextual representations as evaluated on SuperGLUE." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 705, + 541, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 705, + 541, + 717 + ], + "spans": [ + { + "bbox": [ + 304, + 705, + 541, + 717 + ], + "type": "text", + "content": "We hope our findings inspire more researchers from" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 56 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 293, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 293, + 115 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 293, + 115 + ], + "type": "text", + "content": "academia and industry to revisit the encoder-decoder paradigm for LLM development. To facilitate the research, we will release the code and checkpoints at XXX (coming soon)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 121, + 293, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 121, + 293, + 266 + ], + "spans": [ + { + "bbox": [ + 52, + 121, + 293, + 266 + ], + "type": "text", + "content": "Our work still suffers from several limitations. Particularly, we only experimented with Gemma 2 models up to 9B, although the proposed approach could apply to other LLM families. In the future, we are interested in scaling the model size (e.g., to 27B), exploring other LLMs (such as LLaMA), examining more unbalanced setups, and testing the combination of dense and MoE LLMs. As mentioned above, we will also investigate better ways to leverage PrefixLM, knowledge distillation, and UL2. Extending our adapted encoder-decoder LLM to cross/multi-modality modeling (e.g., vision-language and speech-language) would be another intriguing direction." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 280, + 155, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 280, + 155, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 280, + 155, + 294 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 300, + 291, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 300, + 291, + 373 + ], + "spans": [ + { + "bbox": [ + 52, + 300, + 291, + 373 + ], + "type": "text", + "content": "We'd like to thank Enrique Alfonseca, Tris Warkentin, Xiaodan Song, Sugato Basu, Inderjit Dhillon, Alexander Grushetsky, Pandu Nayak, Ramakrishnan Srikant, and Slav Petrov for their constructive feedback on the manuscript. We are grateful to Srinivasan Venkatachary for supporting this project." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 388, + 112, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 388, + 112, + 400 + ], + "spans": [ + { + "bbox": [ + 53, + 388, + 112, + 400 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 407, + 291, + 716 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 53, + 407, + 291, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 407, + 291, + 467 + ], + "spans": [ + { + "bbox": [ + 53, + 407, + 291, + 467 + ], + "type": "text", + "content": "Abdin, M., Aneja, J., Awadalla, H., Awadallah, A., Awan, A. A., Bach, N., Bahree, A., Bakhtiari, A., Bao, J., Behl, H., et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 475, + 291, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 475, + 291, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 475, + 291, + 523 + ], + "type": "text", + "content": "Austin, J., Odena, A., Nye, M., Bosma, M., Michalewski, H., Dohan, D., Jiang, E., Cai, C., Terry, M., Le, Q., et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 532, + 291, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 532, + 291, + 581 + ], + "spans": [ + { + "bbox": [ + 53, + 532, + 291, + 581 + ], + "type": "text", + "content": "Bisk, Y., Zellers, R., Gao, J., Choi, Y., et al. Piqa: Reasoning about physical commonsense in natural language. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 7432-7439, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 589, + 291, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 589, + 291, + 649 + ], + "spans": [ + { + "bbox": [ + 53, + 589, + 291, + 649 + ], + "type": "text", + "content": "Botev, A., De, S., Smith, S. L., Fernando, A., Muraru, G.-C., Haroun, R., Berrada, L., Pascanu, R., Sessa, P. G., Dadashi, R., et al. Recurrentgemma: Moving past transformers for efficient open language models. arXiv preprint arXiv:2404.07839, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "spans": [ + { + "bbox": [ + 53, + 657, + 291, + 716 + ], + "type": "text", + "content": "Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. 
D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems, 33: 1877-1901, 2020." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 305, + 67, + 543, + 717 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 305, + 67, + 543, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 67, + 543, + 115 + ], + "spans": [ + { + "bbox": [ + 305, + 67, + 543, + 115 + ], + "type": "text", + "content": "Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H. P. D. O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 122, + 543, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 122, + 543, + 159 + ], + "spans": [ + { + "bbox": [ + 305, + 122, + 543, + 159 + ], + "type": "text", + "content": "Chen, Y.-C., Gan, Z., Cheng, Y., Liu, J., and Liu, J. Distilling knowledge learned in bert for text generation. arXiv preprint arXiv:1911.03829, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 167, + 542, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 167, + 542, + 204 + ], + "spans": [ + { + "bbox": [ + 305, + 167, + 542, + 204 + ], + "type": "text", + "content": "Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. Boolq: Exploring the surprising difficulty of natural yes/no questions. In NAACL, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 211, + 542, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 211, + 542, + 258 + ], + "spans": [ + { + "bbox": [ + 305, + 211, + 542, + 258 + ], + "type": "text", + "content": "Clark, P., Cowhey, I., Etzioni, O., Khot, T., Sabharwal, A., Schoenick, C., and Tafjord, O. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv:1803.05457v1, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 266, + 543, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 266, + 543, + 373 + ], + "spans": [ + { + "bbox": [ + 305, + 266, + 543, + 373 + ], + "type": "text", + "content": "Clinchant, S., Jung, K. W., and Nikoulina, V. On the use of BERT for neural machine translation. In Birch, A., Finch, A., Hayashi, H., Konstas, I., Luong, T., Neubig, G., Oda, Y., and Sudoh, K. (eds.), Proceedings of the 3rd Workshop on Neural Generation and Translation, pp. 108-117, Hong Kong, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-5611. URL https://aclanthology.org/D19-5611/." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 382, + 543, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 382, + 543, + 441 + ], + "spans": [ + { + "bbox": [ + 305, + 382, + 543, + 441 + ], + "type": "text", + "content": "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 450, + 543, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 450, + 543, + 521 + ], + "spans": [ + { + "bbox": [ + 305, + 450, + 543, + 521 + ], + "type": "text", + "content": "Corallo, G. and Papotti, P. FINCH: Prompt-guided key-value cache compression for large language models. Transactions of the Association for Computational Linguistics, 12:1517-1532, 2024. doi: 10.1162/tacl_a_00716. URL https://aclanthology.org/2024.tacl-1.83/." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 530, + 543, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 530, + 543, + 578 + ], + "spans": [ + { + "bbox": [ + 305, + 530, + 543, + 578 + ], + "type": "text", + "content": "Dettmers, T. and Zettlemoyer, L. The case for 4-bit precision: k-bit inference scaling laws. In International Conference on Machine Learning, pp. 7750-7774. PMLR, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 586, + 543, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 586, + 543, + 717 + ], + "spans": [ + { + "bbox": [ + 305, + 586, + 543, + 717 + ], + "type": "text", + "content": "Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. BERT: Pre-training of deep bidirectional transformers for language understanding. In Burstein, J., Doran, C., and Solorio, T. (eds.), Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 718 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 291, + 116 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 291, + 116 + ], + "type": "text", + "content": "Dua, D., Wang, Y., Dasigi, P., Stanovsky, G., Singh, S., and Gardner, M. Drop: A reading comprehension benchmark requiring discrete reasoning over paragraphs. arXiv preprint arXiv:1903.00161, 2019." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 122, + 291, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 122, + 291, + 171 + ], + "spans": [ + { + "bbox": [ + 53, + 122, + 291, + 171 + ], + "type": "text", + "content": "Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., et al. The llama 3 herd of models. 
arXiv preprint arXiv:2407.21783, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 178, + 291, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 178, + 291, + 239 + ], + "spans": [ + { + "bbox": [ + 53, + 178, + 291, + 239 + ], + "type": "text", + "content": "Gemini, T., Reid, M., Savinov, N., Teplyashin, D., Lepikhin, D., Lillicrap, T., Alayrac, J.-b., Soricut, R., Lazaridou, A., First, O., Schrittwieser, J., et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 245, + 291, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 245, + 291, + 282 + ], + "spans": [ + { + "bbox": [ + 53, + 245, + 291, + 282 + ], + "type": "text", + "content": "Gu, A. and Dao, T. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 289, + 291, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 289, + 291, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 289, + 291, + 350 + ], + "type": "text", + "content": "Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. Measuring massive multi-task language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 356, + 291, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 356, + 291, + 380 + ], + "spans": [ + { + "bbox": [ + 53, + 356, + 291, + 380 + ], + "type": "text", + "content": "Hinton, G., Vinyals, O., and Dean, J. Distilling the knowledge in a neural network, 2015." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 388, + 291, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 388, + 291, + 437 + ], + "spans": [ + { + "bbox": [ + 53, + 388, + 291, + 437 + ], + "type": "text", + "content": "Jiang, A. Q., Sablayrolles, A., Roux, A., Mensch, A., Savary, B., Bamford, C., Chaplot, D. S., Casas, D. d. l., Hanna, E. B., Bressand, F., et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 443, + 291, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 443, + 291, + 491 + ], + "spans": [ + { + "bbox": [ + 53, + 443, + 291, + 491 + ], + "type": "text", + "content": "Joshi, M., Choi, E., Weld, D., and Zettlemoyer, L. triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension. arXiv e-prints, art. arXiv:1705.03551, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 498, + 291, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 498, + 291, + 548 + ], + "spans": [ + { + "bbox": [ + 53, + 498, + 291, + 548 + ], + "type": "text", + "content": "Kaneko, M., Mita, M., Kiyono, S., Suzuki, J., and Inui, K. Encoder-decoder models can benefit from pre-trained masked language models in grammatical error correction. arXiv preprint arXiv:2005.00987, 2020." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 554, + 291, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 554, + 291, + 603 + ], + "spans": [ + { + "bbox": [ + 53, + 554, + 291, + 603 + ], + "type": "text", + "content": "Kasai, J., Pappas, N., Peng, H., Cross, J., and Smith, N. A. Deep encoder, shallow decoder: Reevaluating non-autoregressive machine translation. arXiv preprint arXiv:2006.10369, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 609, + 291, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 609, + 291, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 609, + 291, + 718 + ], + "type": "text", + "content": "Kocmi, T., Avramidis, E., Bawden, R., Bojar, O., Dvorkovich, A., Federmann, C., Fishel, M., Freitag, M., Gowda, T., Grundkiewicz, R., Haddow, B., Koehn, P., Marie, B., Monz, C., Morishita, M., Murray, K., Nagata, M., Nakazawa, T., Popel, M., Popovic, M., and Shmatova, M. Findings of the 2023 conference on machine translation (WMT23): LLMs are here but not quite there yet. In Koehn, P., Haddow, B., Kocmi, T., and Monz, C. (eds.), Proceedings of the Eighth Conference on Machine" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 305, + 67, + 543, + 718 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 315, + 67, + 543, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 67, + 543, + 116 + ], + "spans": [ + { + "bbox": [ + 315, + 67, + 543, + 116 + ], + "type": "text", + "content": "Translation, pp. 1-42, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.wmt-1.1. URL https://aclanthology.org/2023.wmt-1.1/." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 125, + 543, + 234 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 125, + 543, + 234 + ], + "spans": [ + { + "bbox": [ + 306, + 125, + 543, + 234 + ], + "type": "text", + "content": "Kwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D., Polosukhin, I., Devlin, J., Lee, K., Toutanova, K., Jones, L., Kelley, M., Chang, M.-W., Dai, A. M., Uszkoreit, J., Le, Q., and Petrov, S. Natural questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:452-466, 2019. doi: 10.1162/tacl_a_00276. URL https://aclanthology.org/Q19-1026/." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 244, + 543, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 244, + 543, + 364 + ], + "spans": [ + { + "bbox": [ + 305, + 244, + 543, + 364 + ], + "type": "text", + "content": "Lewis, M., Liu, Y., Goyal, N., Ghazvininejad, M., Mohamed, A., Levy, O., Stoyanov, V., and Zettlemoyer, L. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871-7880, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.703. URL https://aclanthology.org/2020.acl-main.703." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 375, + 543, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 375, + 543, + 434 + ], + "spans": [ + { + "bbox": [ + 305, + 375, + 543, + 434 + ], + "type": "text", + "content": "Li, J., Tang, Z., Ding, Y., Wang, P., Guo, P., You, W., Qiao, D., Chen, W., Fu, G., Zhu, Q., et al. Openba: An open-sourced 15b bilingual asymmetric seq2seq model pretrained from scratch. arXiv preprint arXiv:2309.10706, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 445, + 541, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 445, + 541, + 483 + ], + "spans": [ + { + "bbox": [ + 305, + 445, + 541, + 483 + ], + "type": "text", + "content": "Lin, S., Hilton, J., and Evans, O. Truthfulqa: Measuring how models mimic human falsehoods. arXiv preprint arXiv:2109.07958, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 493, + 543, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 493, + 543, + 540 + ], + "spans": [ + { + "bbox": [ + 305, + 493, + 543, + 540 + ], + "type": "text", + "content": "Liu, A., Feng, B., Xue, B., Wang, B., Wu, B., Lu, C., Zhao, C., Deng, C., Zhang, C., Ruan, C., et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 552, + 541, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 552, + 541, + 576 + ], + "spans": [ + { + "bbox": [ + 305, + 552, + 541, + 576 + ], + "type": "text", + "content": "Liu, Y. and Lapata, M. Text summarization with pretrained encoders. arXiv preprint arXiv:1908.08345, 2019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 586, + 543, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 586, + 543, + 647 + ], + "spans": [ + { + "bbox": [ + 305, + 586, + 543, + 647 + ], + "type": "text", + "content": "Liu, Z., Zhao, C., Iandola, F., Lai, C., Tian, Y., Fedorov, I., Xiong, Y., Chang, E., Shi, Y., Krishnamoorthi, R., et al. Mobilellm: Optimizing sub-billion parameter language models for on-device use cases. arXiv preprint arXiv:2402.14905, 2024b." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 657, + 543, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 657, + 543, + 718 + ], + "spans": [ + { + "bbox": [ + 305, + 657, + 543, + 718 + ], + "type": "text", + "content": "Paperno, D., Kruszewski, G., Lazaridou, A., Pham, Q. N., Bernardi, R., Pezzelle, S., Baroni, M., Boleda, G., and Fernandez, R. The lambada dataset: Word prediction requiring a broad discourse context. arXiv preprint arXiv:1606.06031, 2016." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 296, + 717 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 291, + 115 + ], + "type": "text", + "content": "Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., Zhou, Y., Li, W., and Liu, P. J. Exploring the limits of transfer learning with a unified text-to-text transformer. 21(1), jan 2020. ISSN 1532-4435." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 291, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 291, + 174 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 291, + 174 + ], + "type": "text", + "content": "Rein, D., Hou, B. L., Stickland, A. C., Petty, J., Pang, R. Y., Dirani, J., Michael, J., and Bowman, S. R. Gpqa: A graduate-level google-proof q&a benchmark. arXiv preprint arXiv:2311.12022, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 182, + 291, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 291, + 229 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 291, + 229 + ], + "type": "text", + "content": "Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. Winogrande: An adversarial winograd schema challenge at scale. Communications of the ACM, 64(9):99-106, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 239, + 291, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 291, + 276 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 291, + 276 + ], + "type": "text", + "content": "Sap, M., Rashkin, H., Chen, D., LeBras, R., and Choi, Y. Socialiaq: Commonsense reasoning about social interactions. arXiv preprint arXiv:1904.09728, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 285, + 291, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 285, + 291, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 285, + 291, + 334 + ], + "type": "text", + "content": "Shi, F., Suzgun, M., Freitag, M., Wang, X., Srivats, S., Vosoughi, S., Chung, H. W., Tay, Y., Ruder, S., Zhou, D., et al. Language models are multilingual chain-of-thought reasoners. arXiv preprint arXiv:2210.03057, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 343, + 291, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 343, + 291, + 379 + ], + "spans": [ + { + "bbox": [ + 53, + 343, + 291, + 379 + ], + "type": "text", + "content": "Song, K., Tan, X., Qin, T., Lu, J., and Liu, T.-Y. 
Mass: Masked sequence to sequence pre-training for language generation, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 388, + 291, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 388, + 291, + 449 + ], + "spans": [ + { + "bbox": [ + 53, + 388, + 291, + 449 + ], + "type": "text", + "content": "Suzgun, M., Scales, N., Schärli, N., Gehrmann, S., Tay, Y., Chung, H. W., Chowdhery, A., Le, Q. V., Chi, E. H., Zhou, D., et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 458, + 291, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 291, + 518 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 291, + 518 + ], + "type": "text", + "content": "Tay, Y., Dehghani, M., Tran, V. Q., Garcia, X., Wei, J., Wang, X., Chung, H. W., Bahri, D., Schuster, T., Zheng, S., et al. Ul2: Unifying language learning paradigms. In The Eleventh International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 528, + 291, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 528, + 291, + 588 + ], + "spans": [ + { + "bbox": [ + 53, + 528, + 291, + 588 + ], + "type": "text", + "content": "Team, G., Riviere, M., Pathak, S., Sessa, P. G., Hardin, C., Bhupatiraju, S., Hussenot, L., Mesnard, T., Shahriari, B., Ramé, A., et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 597, + 296, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 597, + 296, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 597, + 296, + 717 + ], + "type": "text", + "content": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L. u., and Polosukhin, I. Attention is all you need. In Guyon, I., Luxburg, U. V., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S., and Garnett, R. (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 543, + 717 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 306, + 67, + 542, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 67, + 542, + 128 + ], + "spans": [ + { + "bbox": [ + 306, + 67, + 542, + 128 + ], + "type": "text", + "content": "Wang, A., Pruksachatkun, Y., Nangia, N., Singh, A., Michael, J., Hill, F., Levy, O., and Bowman, S. Superglue: A stickier benchmark for general-purpose language understanding systems. Advances in neural information processing systems, 32, 2019a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 135, + 542, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 135, + 542, + 194 + ], + "spans": [ + { + "bbox": [ + 306, + 135, + 542, + 194 + ], + "type": "text", + "content": "Wang, A., Pruksachatkun, Y., Nangia, N., Singh, A., Michael, J., Hill, F., Levy, O., and Bowman, S. R. SuperGLUE: a stickier benchmark for general-purpose language understanding systems. Curran Associates Inc., Red Hook, NY, USA, 2019b." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 201, + 543, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 201, + 543, + 333 + ], + "spans": [ + { + "bbox": [ + 306, + 201, + 543, + 333 + ], + "type": "text", + "content": "Wang, T., Roberts, A., Hesslow, D., Scao, T. L., Chung, H. W., Beltagy, I., Launay, J., and Raffel, C. What language model architecture and pretraining objective works best for zero-shot generalization? In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., and Sabato, S. (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 22964-22984. PMLR, 17-23 Jul 2022. URL https://proceedings.mlr.press/v162/wang22u.html." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 341, + 542, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 341, + 542, + 399 + ], + "spans": [ + { + "bbox": [ + 306, + 341, + 542, + 399 + ], + "type": "text", + "content": "Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. arXiv preprint arXiv:2406.01574, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 407, + 542, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 542, + 552 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 542, + 552 + ], + "type": "text", + "content": "Xue, L., Constant, N., Roberts, A., Kale, M., Al-Rfou, R., Siddhant, A., Barua, A., and Raffel, C. mT5: A massively multilingual pre-trained text-to-text transformer. In Toutanova, K., Rumshisky, A., Zettlemoyer, L., Hakkani-Tur, D., Beltagy, I., Bethard, S., Cotterell, R., Chakraborty, T., and Zhou, Y. (eds.), Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 483-498, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.41. URL https://aclanthology.org/2021.naacl-main.41." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 559, + 542, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 559, + 542, + 642 + ], + "spans": [ + { + "bbox": [ + 306, + 559, + 542, + 642 + ], + "type": "text", + "content": "Xue, L., Barua, A., Constant, N., Al-Rfou, R., Narang, S., Kale, M., Roberts, A., and Raffel, C. ByT5: Towards a token-free future with pre-trained byte-to-byte models. Transactions of the Association for Computational Linguistics, 10:291-306, 2022. doi: 10.1162/tacl_a_00461. URL https://aclanthology.org/2022.tacl-1.17." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 650, + 542, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 650, + 542, + 686 + ], + "spans": [ + { + "bbox": [ + 306, + 650, + 542, + 686 + ], + "type": "text", + "content": "Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 693, + 542, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 542, + 717 + ], + "type": "text", + "content": "Yang, J., Wang, M., Zhou, H., Zhao, C., Zhang, W., Yu, Y., and Li, L. Towards making the most of bert" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 292, + 387 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 63, + 67, + 291, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 67, + 291, + 102 + ], + "spans": [ + { + "bbox": [ + 63, + 67, + 291, + 102 + ], + "type": "text", + "content": "in neural machine translation. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 9378-9385, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 111, + 291, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 111, + 291, + 159 + ], + "spans": [ + { + "bbox": [ + 53, + 111, + 291, + 159 + ], + "type": "text", + "content": "Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. Hellaswag: Can a machine really finish your sentence? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 167, + 292, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 167, + 292, + 287 + ], + "spans": [ + { + "bbox": [ + 53, + 167, + 292, + 287 + ], + "type": "text", + "content": "Zhang, B., Ghorbani, B., Bapna, A., Cheng, Y., Garcia, X., Shen, J., and First, O. Examining scaling and transfer of language model architectures for machine translation. In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., and Sabato, S. (eds.), Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pp. 26176-26192. PMLR, 17-23 Jul 2022. URL https://proceedings.mlrpress/v162/zhang22h.html." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 294, + 291, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 294, + 291, + 342 + ], + "spans": [ + { + "bbox": [ + 53, + 294, + 291, + 342 + ], + "type": "text", + "content": "Zhong, W., Cui, R., Guo, Y., Liang, Y., Lu, S., Wang, Y., Saied, A., Chen, W., and Duan, N. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 350, + 291, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 350, + 291, + 387 + ], + "spans": [ + { + "bbox": [ + 53, + 350, + 291, + 387 + ], + "type": "text", + "content": "Zhu, J., Xia, Y., Wu, L., He, D., Qin, T., Zhou, W., Li, H., and Liu, T.-Y. Incorporating bert into neural machine translation. arXiv preprint arXiv:2002.06823, 2020." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "spans": [ + { + "bbox": [ + 129, + 45, + 465, + 57 + ], + "type": "text", + "content": "Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_content_list.json b/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6873159137eac1d31d57915e8563906ae7d6fd63 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_content_list.json @@ -0,0 +1,2223 @@ +[ + { + "type": "text", + "text": "Transfer between Modalities with MetaQueries", + "text_level": 1, + "bbox": [ + 138, + 99, + 808, + 125 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xichen Pan $^{1,2}$ , Satya Narayan Shukla $^{1,\\dagger}$ , Aashu Singh $^{1}$ , Zhuokai Zhao $^{1}$ , Shlok Kumar Mishra $^{1}$ , Jialiang Wang $^{1}$ , Zhiyang Xu $^{1}$ , Jiuhai Chen $^{1}$ , Kunpeng Li $^{1}$ , Felix Juefei-Xu $^{1}$ , Ji Hou $^{1,\\dagger}$ , Saining Xie $^{2,\\dagger}$", + "bbox": [ + 135, + 128, + 841, + 161 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Meta, $^{2}$ New York University", + "$\\dagger$ Equal advising" + ], + "bbox": [ + 138, + 166, + 352, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Unified multimodal models aim to integrate understanding (text output) and generation (pixel output), but aligning these different modalities within a single architecture often demands complex training recipes and careful data balancing. We introduce MetaQueries, a set of learnable queries that act as an efficient interface between autoregressive multimodal LLMs (MLLMs) and diffusion models. MetaQueries connects the MLLM's latents to the diffusion decoder, enabling knowledge-augmented image generation by leveraging the MLLM's deep understanding and reasoning capabilities. Our method simplifies training, requiring only paired image-caption data and standard diffusion objectives. Notably, this transfer is effective even when the MLLM backbone remains frozen, thereby preserving its state-of-the-art multimodal understanding capabilities while achieving strong generative performance. 
Additionally, our method is flexible and can be easily instruction-tuned for advanced applications such as image editing and subject-driven generation.", + "bbox": [ + 135, + 215, + 859, + 382 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Date: April 9, 2025", + "bbox": [ + 138, + 401, + 271, + 415 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Correspondence: satyanshukla@meta.com, jihou@meta.com, saining.xie@nyu.edu", + "bbox": [ + 138, + 417, + 676, + 430 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project Page: https://xichenpan.com/metaquery", + "bbox": [ + 138, + 431, + 465, + 446 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Meta", + "bbox": [ + 784, + 431, + 859, + 446 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 109, + 489, + 271, + 507 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The quest for unified multimodal models capable of both deep understanding (typically resulting in textual outputs) and rich generation (resulting in pixel outputs) holds immense promise. Such systems could unlock synergistic capabilities (OpenAI, 2025; Google, 2025), where understanding informs generation and vice versa. However, effectively connecting these different output modalities poses considerable challenges—e.g. how do we effectively transfer the latent world knowledge from the autoregressive multimodal LLM to the image generator? Although significant progress has been made, most published approaches (Ge et al., 2024; Sun et al., 2024b; Tong et al., 2024; Jin et al., 2024; Liu et al., 2024a; Team, 2024a; Xie et al., 2024; Wang et al., 2024; Wu et al., 2025a; Chen et al., 2025; Dong et al., 2024; Zhou et al., 2025; Shi et al., 2024) rely on carefully tuning base multimodal LLMs (MLLMs) to handle both understanding and generation tasks. This involves complex architectural design, data/loss balancing, multiple training stages, and other complex training recipes—without these, optimizing one capability could compromise the other.", + "bbox": [ + 107, + 522, + 887, + 690 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we aim to deliver the promise of unified models via a simpler philosophy: Render unto diffusion what is generative, and unto LLMs what is understanding. In other words, instead of building a monolithic system from scratch, we focus on effectively transferring capabilities between state-of-the-art, pre-trained models specialized for different output modalities. To operationalize this, we keep MLLMs frozen so they can focus on what they do best—understanding—while entrusting image generation to diffusion models. We then demonstrate that even under this frozen condition, the MLLM's inherent world knowledge, strong reasoning, and in-context learning capabilities can indeed be transferred to image generation, provided the right architectural bridge is in place.", + "bbox": [ + 107, + 696, + 887, + 819 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, leveraging an MLLM—especially a frozen one—for both multimodal understanding and generation is far from straightforward. Although (frozen) LLMs have shown good performance as conditional text encoders in text-to-image generation (Zhuo et al., 2024; Xie et al., 2025; Ma et al., 2024), they are not compatible with many desired tasks in unified modeling, such as in-context learning or producing multimodal, interleaved output. 
The architectural bridge we design in this work is MetaQuery (Figure 1). MetaQuery feeds a set of", + "bbox": [ + 107, + 825, + 888, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06256v1 [cs.CV] 8 Apr 2025", + "bbox": [ + 22, + 268, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 936, + 503, + 948 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/70fdf417d4fb39bf516fdf05899650efd71ec63b281c5cfcbbf45619d8d2b2b4.jpg", + "image_caption": [ + "Figure 1 Overview of our model. Blue tokens maintain SOTA multimodal understanding; MetaQueries are learnable queries that directly applied to frozen MLLMs to query out conditions for generation. The model is tuned using only denoising objective with paired data. The generative diffusion models can be either frozen or further instruction-tuned for advanced generation tasks." + ], + "image_footnote": [], + "bbox": [ + 151, + 78, + 846, + 280 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "learnable queries directly into a frozen MLLM to extract multimodal conditions for multimodal generation. Our experiments reveal that, even without fine-tuning or enabling bi-directional attention, the frozen LLM serves as a powerful feature resampler (Alayrac et al., 2022), producing high-quality conditions for multimodal generation. Training unified models with MetaQueries requires only a modest amount of paired image-caption data to connect these prompted conditions to any conditional diffusion model. Because the entire MLLM stays intact for understanding, the training objective remains the original denoising objective—just as efficient and stable as fine-tuning a diffusion model.", + "bbox": [ + 109, + 377, + 887, + 484 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "More specifically, previous unified models aim to train a single autoregressive transformer backbone to jointly model $p(\\text{text}, \\text{pixels})$ . In contrast, we choose to use a token $\\rightarrow$ [transformer] $\\rightarrow$ [diffusion] $\\rightarrow$ pixels paradigm, which might share a high-level philosophy with the concurrent GPT-4o image generation system, as hinted at by OpenAI (2025). This approach composes the MLLM's autoregressive prior with a powerful diffusion decoder, directly leveraging the frozen MLLM's strong capability in modeling compressed semantic representations, thus avoiding the more challenging task of directly generating pixels.", + "bbox": [ + 109, + 491, + 887, + 583 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To validate our approach, we conduct a series of controlled experiments, showing that MetaQuery1 outperforms the use of a frozen MLLM purely as a conditional text encoder for image generation. Moreover, MetaQuery can match the performance of fully tuning the MLLM backbone, yet it is significantly more efficient. We also systematically investigate the training strategy, including the number of tokens and architectural configurations. 
With just 25M publicly available image-caption pairs, we are able to train a family of unified models that not only preserves state-of-the-art (SOTA) performance in image understanding, but also achieves SOTA-level results in text-to-image generation across multiple benchmarks.", + "bbox": [ + 109, + 589, + 887, + 696 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The promise of unified modeling goes beyond handling multimodal understanding and text-to-image generation in parallel. A deeper synergy is expected—one that taps into advanced MLLM abilities like reasoning, internal knowledge, multimodal perception, and in-context learning to enhance generation. Our results show that our method draws on the frozen MLLM's commonsense knowledge, achieving SOTA visual-commonsense generation on the CommonsenseT2I benchmark (Fu et al., 2024). Our approach also harnesses the built-in reasoning and in-context learning capabilities of frozen MLLMs, producing images from complex prompts—such as generating the United States flag in response to \"The national flag of the country where Yellowstone National Park is located.\" (See Figure 9 for examples.) We also benchmark this type of world knowledge reasoning capability on WISE (Niu et al., 2025) and demonstrate SOTA performance.", + "bbox": [ + 109, + 702, + 887, + 840 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Finally, by connecting, preserving, and enhancing multimodal input with MetaQueries and a frozen MLLM backbone, our model can be further instruction-tuned for advanced generation tasks such as image editing and subject-driven generation. We show that this can be achieved both efficiently and effectively using a scalable", + "bbox": [ + 109, + 845, + 887, + 892 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1For simplicity, we also use MetaQuery to represent our method.", + "bbox": [ + 129, + 898, + 521, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "data curation pipeline that directly leverages naturally occurring image pairs from web corpora, instead of depending on human-created pairs or synthetically generated data (Brooks et al., 2023; Hu et al., 2024a; Xiao et al., 2025). This natural supervision surprisingly unlocks several new capabilities beyond subject-driven generation, such as visual association and logo design (see Figure 8 for examples).", + "bbox": [ + 109, + 80, + 887, + 142 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In summary, we explore a simple yet underexplored alternative to unified multimodal modeling. Our method, MetaQuery, bridges frozen MLLM backbones and diffusion models. Experiments show that this framework delivers all the capabilities once thought to require MLLM fine-tuning while being much easier to train. 
The main results and findings in this paper include:", + "bbox": [ + 109, + 148, + 887, + 209 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- With MetaQuery and frozen MLLM backbones, we maintain SOTA multimodal understanding performance while enabling SOTA-level multimodal generation.", + "MetaQuery can transfer the capabilities of MLLMs for reasoning- and knowledge-augmented image generation.", + "MetaQuery can extract highly detailed visual conditions beyond semantic similarity from frozen MLLMs, enabling image reconstruction and editing tasks.", + "- Our method can be easily instruction-tuned even with a frozen MLLM backbone, enabling advanced multimodal generation tasks like subject-driven generation." + ], + "bbox": [ + 133, + 215, + 883, + 358 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 109, + 383, + 284, + 401 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unified understanding and generation models. Next-token prediction has proven to be an effective approach for training models to understand language (Devlin, 2019; Brown et al., 2020) and multimodal content (Liu et al., 2024b). Recently, the community has witnessed numerous efforts to extend the success of multimodal understanding (Liu et al., 2024b) to multimodal generation by training LLM backbones to generate images at the same time. However, unlike adapting text-only LLMs (Touvron et al., 2023) to understand multimodal content with one single next text token prediction objective (Liu et al., 2024b), generating multimodal content requires a different set of training objectives. SEED-X (Ge et al., 2024), Emu (Sun et al., 2024b), and MetaMorph (Tong et al., 2024) learn to regress image features; LaVIT (Jin et al., 2024), LWM (Liu et al., 2024a), Chameleon (Team, 2024a), Show-o (Xie et al., 2024), EMU3 (Wang et al., 2024), and Janus (Wu et al., 2025a; Chen et al., 2025) auto-regressively predict next visual tokens; and DreamLLM (Dong et al., 2024), Transfusion (Zhou et al., 2025) employ diffusion objectives. However, these approaches necessitate tuning LLMs for generating both modalities, naturally posing challenges in multi-task balancing.", + "bbox": [ + 109, + 415, + 888, + 598 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unified models with frozen LLMs. Several studies have explored the use of frozen LLMs for multimodal understanding and generation. For instance, LMFusion (Shi et al., 2024) trains image generation expert feed-forward networks (FFNs) and query-key-value (QKV) modules in parallel with a frozen LLM backbone to deeply fuse input conditions and denoise visual outputs. However, this approach offers limited flexibility as it shares the same architecture as specific LLM backbones and requires training a separate set of generative modules for every single LLM backbone. This not only imposes more computational burden but also restricts the ability to leverage powerful pre-trained generative models. An earlier work, GILL (Koh et al., 2023), investigates feeding learnable tokens into frozen MLLMs. It employs a combined contrastive loss and regression loss for image retrieval and generation, rather than directly employing the denoising objective for more efficient training. 
Its application is restricted to contextual image generation and it does not systematically explore the impact of frozen MLLMs and learnable queries.", + "bbox": [ + 109, + 614, + 887, + 782 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 MetaQuery", + "text_level": 1, + "bbox": [ + 109, + 804, + 261, + 824 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this work, we propose MetaQuery, which losslessly augments understanding-only MLLMs with multimodal generation capabilities while preserving their original architecture designs and parameters intact. We carefully analyze the impact of applying MetaQuery on image generation performance. Results show that a frozen MLLM can provide strong conditions for multimodal generation.", + "bbox": [ + 109, + 835, + 885, + 897 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/ce8bf82f52247f065d1f56aaad4e4693afc7e3ffbf3a357566646853e081a9b8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods | # of Tokens | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑
LLM last layer embedding* | - | 7.49 | 0.55 | 78.41
Random queries | 64 | 8.59 | 0.35 | 54.81
Learnable queries | 64 | 7.43 | 0.56 | 75.35
Learnable queries | 512 | 7.34 | 0.56 | 78.43
", + "bbox": [ + 197, + 78, + 802, + 157 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/f604c38dcdbffc2511f059b98a51508f48de17764a2bbbc28a2ac4936d985f9d.jpg", + "table_caption": [ + "Table 1 Study on different conditions for image generation. * denotes the embeddings of input tokens. Learnable queries achieve comparable performance to using all hidden states and can even surpass them with more tokens." + ], + "table_footnote": [], + "table_body": "
Methods | Train LLM | Train DiT | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑
MLLM tuning | ✓ | ✗ | 7.75 | 0.58 | 78.97
E2E tuning | ✓ | ✓ | 6.28 | 0.61 | 79.39
Frozen MLLM | ✗ | ✗ | 7.43 | 0.56 | 75.35
Frozen MLLM | ✗ | ✓ | 6.06 | 0.61 | 76.66
", + "bbox": [ + 205, + 208, + 792, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 2 Study on strategies for adapting MLLMs. The methods without training LLM do not suffer from multimodal understanding degradation. Frozen MLLM achieves comparable performance to full MLLM tuning, with slightly lower prompt alignment but slightly improved visual quality.", + "bbox": [ + 109, + 297, + 885, + 340 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Architecture", + "text_level": 1, + "bbox": [ + 109, + 364, + 263, + 380 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MetaQuery bridges frozen MLLMs with diffusion models. We use randomly initialized learnable queries $\\mathcal{Q} \\in \\mathbb{R}^{N \\times D}$ to query out the conditions $\\mathcal{C}$ for generation. $N$ is the number of queries and $D$ is the dimension of the queries, which is the same as the MLLM hidden dimension. For simplicity and compatibility, we continue to use causal masking for the entire sequence rather than specifically enabling full attention for $\\mathcal{Q}$ . The conditions $\\mathcal{C}$ are then fed into a trainable connector to align with the input space of text-to-image diffusion models. These models can be arbitrary as long as they have a conditional input interface; we simply replace its original condition with our $\\mathcal{C}$ . The whole model is trained with the original generation objective on paired data. In this paper, we focus on image generation tasks, but the model can be easily extended to other modalities like audio, video, 3D, and more.", + "bbox": [ + 109, + 388, + 885, + 526 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Design Choices", + "text_level": 1, + "bbox": [ + 109, + 544, + 284, + 559 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proposed architecture involves two design choices: using learnable queries and keeping the MLLM backbone frozen. We explain the reasons why we adopted these choices and how they impact performance. For all experiments, unless otherwise specified, we use the same frozen LLaVA-OneVision-0.5B (Li et al., 2024a) MLLM backbone, frozen Sana-0.6B (Xie et al., 2025) diffusion model in 512 resolution, learnable queries with $N = 64$ tokens, and a connector with a 24-layer transformer encoder. All models are trained on 25M publicly available image caption pairs for 4 epochs. We report FID score (Heusel et al., 2017) on MJHQ-30K (Li et al., 2024b) for visual aesthetic quality, and GenEval (Ghosh et al., 2023) and DPG-Bench (Hu et al., 2024b) (both without prompt rewriting) for prompt alignment, respectively.", + "bbox": [ + 109, + 569, + 887, + 691 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Learnable queries. Many models like Lumina-Next (Zhuo et al., 2024), Sana (Xie et al., 2025), and Kosmos-G (Pan et al., 2024) use the (M)LLM's last layer embedding of input tokens as image generation conditions. However, this approach is not ideal for unified models as it is not compatible with many desired tasks in unified modeling, such as in-context learning or producing multimodal, interleaved output (we provide more discussion and comparison with MetaQuery in Section 5.6). As shown in Table 1, using learnable queries with just $N = 64$ tokens achieves image generation quality comparable to that of utilizing the last layer embedding of input tokens. 
While random queries produce acceptable FID scores, they struggle with prompt alignment, highlighting the importance of learnable queries. Additionally, since the last layer embedding setting naturally comes with a longer sequence length, we also tested learnable queries with $N = 512$ tokens, which further improves performance and even outperforms the last layer embedding approach.", + "bbox": [ + 109, + 707, + 885, + 859 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Frozen MLLM. Existing unified models train MLLMs to jointly model $p(\\text{text}, \\text{pixels})$ , resulting in a more complicated training process and even downgraded understanding performance. MetaQuery keeps the original", + "bbox": [ + 109, + 875, + 883, + 906 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/26e5def00142de348936cfa334f6dcbef2aa8e4cd65e4ff90e11e7056c47e06b.jpg", + "image_caption": [ + "(a) Text-to-image results." + ], + "image_footnote": [], + "bbox": [ + 114, + 84, + 495, + 303 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6cbcca96c2745b0fc235a258d5debaaacb8baf3df31684a63e481d46e4678b1c.jpg", + "image_caption": [ + "(b) Image reconstruction results." + ], + "image_footnote": [], + "bbox": [ + 504, + 78, + 883, + 305 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/991b1ac03b36f5361d4e58d6868c9741411c1cc7e9963640e355b72a09945466.jpg", + "image_caption": [ + "Figure 2 Study on the scaling of token numbers. As the number of tokens increases, text-to-image prompt alignment and image reconstruction results consistently improve.", + "Figure 3 Visaul samples for image reconstruction with different numbers of tokens." + ], + "image_footnote": [], + "bbox": [ + 112, + 380, + 885, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "MLLM architecture and parameters interact to preserve SOTA understanding capabilities. However, for multimodal generation, a key concern is whether MetaQuery's performance with significantly fewer tunable parameters would be substantially worse than methods with full MLLM tuning. As shown in Table 2, frozen MLLMs achieve comparable performance to full MLLM tuning, with slightly lower prompt alignment but slightly improved visual quality. Tuning DiT can further improve performance for both settings. This suggests that MetaQuery is another possible training strategy, one that is simpler but also effective, as an alternative to fine-tuning the entire MLLM.", + "bbox": [ + 109, + 619, + 883, + 726 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Training Recipe", + "text_level": 1, + "bbox": [ + 109, + 744, + 285, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Based on insights from our design choices, we further study key training options for the two main components of MetaQuery: learnable queries and connectors. This study examines the number of tokens and connector design. Unless otherwise specified, all experiments in this section use the same setup as described in Section 3.2.", + "bbox": [ + 109, + 768, + 883, + 815 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Number of tokens. Many works (Wu et al., 2023; Pan et al., 2024; Ge et al., 2024) have employed learnable queries for condition extraction. 
However, they either set the number of tokens to match the fixed input sequence length of the image decoder (e.g., $N = 77$ for the CLIP (Radford et al., 2021) text encoder in Stable Diffusion v1.5 (Rombach et al., 2021)), or use an arbitrary fixed number like $N = 64$ without further investigation. Given that modern diffusion models like Lumina-Next (Zhuo et al., 2024) and Sana (Xie", + "bbox": [ + 109, + 832, + 883, + 907 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/7aae1bbc607ab3c1b39cc159c1512802e81ed3d35ad05074e80e97c48e4e8080.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Architecture | # of Layers | Dims | # of Params | Rel. Wall Time | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑
Proj-Enc | 6 | 2304 | 517M | 1.06x | 7.80 | 0.53 | 73.37
Proj-Enc | 24 | 2304 | 2046M | 1.23x | 7.41 | 0.51 | 73.75
Enc-Proj | 6 | 896 | 84M | 1x | 7.73 | 0.49 | 71.39
Enc-Proj | 24 | 896 | 316M | 1.06x | 7.43 | 0.56 | 75.35
", + "bbox": [ + 122, + 78, + 874, + 157 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3 Study on connector design. Aligning the conditions first in the same dimension as the MLLM hidden states (Enc-Proj) is more effective and parameter-efficient.", + "bbox": [ + 109, + 166, + 883, + 195 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "et al., 2025) naturally accept variable-length conditions, determining the optimal number of tokens for learnable queries is crucial. In Figure 2, we provide a careful study of the number of tokens and observe promising scalability of MetaQueries. For text-to-image generation, visual quality begins to converge after 64 tokens, while more tokens consistently yield better prompt alignment. This is more evident for long captions, as GenEval with rewritten prompts increases more rapidly as the number of tokens increases. For image reconstruction, we observe that more tokens consistently improve the quality of reconstructed images (visual samples can be found in Figure 3). In our later experiments, we set the number of tokens to $N = 256$ for all models, as it achieves a good balance between performance and efficiency.", + "bbox": [ + 109, + 220, + 887, + 343 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Connector design. The connector is another important component in MetaQuery. We use the same architecture as the Qwen2.5 (Team, 2024b) LLM, but enable bi-directional attention for the connector. We study two different designs: Projection Before Encoder (Proj-Enc) and Projection After Encoder (Enc-Proj). Proj-Enc first projects the conditions into the input dimension of the diffusion decoder, then uses a transformer encoder to align the conditions. On the other hand, Enc-Proj first uses a transformer encoder to align the conditions in the same dimension as the MLLM hidden states, then projects the conditions into the input dimension of the diffusion decoder. As shown in Table 3, the Enc-Proj design achieves better performance than the Proj-Enc design while having fewer parameters.", + "bbox": [ + 109, + 359, + 887, + 481 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Model Training", + "text_level": 1, + "bbox": [ + 109, + 503, + 295, + 523 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We train MetaQuery in two stages: the pre-training stage and the instruction tuning stage. Both training stages keep MLLMs frozen and fine-tune learnable queries, connectors, and diffusion models. We use three different MLLM backbones for different sizes: Base (LLaVA-OneVision 0.5B (Li et al., 2024a)), Large (Qwen2.5-VL 3B (Bai et al., 2025)), and X-Large (Qwen2.5-VL 7B (Bai et al., 2025)). We set the number of tokens to $N = 256$ for all models, and utilize a 24-layer connector with Enc-Proj architecture. For image generation heads, we tested two different diffusion models: Stable Diffusion v1.5 (Rombach et al., 2021) and Sana-1.6B (Xie et al., 2025).", + "bbox": [ + 109, + 536, + 375, + 839 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/71a1d9a265fd63ade28febee74e11c217a12bde87dcfcf180be066256678952a.jpg", + "image_caption": [ + "Figure 4 Overview of instruction tuning data curation pipeline. We group images from web corpora based on caption similarity using the SigLIP (Zhai et al., 2023) model, then construct instruction-tuning data from these image pairs using an MLLM." 
+ ], + "image_footnote": [], + "bbox": [ + 398, + 532, + 880, + 777 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Pre-training. We pre-train our model on 25M publicly available image-caption pairs for 8 epochs with a learning rate of 1e-4 and a global batch size of 4096. The learning rate follows a cosine decay schedule with a 4,000-step warmup period before gradually decreasing to 1e-5.", + "bbox": [ + 109, + 854, + 885, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/47fde3a154dfb5fd97836334bedd3adfe43814936d49863da6f7b62e487a3a46.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods | Base (M)LLM | MME-P | MMB | SEED | MMMU | MM-Vet | COCO FID ↓ | MJHQ FID ↓ | GenEval ↑ | DPG-Bench ↑
Emu | LLaMA 13B | - | - | - | - | - | 11.66 | - | - | -
DreamLLM | Vicuna 7B | - | - | - | - | 36.6 | 8.46 | - | - | -
Chameleon | From Scratch 7B | - | - | - | 22.4 | 8.3 | 26.74 | - | 0.39 | -
Show-o-512 | Phi-1.5 1.3B | 1097.2 | - | - | 26.7 | - | 9.24 | 15.18 | 0.68 | -
VILA-U | LLaMA-2 7B | 1401.8 | - | 59.0 | - | 33.5 | - | 7.69 | - | -
Emu3 | From Scratch 7B | - | 58.5 | 68.2 | 31.6 | 37.2 | 12.80 | - | 0.66† | 80.60
MetaMorph | LLaMA-3 8B | - | 75.2 | 71.8 | - | - | 11.8 | - | - | -
TokenFlow-XL | Qwen-2.5 14B | 1551.1 | 76.8 | 72.6 | 43.2 | 48.2 | - | - | 0.63† | 73.38
Transfusion | From Scratch 7B | - | - | - | - | - | 8.70 | - | 0.63 | -
LMFusion | LLaVA-Next 8B | 1603.7 | 72.1 | 72.5 | 41.7 | - | 8.20 | - | - | -
Janus | DeepSeek-LLM 1.5B | 1338.0 | 69.4 | 63.7 | 30.5 | 34.3 | 8.53 | 10.10 | 0.61 | -
JanusFlow | DeepSeek-LLM 1.5B | 1333.1 | 74.9 | 70.5 | 29.3 | 30.9 | - | 9.51 | 0.63 | 80.09
Janus-Pro-1B | DeepSeek-LLM 1.5B | 1444.0 | 75.5 | 68.3 | 36.3 | 39.8 | - | 14.33‡ | 0.73 | 82.63
Janus-Pro-7B | DeepSeek-LLM 7B | 1567.1 | 79.2 | 72.1 | 41.0 | 50.0 | - | 13.48‡ | 0.80 | 84.19
MetaQuery-B | LLaVA-ov 0.5B | 1238.0 | 58.5 | 66.6 | 31.4 | 29.1 | 8.91 | 6.28 | 0.74† | 80.04
MetaQuery-L | Qwen2.5-VL 3B | 1574.3 | 78.6 | 73.8 | 53.1 | 63.2 | 8.87 | 6.35 | 0.78† | 81.10
MetaQuery-XL | Qwen2.5-VL 7B | 1685.2 | 83.5 | 76.9 | 58.6 | 66.6 | 8.69 | 6.02 | 0.80† | 82.05
", + "bbox": [ + 125, + 78, + 872, + 310 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4 Quantitative results on multimodal understanding and generation benchmarks. We report the COCO FID with Stable Diffusion v1.5 (Rombach et al., 2021), and other metrics with Sana (Xie et al., 2025). † denotes rewritten prompts. ‡ denotes results tested by us under the same settings.", + "bbox": [ + 109, + 321, + 883, + 364 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Instruction tuning. Furthermore, in this work, we rethink the data curation process for instruction tuning in image generation. All current methods rely on expert models to generate target images from source images and instructions (Ge et al., 2024; Xiao et al., 2025; Hu et al., 2024a). However, this approach is limited in scalability and may introduce biases, as the available expert models cover only a narrow range of image transformations. Inspired by MagicLens (Zhang et al., 2024), we construct instruction-tuning data using naturally occurring image pairs in web corpora. These corpora contain rich multimodal contexts with interleaved text and images on related subjects or topics. These image pairs often exhibit meaningful associations and specific relationships spanning a broad spectrum, from direct visual similarities to more subtle semantic connections (as shown in Figure 4). Such naturally occurring image pairs provide excellent and diverse supervision signals for instruction tuning. Based on this observation, we developed a data construction pipeline that mines image pairs and leverages MLLMs to generate open-ended instructions that capture their inter-image relationships. First, we collect grouped images from mmc4 (Zhu et al., 2023) core fewer-faces subset, where each image is accompanied by a caption. Using SigLIP (Zhai et al., 2023), we cluster images with similar captions (allowing up to 6 images per group, with a similarity threshold of 0.5). In each group, the image with minimum average similarity to the others is designated as the target, while the remaining images serve as source images. This process yields a total of 2.4M image pairs. Finally, we employ Qwen2.5-VL 3B (Bai et al., 2025) to generate instructions for each pair, describing how to transform the source images into the target image (See Appendix A for the detailed MLLM prompt). We experimented with instruction-tuning our Base size model on the proposed 2.4M dataset for 3 epochs, using the same learning rate schedule as in pre-training and a batch size of 2048.", + "bbox": [ + 109, + 388, + 883, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 109, + 715, + 276, + 734 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we first evaluate MetaQuery on various multimodal understanding and text-to-image generation benchmarks (Section 5.1). We demonstrate that MetaQuery can be trained to reconstruct input images (Section 5.2). This image reconstruction capability can be easily transferred to perform image editing (Section 5.3). Furthermore, we show that MetaQuery can be instruction-tuned to perform zero-shot subject-driven generation (Section 5.4). By leveraging our approach for collecting instruction tuning data from naturally existing image pairs, we also reveal that MetaQuery can unlock novel capabilities like visual association and logo design (also in Section 5.4). 
Additionally, we demonstrate that MetaQuery can benefit from the internal knowledge and reasoning capabilities of the frozen MLLM, overcoming common failures exhibited by other generation models (Section 5.5). Finally, we discuss the impact of different MLLM backbones and compare MetaQuery's behavior with the baseline that uses MLLM last layer embeddings (Section 5.6).", + "bbox": [ + 107, + 747, + 883, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b9c3a23589c96c41551027759ee3efb1784846504d3b7874cd009fd5d0fedae2.jpg", + "image_caption": [ + "A hot air balloon in the shape of a heart. Grand Canyon" + ], + "image_footnote": [], + "bbox": [ + 116, + 79, + 305, + 227 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d97f1701555c6678bd51e7b23d877a0a7ed770994c0ffded7c265bac83b8a108.jpg", + "image_caption": [ + "A British shorthair wearing sunglasses" + ], + "image_footnote": [], + "bbox": [ + 307, + 79, + 496, + 227 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fe14b58523fd1199df8a35521fb8b1b3670ba32ccfbb5bcd196c0ff9f20299ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 79, + 686, + 228 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4cd49de60a91872c7f33c2ed1240629712279838d61f4ad56a145108151af2af.jpg", + "image_caption": [ + "A butterfly lands directly on the nose of a German Shepherd." + ], + "image_footnote": [], + "bbox": [ + 686, + 79, + 879, + 229 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dde05b8ac87ef551f6c6722f0f3a69c9df1b0be719bc5033e26f38b356ace440.jpg", + "image_caption": [ + "A close-up of honey being drizzled onto pancakes, the thick liquid flowing slowly and smoothly." + ], + "image_footnote": [], + "bbox": [ + 116, + 253, + 305, + 402 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fcbdb08c63f65558cdf160396bedb8523d98a6851b524df3201eec1f27a037b8.jpg", + "image_caption": [ + "A sunken ship at the bottom of the ocean.", + "The word 'START' written on a street surface." + ], + "image_footnote": [], + "bbox": [ + 307, + 253, + 496, + 402 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b12ee3389eb59af291e4591659081ba05f6a90a9227a6beb350600174faf3f18.jpg", + "image_caption": [ + "A paper origami dragon riding a boat in waves.", + "An old rusted robot wearing pants and a jacket riding skis in a supermarket." + ], + "image_footnote": [], + "bbox": [ + 496, + 253, + 687, + 402 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/36fc86f721567164cf07c1649bdde9d18e478995d36fbc4ff03ad2d2af643563.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 253, + 879, + 402 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e8e1b7bf4cec9f8528f7381916efc241f178f796c22ba90ba0983c2557c0ac6c.jpg", + "image_caption": [ + "A close-up of a painter's brush touching the canvas, with paint spreading and blending in a swirl of colors.", + "Figure 5 Qualitative results of MetaQuery. Prompts are from PartiPrompt (Yu et al., 2022), Sana (Xie et al., 2025) and Movie Gen Bench (Polyak et al., 2024)." 
+ ], + "image_footnote": [], + "bbox": [ + 116, + 438, + 307, + 585 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/31a647ae7331f834c474e220328e1e72996313388bcd70d7447ba4ad111f554a.jpg", + "image_caption": [ + "A giant humanoid, made of fluffy blue cotton candy, stomping on the ground, and roaring to the sky, clear blue sky behind them." + ], + "image_footnote": [], + "bbox": [ + 307, + 438, + 496, + 585 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/af25337f48bf2a1fcba79b602b7512fb3badd1f589b46c51e3d25f9b094dac98.jpg", + "image_caption": [ + "Close-up of a bright blue parrot's feathers glittering in the light, showing its unique plumage and vibrant colors.", + "The reflection of a snowy mountain peak in a crystal-clear alpine lake, creating a perfect mirror image with a slight shimmering effect." + ], + "image_footnote": [], + "bbox": [ + 496, + 438, + 687, + 585 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7b499c68b1be62904daf47a04a892eb29c8a4c9f7d17cab06469846695a5b06a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 438, + 879, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 Image Understanding and Generation", + "text_level": 1, + "bbox": [ + 109, + 702, + 462, + 718 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Table 4, our model family demonstrates strong capabilities across both understanding and generation tasks. Benefiting from the flexible training approach that allows us to leverage arbitrary SOTA frozen MLLMs, all of our models in different sizes exhibit competitive performance on all understanding benchmarks (Fu et al., 2023; Liu et al., 2023; Li et al., 2023a; Yue et al., 2024; Yu et al., 2023). In terms of image generation, MetaQuery achieves SOTA visual quality on MJHQ-30K (Li et al., 2024b). Given the fact that MetaQuery works with frozen MLLMs, we can naturally connect with an arbitrary number of diffusion models. Since the base Sana-1.6B (Xie et al., 2025) model is already fine-tuned on aesthetic data, we adopt Stable Diffusion v1.5 (Rombach et al., 2021) for COCO FID evaluation. Our results suggest that after adapting it to powerful MLLMs, we can achieve improved visual quality as indicated by the COCO FID score of 8.69. 
This also establishes a new SOTA COCO FID score among all Stable Diffusion v1.5-based unified models including MetaMorph (Tong et al., 2024) (11.8) and Emu (Sun et al., 2024b) (11.66).", + "bbox": [ + 109, + 726, + 885, + 893 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/70b0361654016b250254d8a9163f2affb03ec516f759c8748b50254aae39b093.jpg", + "image_caption": [ + "Real Image" + ], + "image_footnote": [], + "bbox": [ + 148, + 77, + 264, + 345 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/a626c2ad7ce49892adf9b4655ab02d93bb5762fc0945595b273155aa85a7da62.jpg", + "image_caption": [ + "SEED", + "(Ge et al., 2023)" + ], + "image_footnote": [], + "bbox": [ + 264, + 78, + 380, + 345 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f5b932ba4b3af60c9595d23c21d7eb8e63a7d357dde1449e8e9a53b30d301584.jpg", + "image_caption": [ + "Emu", + "(Sun et al., 2024b)" + ], + "image_footnote": [], + "bbox": [ + 380, + 78, + 496, + 345 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/edcb210930a495f6468a261995e8cd43331cc08b7917631b87941c67eb85b779.jpg", + "image_caption": [ + "Emu2", + "(Sun et al., 2024a)" + ], + "image_footnote": [], + "bbox": [ + 496, + 78, + 612, + 345 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1cc8421e582befa6179530b3cee34505907ff5676ee7a14c60dbe6b90cbebfea.jpg", + "image_caption": [ + "GPT-4o", + "(OpenAI, 2025)" + ], + "image_footnote": [], + "bbox": [ + 612, + 79, + 725, + 345 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f7cb61c92aff1e1a951a059200c3802b85f7a6bad339597ac1ffbfbdec16a6fd.jpg", + "image_caption": [ + "MetaQuery-B" + ], + "image_footnote": [], + "bbox": [ + 725, + 79, + 841, + 345 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/34f8d42ab9a5c73eb8ad1887541fb0ba22d092eca4923276bffde665da809607.jpg", + "image_caption": [ + "Figure 6 Image reconstruction results. Results of SEED, Emu, and Emu2 are from Sun et al. (2024a).", + "Add a chef hat to the dog", + "There is a house in front of the mountain", + "Figure 7 Image editing results. This capability can be easily transferred from image reconstruction after lightweight fine-tuning." 
+ ], + "image_footnote": [], + "bbox": [ + 114, + 412, + 305, + 561 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "", + "bbox": [ + 210, + 412, + 305, + 561 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0c9fb1b5858a9ad2270627f7e7eb4d935d61f2172945f72b652a3578d4f08323.jpg", + "image_caption": [ + "Remove the 3-WAY sign" + ], + "image_footnote": [], + "bbox": [ + 305, + 412, + 400, + 561 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9c6c074753e5c0db73a2544daf4a539eb7a42932749c7546b1c4f05818f8732f.jpg", + "image_caption": [ + "Replace the dog with a golden retriever" + ], + "image_footnote": [], + "bbox": [ + 400, + 412, + 495, + 561 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/77bc365456c7f1a324580189122792f8db5d207f06c8180d8e1bf41346dbbc5d.jpg", + "image_caption": [ + "Change to cartoon style", + "Change it into linear style" + ], + "image_footnote": [], + "bbox": [ + 495, + 412, + 684, + 561 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/55aca72edd9bc6fd2358694218fae4f8e6d7e4c0987fbd61f62661ac387ebd35.jpg", + "image_caption": [ + "Chenage the bird to a blue one" + ], + "image_footnote": [], + "bbox": [ + 591, + 412, + 779, + 561 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "", + "bbox": [ + 684, + 412, + 779, + 561 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/4b3982cc5c2c19d0b2fd26b9f06c0bbf5ad2f7331e1a2829e8a6c44d7973e951.jpg", + "image_caption": [ + "Replace the fries with salad" + ], + "image_footnote": [], + "bbox": [ + 779, + 412, + 875, + 561 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In terms of prompt alignment, MetaQuery also achieves competitive performance on GenEval (Ghosh et al., 2023) and DPG-Bench (Hu et al., 2024b), beating all diffusion model-based approaches including Transfusion (Zhou et al., 2025) and JanusFlow (Ma et al., 2025). We note that there is a performance gap between MetaQuery and Janus-Pro (Chen et al., 2025), which auto-regressively generates image tokens. We suggest that this gap may be due to the different failure modes of diffusion models and auto-regressive models: diffusion models usually fail to correctly follow the prompt, while auto-regressive models may suffer from more visual artifacts, which are difficult to quantify by GenEval and DPG-Bench. We tested the MJHQ-30K FID score of Janus-Pro under the same setting as ours and found that, in terms of visual quality and artifact control, MetaQuery is significantly better than Janus-Pro (see Appendix B for visual comparison). Additionally, we find that MetaQuery achieves much better world knowledge reasoning capability than Janus-Pro, which we will elaborate on in Section 5.5. We also found that when scaling up the size of frozen LLMs, the generation quality and prompt alignment also improves. MetaQuery provides a simple and principled way for leveraging the most advanced multimodal LLMs within a unified modeling framework. 
We also provide qualitative results in Figure 5 to illustrate the text-to-image generation capability of MetaQuery.", + "bbox": [ + 109, + 651, + 887, + 864 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 936, + 504, + 948 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8e80f20eaaa577eb8299d8509c1b636a00a6be83769d922edded5c3bdd3e26fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 114, + 78, + 300, + 224 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/47401bdfe62a96981c1feaecf18c9d323881824be0d85aa784265e9a19d202c9.jpg", + "image_caption": [ + "Top view of the same berry bowl" + ], + "image_footnote": [], + "bbox": [ + 124, + 237, + 197, + 295 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/4149a2a9d8a0180d46b942d93daa35e3ebf8ddc950c9a4f1d92f2c80d369bf94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 307, + 78, + 496, + 224 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/61d5ab558a3fffaa336d26f1f2171b61a5a2ce74c40d4cd1af84fced70c45a4e.jpg", + "image_caption": [ + "The same robot in Minecraft" + ], + "image_footnote": [], + "bbox": [ + 316, + 237, + 392, + 295 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/a64fb9d0751c7bee00cfe32d761c4cf3cf5e293940406f5287724778e33f7dd8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 78, + 689, + 224 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/0030c217861cbcb07db1e9bf4497c51ebe5ff2aa3fc53234babc79d9c4ca53eb.jpg", + "image_caption": [ + "The toy on the head of the cat" + ], + "image_footnote": [], + "bbox": [ + 511, + 228, + 586, + 286 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/c209fc3515bcd2eea2d99ebc2865c335d0d995899ce2bd7acc199d906ccb6ecc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 229, + 679, + 287 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/ab4ad8e76c716680a874cb79f434e48a84893b5174e1de3b19378e6c89009ff3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 79, + 883, + 224 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/7997eceab53dfc59027dfeeb9d00f0d54c438ad627f519543fa4528dc0b321de.jpg", + "image_caption": [ + "The dog wearing sunglasses" + ], + "image_footnote": [], + "bbox": [ + 705, + 229, + 779, + 287 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/4419bc1f599e2b5e75cfd27fe839c397d664930cb4e4ea9f79c6d864c52b521d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 799, + 229, + 872, + 287 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/b3b8ac37fa24e66c37666f35b4a47270abc1ff2917320e9161e2464df3cd9ee6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 112, + 311, + 302, + 458 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/924d212b61c2dbd9a97e97845fd03efdda618a18e51f97ff75229de657600cb8.jpg", + "image_caption": [ + "The same model but a real one in New York city" + ], + "image_footnote": [], + "bbox": [ + 124, + 462, + 197, + 520 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/aa994d8f438857841af137ce9cf69d2079d81944e8baa713cb1e310ebbc5a032.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 307, + 311, + 496, + 458 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": 
"images/5d46cf09b50e4f98422bb851e2dd13b1734ea4bebc37a20eb08828e94d103b92.jpg", + "image_caption": [ + "The sky line view of the city from this building" + ], + "image_footnote": [], + "bbox": [ + 316, + 462, + 392, + 520 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/7d4c1fbb21fee1f8d29061754c600619607957874633c9de8a489aa2c276f85c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 311, + 689, + 458 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/6ba2cccb890caaf3726ac0b89e0a6017e25a056bd986ad530823545b6cc653b2.jpg", + "image_caption": [ + "The statue in the same city", + "Figure 8 Qualitative results for instruction tuning. Instruction-tuned MetaQuery achieves strong subject-driven capability (first row) and can even reason through the multimodal input to generate images (second row)." + ], + "image_footnote": [], + "bbox": [ + 511, + 462, + 586, + 520 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/a84d0b3eab726eb969d63ec7f14b4cf156b8489ede5810f95269439a025f5c84.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 340, + 862, + 426 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/b77f0c069a70b297d343734d4179c4df607c59c0e70650ea18864234e7002424.jpg", + "image_caption": [ + "A logo for the same teapot" + ], + "image_footnote": [], + "bbox": [ + 704, + 462, + 779, + 520 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/44c2d287d692818c451c8e0a355531754e120c05a69103809eff8e26775d92fd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods | DINO Score↑ | CLIP-I Score↑ | CLIP-T Score↑
Real Images (Oracle) | 0.774 | 0.885 | -
fine-tuning
Textual Inversion (Gal et al., 2023) | 0.569 | 0.780 | 0.255
DreamBooth (Ruiz et al., 2023) | 0.668 | 0.803 | 0.305
BLIP-Diffusion (Li et al., 2023b) | 0.670 | 0.805 | 0.302
zero-shot & test time tuning free
Re-Imagen (Chen et al., 2023) | 0.600 | 0.740 | 0.270
BLIP-Diffusion (Li et al., 2023b) | 0.594 | 0.779 | 0.300
Kosmos-G (Pan et al., 2024) | 0.694 | 0.847 | 0.287
MetaQuery-B-Instruct | 0.737 | 0.852 | 0.301
", + "bbox": [ + 212, + 583, + 784, + 757 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 5 Subject-driven generation results on DreamBench (Ruiz et al., 2023).", + "bbox": [ + 109, + 767, + 624, + 781 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.2 Image Reconstruction", + "text_level": 1, + "bbox": [ + 109, + 806, + 339, + 823 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We demonstrate that MetaQuery can be easily fine-tuned for image reconstruction tasks with a frozen MLLM (See Appendix C for more details). As shown in Figure 6, we compare our fine-tuned MetaQuery-B with existing diffusion autoencoders from various unified models, which reconstruct images from predicted visual features. Since these unified models are not explicitly fine-tuned for image reconstruction, their results are directly decoded from the vision encoder's output. Remarkably, even under this more constrained setup, our", + "bbox": [ + 107, + 830, + 885, + 906 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/4073247d621f7d479da91cfa9b7a98561d024ab67b1715f504a15a138a4b5b54.jpg", + "image_caption": [ + "Figure 9 MetaQuery leverages frozen MLLMs for reasoning- and knowledge-augmented generation, overcoming the failure cases encountered in the base Sana model. * denotes that the LLM last layer embeddings of input tokens are used for image generation; the model is in L size (Qwen2.5-VL 3B). This approach can be better than the base Sana model in some cases but fails to activate in-context learning to perform knowledge-augmented generation. Some of the test cases are from MetaMorph (Tong et al., 2024) and CommonsenseT2I (Fu et al., 2024)." + ], + "image_footnote": [], + "bbox": [ + 102, + 78, + 619, + 401 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/cb12222fe568d3d46a55c40b1795a095b91866ad756d26fc3e7316d7bcc184af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 629, + 78, + 880, + 397 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "fine-tuned MetaQuery-B can still achieve competitive performance, matching the best existing open-source model Emu2 (Sun et al., 2024a). When compared with GPT-4o (OpenAI, 2025), our model also achieves comparable quality.", + "bbox": [ + 109, + 513, + 888, + 561 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.3 Image Editing", + "text_level": 1, + "bbox": [ + 109, + 578, + 274, + 595 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in Figure 7, we demonstrate that MetaQuery can transfer its image reconstruction capability to perform image editing. We keep the MLLM backbone frozen and fine-tune our pre-trained Base model for only 1,000 steps on publicly available image editing data. Qualitative results demonstrate that MetaQuery performs effectively in these image-editing scenarios.", + "bbox": [ + 109, + 603, + 887, + 664 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.4 Instruction Tuning", + "text_level": 1, + "bbox": [ + 109, + 683, + 312, + 699 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We show that after being instruction-tuned on the proposed 2.4M dataset in Section 4, MetaQuery can achieve impressive zero-shot subject-driven generation performance, producing coherent results even with multiple highly customized subjects (the first row of Figure 8). 
Using various supervision signals, the instruction-tuned MetaQuery-B model surprisingly unlocks novel capabilities like visual association and logo design that go beyond copy-pasting (the second row of Figure 8). For example, in the first case, the model identifies the specific model of the input Porsche 911 car image, then correctly generates a novel front view for that model. In the second case, the model recognizes the input image of Rockefeller Center and imagines the view of New York City from the top of the Rockefeller Center.", + "bbox": [ + 109, + 705, + 887, + 828 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We also follow DreamBooth (Ruiz et al., 2023) by adopting DINO, CLIP-I, and CLIP-T scores to quantitatively evaluate our model on the DreamBench (Ruiz et al., 2023) dataset. As shown in Table 5, our MetaQuery-B-Instruct model achieves SOTA performance, outperforming existing models like Kosmos-G (Pan et al., 2024) that are explicitly trained on constructed substitution tasks for subject-driven generation.", + "bbox": [ + 109, + 834, + 888, + 897 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 936, + 506, + 949 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/bfd2befb3965d454b9427d56799e66522902a3b508eda96ad2dd99909ec45430.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methods | Cultural | Time | Space | Biology | Physics | Chemistry | Overall
GPT-4o** (OpenAI, 2025) | 0.94 | 0.64 | 0.98 | 0.93 | 0.98 | 0.95 | 0.89
Text-to-Image Models
SD-v1-5 (Rombach et al., 2021) | 0.34 | 0.35 | 0.32 | 0.28 | 0.29 | 0.21 | 0.32
SD-XL (Podell et al., 2023) | 0.43 | 0.48 | 0.47 | 0.44 | 0.45 | 0.27 | 0.43
PixArt-Alpha (Chen et al., 2024) | 0.45 | 0.50 | 0.48 | 0.49 | 0.56 | 0.34 | 0.47
playground-v2.5 (Li et al., 2024b) | 0.49 | 0.58 | 0.55 | 0.43 | 0.48 | 0.33 | 0.49
SD-3.5-large (Esser et al., 2024) | 0.44 | 0.50 | 0.58 | 0.44 | 0.52 | 0.31 | 0.46
FLUX.1-dev (Labs, 2024) | 0.48 | 0.58 | 0.62 | 0.42 | 0.51 | 0.35 | 0.50
Unified Models
show-o-512 (Xie et al., 2024) | 0.28 | 0.40 | 0.48 | 0.30 | 0.46 | 0.30 | 0.35
vila-u-7b-256 (Wu et al., 2025b) | 0.26 | 0.33 | 0.37 | 0.35 | 0.39 | 0.23 | 0.31
Emu3 (Wang et al., 2024) | 0.34 | 0.45 | 0.48 | 0.41 | 0.45 | 0.27 | 0.39
Janus-1.3B (Wu et al., 2025a) | 0.16 | 0.26 | 0.35 | 0.28 | 0.30 | 0.14 | 0.23
JanusFlow-1.3B (Ma et al., 2025) | 0.13 | 0.26 | 0.28 | 0.20 | 0.19 | 0.11 | 0.18
Janus-Pro-1B (Chen et al., 2025) | 0.20 | 0.28 | 0.45 | 0.24 | 0.32 | 0.16 | 0.26
Janus-Pro-7B (Chen et al., 2025) | 0.30 | 0.37 | 0.49 | 0.36 | 0.42 | 0.26 | 0.35
MetaQuery-B | 0.44 | 0.49 | 0.58 | 0.41 | 0.49 | 0.34 | 0.46
MetaQuery-L | 0.56 | 0.57 | 0.62 | 0.48 | 0.63 | 0.42 | 0.55
MetaQuery-XL | 0.56 | 0.55 | 0.62 | 0.49 | 0.63 | 0.41 | 0.55
", + "bbox": [ + 122, + 77, + 877, + 383 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/12319674663acc97cbdaaa2425ad0014a35fc74e9a349cf01d441b15deb73c8a.jpg", + "table_caption": [ + "Table 6 Comparison of world knowledge reasoning on WISE (Niu et al., 2025). The test cases in WISE are similar to the knowledge-augmented generation ones in Figure 9. MetaQuery achieves SOTA performance and significantly outperforms all other unified models. ** Results are evaluated by Yan et al. (2025) on a random subset of 200 out of 1000 samples." + ], + "table_footnote": [], + "table_body": "
Methods | w/o Neg. Prompt | w/ Neg. Prompt
DALL-E 3 (Ramesh et al., 2021) w/ rewrite | 40.17 | N/A
SD-XL (Podell et al., 2023) | 26.00 | 44.83
SD-3-medium (Esser et al., 2024) | 26.17 | 47.17
FLUX.1-dev (Labs, 2024) | 24.50 | 22.50
Sana-1.6B (Xie et al., 2025) | 25.17 | 43.33
MetaQuery-B | 27.33 | 51.50
MetaQuery-L | 28.83 | 57.67
", + "bbox": [ + 222, + 463, + 777, + 583 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 7 Comparison of visual commonsense reasoning capability on CommonsenseT2I (Fu et al., 2024).", + "bbox": [ + 109, + 593, + 792, + 608 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.5 Reasoning- and Knowledge-Augmented Generation", + "text_level": 1, + "bbox": [ + 109, + 633, + 578, + 648 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We show that the learnable queries can effectively leverage capabilities of the frozen LLM. This enables the model to better understand and follow complex prompts, including those requiring real-world knowledge and reasoning. As shown in Figure 9, for the left knowledge-augmented generation cases, MetaQuery-L can leverage world knowledge from the frozen MLLM and reason through the input question to generate the correct answer. For the right commonsense knowledge cases from CommonsenseT2I (Fu et al., 2024), the LLM provides better commonsense knowledge and enables MetaQuery to generate images that are consistent with the facts.", + "bbox": [ + 109, + 657, + 887, + 762 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To quantitatively evaluate MetaQuery's world knowledge reasoning capability, we employ the WISE (Niu et al., 2025) benchmark, which contains similar test cases to the knowledge-augmented generation examples shown in Figure 9. As demonstrated in Table 6, MetaQuery achieves SOTA performance, significantly outperforming all other unified models. Notably, before our work, existing unified models struggled to effectively leverage powerful MLLMs for reasoning and knowledge-augmented generation, resulting in inferior performance compared to text-to-image models. MetaQuery stands as the first unified model to successfully transfer the advanced capabilities of frozen MLLMs to image generation and exceed the performance of SOTA text-to-image models.", + "bbox": [ + 109, + 770, + 887, + 891 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/e30e2bc1074dc3fbe3a878833ad91c7795ce47ef394f738cf6e0ce70967c24f7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
LLM Backbones | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ | CommonsenseT2I ↑
Qwen2.5-3B | 6.20 | 0.79 | 81.34 | 56.00
Qwen2.5-3B-Instruct | 6.36 | 0.79 | 81.12 | 54.33
Qwen2.5-VL-3B-Instruct | 6.35 | 0.78 | 81.10 | 57.67
", + "bbox": [ + 179, + 78, + 816, + 141 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/f9420a60fcb422ba7b6a72e7f07a960eb7ba70e86790181846908582736eb2a3.jpg", + "table_caption": [ + "Table 8 Comparison across different LLM backbones. Image generation capability is mostly orthogonal to multimodal understanding capability." + ], + "table_footnote": [], + "table_body": "
Methods | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ | WiScore ↑ | CommonsenseT2I ↑
Ours-L w/ Last Layer Embed* | 6.41 | 0.78 | 81.23 | 0.48 | 52.83
Ours-L w/ MetaQueries | 6.35 | 0.78 | 81.10 | 0.55 | 57.67
", + "bbox": [ + 117, + 195, + 879, + 244 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 9 Comparison between MetaQuery and LLM last layer embedding. * denotes that the LLM last layer embeddings of input tokens are used for image generation. We observe comparable performance between MetaQuery and LLM last layer embedding on visual quality and prompt alignment. However, MetaQuery can activate in-context learning to perform knowledge-augmented generation, yielding much better performance on commonsense reasoning and world knowledge reasoning.", + "bbox": [ + 109, + 256, + 883, + 325 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We also quantitatively evaluate MetaQuery's commonsense reasoning capability on the CommonsenseT2I benchmark (Fu et al., 2024) in Table 7. For simplicity, we use CLIP (Radford et al., 2021) as the evaluator following their original implementation. Results show that MetaQuery significantly improves the performance of the base Sana model, achieving SOTA performance.", + "bbox": [ + 109, + 352, + 883, + 412 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.6 Discussion", + "text_level": 1, + "bbox": [ + 109, + 431, + 250, + 446 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Comparison over different LLM backbones. As shown in Table 8, to test the impact of employing different LLM backbones for MetaQuery, we carefully select a family of backbone models: pre-trained LLM (Qwen2.5-3B), instruction-tuned LLM (Qwen2.5-3B-Instruct), and instruction-tuned MLLM (Qwen2.5-VL-3B-Instruct). Both instruction-tuned models are initialized with the first pre-trained model checkpoint. Experimental results show that instruction tuning can achieve better (multimodal) understanding capabilities. However, the improvements are orthogonal to image generation performance when employed to provide multimodal generation conditions.", + "bbox": [ + 109, + 455, + 887, + 561 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Comparison with using last layer embeddings. As shown in Table 1, our learnable queries approach achieves comparable image generation quality and prompt alignment to using the LLM's last layer embeddings of input tokens. However, the last layer embedding method essentially treats the decoder-only LLM as a text encoder, which inherently limits its in-context learning capabilities. While this approach does improve upon the base Sana model in some cases as demonstrated in Figure 9, it struggles with the knowledge-augmented generation cases shown in the same figure. These cases require the LLM to first process and answer input questions before generating corresponding images, demanding in-context learning beyond what text encoders typically provide. This performance gap is quantitatively confirmed in Table 9, where MetaQuery significantly outperforms the last layer embedding approach on both WiScore and CommonsenseT2I benchmarks. Integrated natively with the LLM, MetaQuery naturally leverages its in-context learning capabilities, enabling the model to reason through questions and generate appropriate images.", + "bbox": [ + 109, + 579, + 885, + 746 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 109, + 768, + 259, + 786 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We presented MetaQueries, a simple interface connecting MLLMs (for understanding) and diffusion decoders (for generation), effective even when the MLLM is frozen. 
This approach yields state-of-the-art understanding and generation performance with straightforward implementation. By enabling transfer between modalities, MetaQueries successfully channels MLLM knowledge and reasoning into multimodal generation. While effective, we hypothesize that bridging the remaining gap to leading proprietary systems may primarily involve further data scaling. We hope MetaQueries provides a powerful, accessible baseline for future unified multimodal model development.", + "bbox": [ + 109, + 801, + 885, + 907 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 112, + 80, + 227, + 97 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. In NeurIPS, 2022.", + "Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025.", + "Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In CVPR, 2023.", + "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. In NeurIPS, 2020.", + "Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. In ICLR, 2024.", + "Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. In ICLR, 2023.", + "Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025.", + "Jacob Devlin. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL, 2019.", + "Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. In ICLR, 2024.", + "Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In ICML, 2024.", + "Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023.", + "Xingyu Fu, Muyu He, Yujie Lu, William Yang Wang, and Dan Roth. Commonsense-t2i challenge: Can text-to-image generation models understand commonsense? In $COLM$ , 2024.", + "Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. 
In ICLR, 2023.", + "Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023.", + "Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024.", + "Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. In NeurIPS, 2023.", + "Google. Experiment with gemini 2.0 flash native image generation, 2025. https://developers.googleblog.com/en/experiment-with-gemini-20-flash-native-image-generation/.", + "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In NeurIPS, 2017.", + "Hexiang Hu, Kelvin CK Chan, Yu-Chuan Su, Wenhu Chen, Yandong Li, Kihyuk Sohn, Yang Zhao, Xue Ben, Boqing Gong, William Cohen, et al. Instruct-imagen: Image generation with multi-modal instruction. In CVPR, 2024a.", + "Xiwei Hu, Rui Wang, Yixiao Fang, Bin Fu, Pei Cheng, and Gang Yu. Ella: Equip diffusion models with llm for enhanced semantic alignment. arXiv preprint arXiv:2403.05135, 2024b." + ], + "bbox": [ + 109, + 112, + 885, + 878 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yang Jin, Kun Xu, Liwei Chen, Chao Liao, Jianchao Tan, Bin Chen, Chenyi Lei, An Liu, Chengru Song, Xiaoqiang Lei, et al. Unified language-vision pretraining with dynamic discrete visual tokenization. In ICLR, 2024.", + "Jing Yu Koh, Daniel Fried, and Ruslan Salakhutdinov. Generating images with multimodal language models. In NeurIPS, 2023.", + "Black Forest Labs. Flux.1, 2024.", + "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a.", + "Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023a.", + "Daiqing Li, Aleks Kamko, Ehsan Akhgari, Ali Sabet, Linmiao Xu, and Suhail Doshi. Playground v2. 5: Three insights towards enhancing aesthetic quality in text-to-image generation. arXiv preprint arXiv:2402.17245, 2024b.", + "Dongxu Li, Junnan Li, and Steven CH Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. In NeurIPS, 2023b.", + "Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024a.", + "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2024b.", + "Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023.", + "Bingqi Ma, Zhuofan Zong, Guanglu Song, Hongsheng Li, and Yu Liu. Exploring the role of large language models in prompt encoding for diffusion models. 
In NeurIPS, 2024.", + "Yiyang Ma, Xingchao Liu, Xiaokang Chen, Wen Liu, Chengyue Wu, Zhiyu Wu, Zizheng Pan, Zhenda Xie, Haowei Zhang, Liang Zhao, et al. Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation. In CVPR, 2025.", + "Yuwei Niu, Munan Ning, Mengren Zheng, Bin Lin, Peng Jin, Jiaqi Liao, Kunpeng Ning, Bin Zhu, and Li Yuan. Wise: A world knowledge-informed semantic evaluation for text-to-image generation. arXiv preprint arXiv:2503.07265, 2025.", + "OpenAI. Introducing 4o image generation, 2025. https://openai.com/index/introducing-4o-image-generation/.", + "Xichen Pan, Li Dong, Shaohan Huang, Zhiliang Peng, Wenhu Chen, and Furu Wei. Kosmos-g: Generating images in context with multimodal large language models. In ICLR, 2024.", + "Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023.", + "Adam Polyak, Amit Zohar, Andrew Brown, Andros Tjandra, Animesh Sinha, Ann Lee, Apoorv Vyas, Bowen Shi, Chih-Yao Ma, Ching-Yao Chuang, et al. Movie gen: A cast of media foundation models. arXiv preprint arXiv:2410.13720, 2024.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021.", + "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In ICML, 2021.", + "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2021.", + "Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023.", + "Weijia Shi, Xiaochuang Han, Chunting Zhou, Weixin Liang, Xi Victoria Lin, Luke Zettlemoyer, and Lili Yu. Llamafusion: Adapting pretrained language models for multimodal generation. arXiv preprint arXiv:2412.15188, 2024." + ], + "bbox": [ + 111, + 80, + 885, + 890 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In $CVPR$ , 2024a.", + "Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative pretraining in multimodality. In ICLR, 2024b.", + "Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024a.", + "Qwen Team. Qwen2.5: A party of foundation models, 2024b.", + "Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024.", + "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 
Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.", + "Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024.", + "Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. In CVPR, 2025a.", + "Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023.", + "Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. In ICLR, 2025b.", + "Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and Zheng Liu. Omnigen: Unified image generation. In CVPR, 2025.", + "Enze Xie, Junsong Chen, Junyu Chen, Han Cai, Haotian Tang, Yujun Lin, Zhekai Zhang, Muyang Li, Ligeng Zhu, Yao Lu, et al. Sana: Efficient high-resolution image synthesis with linear diffusion transformers. In ICLR, 2025.", + "Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024.", + "Zhiyuan Yan, Junyan Ye, Weijia Li, Zilong Huang, Shenghai Yuan, Xiangyang He, Kaiqing Lin, Jun He, Conghui He, and Li Yuan. Gpt-imgeval: A comprehensive benchmark for diagnosing gpt4o in image generation. arXiv preprint arXiv:2504.02782, 2025.", + "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. In TMLR, 2022.", + "Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. \nMm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023.", + "Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In CVPR, 2024.", + "Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023.", + "Kai Zhang, Yi Luan, Hexiang Hu, Kenton Lee, Siyuan Qiao, Wenhu Chen, Yu Su, and Ming-Wei Chang. Magiclens: Self-supervised image retrieval with open-ended instructions. In ICML, 2024.", + "Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. In ICLR, 2025." + ], + "bbox": [ + 109, + 80, + 887, + 875 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 936, + 508, + 949 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wanrong Zhu, Jack Hessel, Anas Awadalla, Samir Yitzhak Gadre, Jesse Dodge, Alex Fang, Youngjae Yu, Ludwig Schmidt, William Yang Wang, and Yejin Choi. 
Multimodal C4: An open, billion-scale corpus of images interleaved with text. In NeurIPS, 2023.", + "Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al. Lumina next: Making lumina-t2x stronger and faster with next-dit. arXiv preprint arXiv:2406.18583, 2024." + ], + "bbox": [ + 112, + 80, + 885, + 172 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 936, + 508, + 948 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 109, + 75, + 250, + 101 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A Data Curation Details", + "text_level": 1, + "bbox": [ + 109, + 123, + 362, + 138 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For the data curation part, we use Qwen/Qwen2-VL-7B-Instruct² as our MLLM, The system prompt we are using is:", + "bbox": [ + 109, + 156, + 883, + 186 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Based on the provided of one or multiple source images, one target image, and their captions, create an interesting text prompt that can be used with the source images to generate the target image.", + "bbox": [ + 135, + 194, + 857, + 224 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This prompt should include:", + "bbox": [ + 135, + 224, + 341, + 239 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- one general and unspecific similarity shared with the source images (same jersey top, similar axe, similar building, etc).", + "- all differences that only the target image has." + ], + "bbox": [ + 161, + 239, + 857, + 285 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This prompt should NOT include:", + "bbox": [ + 135, + 286, + 383, + 300 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- any specific details that would allow generating the target image independently without referencing the source images.", + "bbox": [ + 161, + 301, + 857, + 330 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Remember the prompt should be concise and short. The generation has to be done by combining the source images and text prompts.", + "bbox": [ + 135, + 330, + 857, + 361 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B Qualitative Comparison with SOTA Open-Source Model on Text-to-Image Generation", + "text_level": 1, + "bbox": [ + 109, + 377, + 883, + 417 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We provide a qualitative comparison with Janus-Pro-7B (Chen et al., 2025) on MJHQ-30K (Li et al., 2024b) in Figure 10. We can see that MetaQuery-XL follows the prompt better and generates more visually appealing images than Janus-Pro-7B.", + "bbox": [ + 109, + 433, + 883, + 478 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C Training Objectives", + "text_level": 1, + "bbox": [ + 109, + 502, + 339, + 522 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/9f32362161c9b5ac0fc53f7dbadac96f6648ff2c8824c55b33ce850f06278d39.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Objective | Rel. Wall Time | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑
Text-to-Image | 1.0x | 7.43 | 0.56 | 75.35
Image Reconstruction | 2.79x | 27.42 | 0.32 | 68.36
Mix | 2.61x | 8.27 | 0.54 | 76.53
", + "bbox": [ + 205, + 541, + 790, + 604 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 10 Study on training objectives. Image reconstruction objective can be mixed with text-to-image objective to enable image reconstruction capabilities without harming visual quality and prompt alignment.", + "bbox": [ + 109, + 616, + 883, + 646 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We are using an MLLM for multimodal perception, besides the standard text-to-image objective, we can also use an image reconstruction objective to achieve alignment. In Table 10, we show that training with the text-to-image objective achieves much better performance than the image reconstruction objective. We demonstrate that a mix of both objectives can enable image reconstruction capabilities without being generally harmful to the T2I performance.", + "bbox": [ + 109, + 659, + 883, + 733 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct", + "bbox": [ + 129, + 898, + 465, + 912 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 936, + 506, + 948 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/14bedde3fdc5db9a2502133501221e3d482ecd91d5259320c8bb8efeadfee1fd.jpg", + "image_caption": [ + "Figure 10 Qualitative comparison with Janus-Pro-7B (Chen et al., 2025) on MJHQ-30K (Li et al., 2024b)." + ], + "image_footnote": [], + "bbox": [ + 101, + 138, + 883, + 818 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 936, + 508, + 949 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_model.json b/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f0314587285c558775dbe7845aac18863d25cf3b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_model.json @@ -0,0 +1,3307 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.269, + 0.061, + 0.702 + ], + "angle": 270, + "content": "arXiv:2504.06256v1 [cs.CV] 8 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.139, + 0.101, + 0.809, + 0.125 + ], + "angle": 0, + "content": "Transfer between Modalities with MetaQueries" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.13, + 0.842, + 0.162 + ], + "angle": 0, + "content": "Xichen Pan\\(^{1,2}\\), Satya Narayan Shukla\\(^{1,\\dagger}\\), Aashu Singh\\(^{1}\\), Zhuokai Zhao\\(^{1}\\), Shlok Kumar Mishra\\(^{1}\\), Jialiang Wang\\(^{1}\\), Zhiyang Xu\\(^{1}\\), Jiuhai Chen\\(^{1}\\), Kunpeng Li\\(^{1}\\), Felix Juefei-Xu\\(^{1}\\), Ji Hou\\(^{1,\\dagger}\\), Saining Xie\\(^{2,\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.167, + 0.354, + 0.183 + ], + "angle": 0, + "content": "\\(^{1}\\)Meta, \\(^{2}\\)New York University" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.184, + 0.248, + 0.199 + ], + "angle": 0, + "content": "\\(\\dagger\\) Equal advising" + }, + { + "type": "list", + "bbox": [ + 0.139, + 0.167, + 0.354, + 0.199 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.216, + 0.861, + 0.383 + ], + "angle": 0, + "content": "Unified multimodal models aim to integrate understanding (text output) and generation (pixel output), but aligning these different modalities within a single architecture often demands complex training recipes and 
careful data balancing. We introduce MetaQueries, a set of learnable queries that act as an efficient interface between autoregressive multimodal LLMs (MLLMs) and diffusion models. MetaQueries connects the MLLM's latents to the diffusion decoder, enabling knowledge-augmented image generation by leveraging the MLLM's deep understanding and reasoning capabilities. Our method simplifies training, requiring only paired image-caption data and standard diffusion objectives. Notably, this transfer is effective even when the MLLM backbone remains frozen, thereby preserving its state-of-the-art multimodal understanding capabilities while achieving strong generative performance. Additionally, our method is flexible and can be easily instruction-tuned for advanced applications such as image editing and subject-driven generation." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.402, + 0.272, + 0.416 + ], + "angle": 0, + "content": "Date: April 9, 2025" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.418, + 0.678, + 0.431 + ], + "angle": 0, + "content": "Correspondence: satyanshukla@meta.com, jihou@meta.com, saining.xie@nyu.edu" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.432, + 0.466, + 0.447 + ], + "angle": 0, + "content": "Project Page: https://xichenpan.com/metaquery" + }, + { + "type": "text", + "bbox": [ + 0.785, + 0.432, + 0.86, + 0.448 + ], + "angle": 0, + "content": "Meta" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.491, + 0.272, + 0.508 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.523, + 0.888, + 0.691 + ], + "angle": 0, + "content": "The quest for unified multimodal models capable of both deep understanding (typically resulting in textual outputs) and rich generation (resulting in pixel outputs) holds immense promise. Such systems could unlock synergistic capabilities (OpenAI, 2025; Google, 2025), where understanding informs generation and vice versa. However, effectively connecting these different output modalities poses considerable challenges—e.g. how do we effectively transfer the latent world knowledge from the autoregressive multimodal LLM to the image generator? Although significant progress has been made, most published approaches (Ge et al., 2024; Sun et al., 2024b; Tong et al., 2024; Jin et al., 2024; Liu et al., 2024a; Team, 2024a; Xie et al., 2024; Wang et al., 2024; Wu et al., 2025a; Chen et al., 2025; Dong et al., 2024; Zhou et al., 2025; Shi et al., 2024) rely on carefully tuning base multimodal LLMs (MLLMs) to handle both understanding and generation tasks. This involves complex architectural design, data/loss balancing, multiple training stages, and other complex training recipes—without these, optimizing one capability could compromise the other." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.697, + 0.888, + 0.82 + ], + "angle": 0, + "content": "In this paper, we aim to deliver the promise of unified models via a simpler philosophy: Render unto diffusion what is generative, and unto LLMs what is understanding. In other words, instead of building a monolithic system from scratch, we focus on effectively transferring capabilities between state-of-the-art, pre-trained models specialized for different output modalities. To operationalize this, we keep MLLMs frozen so they can focus on what they do best—understanding—while entrusting image generation to diffusion models. 
We then demonstrate that even under this frozen condition, the MLLM's inherent world knowledge, strong reasoning, and in-context learning capabilities can indeed be transferred to image generation, provided the right architectural bridge is in place." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.826, + 0.889, + 0.903 + ], + "angle": 0, + "content": "However, leveraging an MLLM—especially a frozen one—for both multimodal understanding and generation is far from straightforward. Although (frozen) LLMs have shown good performance as conditional text encoders in text-to-image generation (Zhuo et al., 2024; Xie et al., 2025; Ma et al., 2024), they are not compatible with many desired tasks in unified modeling, such as in-context learning or producing multimodal, interleaved output. The architectural bridge we design in this work is MetaQuery (Figure 1). MetaQuery feeds a set of" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.504, + 0.949 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.153, + 0.079, + 0.847, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.296, + 0.889, + 0.355 + ], + "angle": 0, + "content": "Figure 1 Overview of our model. Blue tokens maintain SOTA multimodal understanding; MetaQueries are learnable queries that directly applied to frozen MLLMs to query out conditions for generation. The model is tuned using only denoising objective with paired data. The generative diffusion models can be either frozen or further instruction-tuned for advanced generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.378, + 0.888, + 0.485 + ], + "angle": 0, + "content": "learnable queries directly into a frozen MLLM to extract multimodal conditions for multimodal generation. Our experiments reveal that, even without fine-tuning or enabling bi-directional attention, the frozen LLM serves as a powerful feature resampler (Alayrac et al., 2022), producing high-quality conditions for multimodal generation. Training unified models with MetaQueries requires only a modest amount of paired image-caption data to connect these prompted conditions to any conditional diffusion model. Because the entire MLLM stays intact for understanding, the training objective remains the original denoising objective—just as efficient and stable as fine-tuning a diffusion model." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.492, + 0.888, + 0.584 + ], + "angle": 0, + "content": "More specifically, previous unified models aim to train a single autoregressive transformer backbone to jointly model \\( p(\\text{text}, \\text{pixels}) \\). In contrast, we choose to use a token \\( \\rightarrow \\) [transformer] \\( \\rightarrow \\) [diffusion] \\( \\rightarrow \\) pixels paradigm, which might share a high-level philosophy with the concurrent GPT-4o image generation system, as hinted at by OpenAI (2025). This approach composes the MLLM's autoregressive prior with a powerful diffusion decoder, directly leveraging the frozen MLLM's strong capability in modeling compressed semantic representations, thus avoiding the more challenging task of directly generating pixels." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.59, + 0.888, + 0.697 + ], + "angle": 0, + "content": "To validate our approach, we conduct a series of controlled experiments, showing that MetaQuery1 outperforms the use of a frozen MLLM purely as a conditional text encoder for image generation. 
Moreover, MetaQuery can match the performance of fully tuning the MLLM backbone, yet it is significantly more efficient. We also systematically investigate the training strategy, including the number of tokens and architectural configurations. With just 25M publicly available image-caption pairs, we are able to train a family of unified models that not only preserves state-of-the-art (SOTA) performance in image understanding, but also achieves SOTA-level results in text-to-image generation across multiple benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.703, + 0.888, + 0.841 + ], + "angle": 0, + "content": "The promise of unified modeling goes beyond handling multimodal understanding and text-to-image generation in parallel. A deeper synergy is expected—one that taps into advanced MLLM abilities like reasoning, internal knowledge, multimodal perception, and in-context learning to enhance generation. Our results show that our method draws on the frozen MLLM's commonsense knowledge, achieving SOTA visual-commonsense generation on the CommonsenseT2I benchmark (Fu et al., 2024). Our approach also harnesses the built-in reasoning and in-context learning capabilities of frozen MLLMs, producing images from complex prompts—such as generating the United States flag in response to \"The national flag of the country where Yellowstone National Park is located.\" (See Figure 9 for examples.) We also benchmark this type of world knowledge reasoning capability on WISE (Niu et al., 2025) and demonstrate SOTA performance." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.847, + 0.888, + 0.893 + ], + "angle": 0, + "content": "Finally, by connecting, preserving, and enhancing multimodal input with MetaQueries and a frozen MLLM backbone, our model can be further instruction-tuned for advanced generation tasks such as image editing and subject-driven generation. We show that this can be achieved both efficiently and effectively using a scalable" + }, + { + "type": "page_footnote", + "bbox": [ + 0.13, + 0.9, + 0.522, + 0.914 + ], + "angle": 0, + "content": "1For simplicity, we also use MetaQuery to represent our method." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.081, + 0.888, + 0.143 + ], + "angle": 0, + "content": "data curation pipeline that directly leverages naturally occurring image pairs from web corpora, instead of depending on human-created pairs or synthetically generated data (Brooks et al., 2023; Hu et al., 2024a; Xiao et al., 2025). This natural supervision surprisingly unlocks several new capabilities beyond subject-driven generation, such as visual association and logo design (see Figure 8 for examples)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.149, + 0.888, + 0.21 + ], + "angle": 0, + "content": "In summary, we explore a simple yet underexplored alternative to unified multimodal modeling. Our method, MetaQuery, bridges frozen MLLM backbones and diffusion models. Experiments show that this framework delivers all the capabilities once thought to require MLLM fine-tuning while being much easier to train. The main results and findings in this paper include:" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.217, + 0.884, + 0.245 + ], + "angle": 0, + "content": "- With MetaQuery and frozen MLLM backbones, we maintain SOTA multimodal understanding performance while enabling SOTA-level multimodal generation." 
+ }, + { + "type": "text", + "bbox": [ + 0.135, + 0.255, + 0.883, + 0.283 + ], + "angle": 0, + "content": "MetaQuery can transfer the capabilities of MLLMs for reasoning- and knowledge-augmented image generation." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.293, + 0.884, + 0.321 + ], + "angle": 0, + "content": "MetaQuery can extract highly detailed visual conditions beyond semantic similarity from frozen MLLMs, enabling image reconstruction and editing tasks." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.33, + 0.883, + 0.359 + ], + "angle": 0, + "content": "- Our method can be easily instruction-tuned even with a frozen MLLM backbone, enabling advanced multimodal generation tasks like subject-driven generation." + }, + { + "type": "list", + "bbox": [ + 0.135, + 0.217, + 0.884, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.384, + 0.285, + 0.402 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.416, + 0.889, + 0.599 + ], + "angle": 0, + "content": "Unified understanding and generation models. Next-token prediction has proven to be an effective approach for training models to understand language (Devlin, 2019; Brown et al., 2020) and multimodal content (Liu et al., 2024b). Recently, the community has witnessed numerous efforts to extend the success of multimodal understanding (Liu et al., 2024b) to multimodal generation by training LLM backbones to generate images at the same time. However, unlike adapting text-only LLMs (Touvron et al., 2023) to understand multimodal content with one single next text token prediction objective (Liu et al., 2024b), generating multimodal content requires a different set of training objectives. SEED-X (Ge et al., 2024), Emu (Sun et al., 2024b), and MetaMorph (Tong et al., 2024) learn to regress image features; LaVIT (Jin et al., 2024), LWM (Liu et al., 2024a), Chameleon (Team, 2024a), Show-o (Xie et al., 2024), EMU3 (Wang et al., 2024), and Janus (Wu et al., 2025a; Chen et al., 2025) auto-regressively predict next visual tokens; and DreamLLM (Dong et al., 2024), Transfusion (Zhou et al., 2025) employ diffusion objectives. However, these approaches necessitate tuning LLMs for generating both modalities, naturally posing challenges in multi-task balancing." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.615, + 0.888, + 0.783 + ], + "angle": 0, + "content": "Unified models with frozen LLMs. Several studies have explored the use of frozen LLMs for multimodal understanding and generation. For instance, LMFusion (Shi et al., 2024) trains image generation expert feed-forward networks (FFNs) and query-key-value (QKV) modules in parallel with a frozen LLM backbone to deeply fuse input conditions and denoise visual outputs. However, this approach offers limited flexibility as it shares the same architecture as specific LLM backbones and requires training a separate set of generative modules for every single LLM backbone. This not only imposes more computational burden but also restricts the ability to leverage powerful pre-trained generative models. An earlier work, GILL (Koh et al., 2023), investigates feeding learnable tokens into frozen MLLMs. It employs a combined contrastive loss and regression loss for image retrieval and generation, rather than directly employing the denoising objective for more efficient training. 
Its application is restricted to contextual image generation and it does not systematically explore the impact of frozen MLLMs and learnable queries." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.805, + 0.262, + 0.825 + ], + "angle": 0, + "content": "3 MetaQuery" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.836, + 0.886, + 0.898 + ], + "angle": 0, + "content": "In this work, we propose MetaQuery, which losslessly augments understanding-only MLLMs with multimodal generation capabilities while preserving their original architecture designs and parameters intact. We carefully analyze the impact of applying MetaQuery on image generation performance. Results show that a frozen MLLM can provide strong conditions for multimodal generation." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.199, + 0.079, + 0.803, + 0.158 + ], + "angle": 0, + "content": "
Methods | # of Tokens | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑
LLM last layer embedding* | - | 7.49 | 0.55 | 78.41
Random queries | 64 | 8.59 | 0.35 | 54.81
Learnable queries | 64 | 7.43 | 0.56 | 75.35
Learnable queries | 512 | 7.34 | 0.56 | 78.43
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.167, + 0.885, + 0.197 + ], + "angle": 0, + "content": "Table 1 Study on different conditions for image generation. * denotes the embeddings of input tokens. Learnable queries achieve comparable performance to using all hidden states and can even surpass them with more tokens." + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.209, + 0.794, + 0.289 + ], + "angle": 0, + "content": "
Methods | Train LLM | Train DiT | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑
MLLM tuning | ✓ | ✗ | 7.75 | 0.58 | 78.97
E2E tuning | ✓ | ✓ | 6.28 | 0.61 | 79.39
Frozen MLLM | ✗ | ✗ | 7.43 | 0.56 | 75.35
Frozen MLLM | ✗ | ✓ | 6.06 | 0.61 | 76.66
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.298, + 0.886, + 0.342 + ], + "angle": 0, + "content": "Table 2 Study on strategies for adapting MLLMs. The methods without training LLM do not suffer from multimodal understanding degradation. Frozen MLLM achieves comparable performance to full MLLM tuning, with slightly lower prompt alignment but slightly improved visual quality." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.366, + 0.264, + 0.381 + ], + "angle": 0, + "content": "3.1 Architecture" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.39, + 0.886, + 0.527 + ], + "angle": 0, + "content": "MetaQuery bridges frozen MLLMs with diffusion models. We use randomly initialized learnable queries \\(\\mathcal{Q} \\in \\mathbb{R}^{N \\times D}\\) to query out the conditions \\(\\mathcal{C}\\) for generation. \\(N\\) is the number of queries and \\(D\\) is the dimension of the queries, which is the same as the MLLM hidden dimension. For simplicity and compatibility, we continue to use causal masking for the entire sequence rather than specifically enabling full attention for \\(\\mathcal{Q}\\). The conditions \\(\\mathcal{C}\\) are then fed into a trainable connector to align with the input space of text-to-image diffusion models. These models can be arbitrary as long as they have a conditional input interface; we simply replace its original condition with our \\(\\mathcal{C}\\). The whole model is trained with the original generation objective on paired data. In this paper, we focus on image generation tasks, but the model can be easily extended to other modalities like audio, video, 3D, and more." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.545, + 0.285, + 0.56 + ], + "angle": 0, + "content": "3.2 Design Choices" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.57, + 0.888, + 0.692 + ], + "angle": 0, + "content": "The proposed architecture involves two design choices: using learnable queries and keeping the MLLM backbone frozen. We explain the reasons why we adopted these choices and how they impact performance. For all experiments, unless otherwise specified, we use the same frozen LLaVA-OneVision-0.5B (Li et al., 2024a) MLLM backbone, frozen Sana-0.6B (Xie et al., 2025) diffusion model in 512 resolution, learnable queries with \\( N = 64 \\) tokens, and a connector with a 24-layer transformer encoder. All models are trained on 25M publicly available image caption pairs for 4 epochs. We report FID score (Heusel et al., 2017) on MJHQ-30K (Li et al., 2024b) for visual aesthetic quality, and GenEval (Ghosh et al., 2023) and DPG-Bench (Hu et al., 2024b) (both without prompt rewriting) for prompt alignment, respectively." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.708, + 0.886, + 0.86 + ], + "angle": 0, + "content": "Learnable queries. Many models like Lumina-Next (Zhuo et al., 2024), Sana (Xie et al., 2025), and Kosmos-G (Pan et al., 2024) use the (M)LLM's last layer embedding of input tokens as image generation conditions. However, this approach is not ideal for unified models as it is not compatible with many desired tasks in unified modeling, such as in-context learning or producing multimodal, interleaved output (we provide more discussion and comparison with MetaQuery in Section 5.6). As shown in Table 1, using learnable queries with just \\( N = 64 \\) tokens achieves image generation quality comparable to that of utilizing the last layer embedding of input tokens. 
While random queries produce acceptable FID scores, they struggle with prompt alignment, highlighting the importance of learnable queries. Additionally, since the last layer embedding setting naturally comes with a longer sequence length, we also tested learnable queries with \\( N = 512 \\) tokens, which further improves performance and even outperforms the last layer embedding approach." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.876, + 0.885, + 0.907 + ], + "angle": 0, + "content": "Frozen MLLM. Existing unified models train MLLMs to jointly model \\( p(\\text{text}, \\text{pixels}) \\), resulting in a more complicated training process and even downgraded understanding performance. MetaQuery keeps the original" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.085, + 0.496, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.314, + 0.393, + 0.328 + ], + "angle": 0, + "content": "(a) Text-to-image results." + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.079, + 0.885, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.586, + 0.315, + 0.804, + 0.328 + ], + "angle": 0, + "content": "(b) Image reconstruction results." + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.34, + 0.885, + 0.369 + ], + "angle": 0, + "content": "Figure 2 Study on the scaling of token numbers. As the number of tokens increases, text-to-image prompt alignment and image reconstruction results consistently improve." + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.381, + 0.887, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.58, + 0.66, + 0.596 + ], + "angle": 0, + "content": "Figure 3 Visaul samples for image reconstruction with different numbers of tokens." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.621, + 0.884, + 0.727 + ], + "angle": 0, + "content": "MLLM architecture and parameters interact to preserve SOTA understanding capabilities. However, for multimodal generation, a key concern is whether MetaQuery's performance with significantly fewer tunable parameters would be substantially worse than methods with full MLLM tuning. As shown in Table 2, frozen MLLMs achieve comparable performance to full MLLM tuning, with slightly lower prompt alignment but slightly improved visual quality. Tuning DiT can further improve performance for both settings. This suggests that MetaQuery is another possible training strategy, one that is simpler but also effective, as an alternative to fine-tuning the entire MLLM." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.746, + 0.287, + 0.763 + ], + "angle": 0, + "content": "3.3 Training Recipe" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.77, + 0.885, + 0.816 + ], + "angle": 0, + "content": "Based on insights from our design choices, we further study key training options for the two main components of MetaQuery: learnable queries and connectors. This study examines the number of tokens and connector design. Unless otherwise specified, all experiments in this section use the same setup as described in Section 3.2." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.833, + 0.884, + 0.909 + ], + "angle": 0, + "content": "Number of tokens. Many works (Wu et al., 2023; Pan et al., 2024; Ge et al., 2024) have employed learnable queries for condition extraction. 
However, they either set the number of tokens to match the fixed input sequence length of the image decoder (e.g., \\(N = 77\\) for the CLIP (Radford et al., 2021) text encoder in Stable Diffusion v1.5 (Rombach et al., 2021)), or use an arbitrary fixed number like \\(N = 64\\) without further investigation. Given that modern diffusion models like Lumina-Next (Zhuo et al., 2024) and Sana (Xie" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.123, + 0.079, + 0.875, + 0.158 + ], + "angle": 0, + "content": "
Architecture | # of Layers | Dims | # of Params | Rel. Wall Time | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑
Proj-Enc | 6 | 2304 | 517M | 1.06x | 7.80 | 0.53 | 73.37
Proj-Enc | 24 | 2304 | 2046M | 1.23x | 7.41 | 0.51 | 73.75
Enc-Proj | 6 | 896 | 84M | 1x | 7.73 | 0.49 | 71.39
Enc-Proj | 24 | 896 | 316M | 1.06x | 7.43 | 0.56 | 75.35
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.167, + 0.885, + 0.196 + ], + "angle": 0, + "content": "Table 3 Study on connector design. Aligning the conditions first in the same dimension as the MLLM hidden states (Enc-Proj) is more effective and parameter-efficient." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.222, + 0.888, + 0.344 + ], + "angle": 0, + "content": "et al., 2025) naturally accept variable-length conditions, determining the optimal number of tokens for learnable queries is crucial. In Figure 2, we provide a careful study of the number of tokens and observe promising scalability of MetaQueries. For text-to-image generation, visual quality begins to converge after 64 tokens, while more tokens consistently yield better prompt alignment. This is more evident for long captions, as GenEval with rewritten prompts increases more rapidly as the number of tokens increases. For image reconstruction, we observe that more tokens consistently improve the quality of reconstructed images (visual samples can be found in Figure 3). In our later experiments, we set the number of tokens to \\( N = 256 \\) for all models, as it achieves a good balance between performance and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.36, + 0.888, + 0.482 + ], + "angle": 0, + "content": "Connector design. The connector is another important component in MetaQuery. We use the same architecture as the Qwen2.5 (Team, 2024b) LLM, but enable bi-directional attention for the connector. We study two different designs: Projection Before Encoder (Proj-Enc) and Projection After Encoder (Enc-Proj). Proj-Enc first projects the conditions into the input dimension of the diffusion decoder, then uses a transformer encoder to align the conditions. On the other hand, Enc-Proj first uses a transformer encoder to align the conditions in the same dimension as the MLLM hidden states, then projects the conditions into the input dimension of the diffusion decoder. As shown in Table 3, the Enc-Proj design achieves better performance than the Proj-Enc design while having fewer parameters." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.505, + 0.296, + 0.525 + ], + "angle": 0, + "content": "4 Model Training" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.537, + 0.376, + 0.84 + ], + "angle": 0, + "content": "We train MetaQuery in two stages: the pre-training stage and the instruction tuning stage. Both training stages keep MLLMs frozen and fine-tune learnable queries, connectors, and diffusion models. We use three different MLLM backbones for different sizes: Base (LLaVA-OneVision 0.5B (Li et al., 2024a)), Large (Qwen2.5-VL 3B (Bai et al., 2025)), and X-Large (Qwen2.5-VL 7B (Bai et al., 2025)). We set the number of tokens to \\(N = 256\\) for all models, and utilize a 24-layer connector with Enc-Proj architecture. For image generation heads, we tested two different diffusion models: Stable Diffusion v1.5 (Rombach et al., 2021) and Sana-1.6B (Xie et al., 2025)." + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.534, + 0.882, + 0.778 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.394, + 0.787, + 0.885, + 0.843 + ], + "angle": 0, + "content": "Figure 4 Overview of instruction tuning data curation pipeline. We group images from web corpora based on caption similarity using the SigLIP (Zhai et al., 2023) model, then construct instruction-tuning data from these image pairs using an MLLM." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.856, + 0.886, + 0.903 + ], + "angle": 0, + "content": "Pre-training. We pre-train our model on 25M publicly available image-caption pairs for 8 epochs with a learning rate of 1e-4 and a global batch size of 4096. The learning rate follows a cosine decay schedule with a 4,000-step warmup period before gradually decreasing to 1e-5." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.504, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.127, + 0.079, + 0.874, + 0.311 + ], + "angle": 0, + "content": "
Methods | Base (M)LLM | MME-P | MMB | SEED | MMMU | MM-Vet | COCO FID ↓ | MJHQ FID ↓ | GenEval ↑ | DPG-Bench ↑
Emu | LLaMA 13B | - | - | - | - | - | 11.66 | - | - | -
DreamLLM | Vicuna 7B | - | - | - | - | 36.6 | 8.46 | - | - | -
Chameleon | From Scratch 7B | - | - | - | 22.4 | 8.3 | 26.74 | - | 0.39 | -
Show-o-512 | Phi-1.5 1.3B | 1097.2 | - | - | 26.7 | - | 9.24 | 15.18 | 0.68 | -
VILA-U | LLaMA-2 7B | 1401.8 | - | 59.0 | - | 33.5 | - | 7.69 | - | -
Emu3 | From Scratch 7B | - | 58.5 | 68.2 | 31.6 | 37.2 | 12.80 | - | 0.66† | 80.60
MetaMorph | LLaMA-3 8B | - | 75.2 | 71.8 | - | - | 11.8 | - | - | -
TokenFlow-XL | Qwen-2.5 14B | 1551.1 | 76.8 | 72.6 | 43.2 | 48.2 | - | - | 0.63† | 73.38
Transfusion | From Scratch 7B | - | - | - | - | - | 8.70 | - | 0.63 | -
LMFusion | LLaVA-Next 8B | 1603.7 | 72.1 | 72.5 | 41.7 | - | 8.20 | - | - | -
Janus | DeepSeek-LLM 1.5B | 1338.0 | 69.4 | 63.7 | 30.5 | 34.3 | 8.53 | 10.10 | 0.61 | -
JanusFlow | DeepSeek-LLM 1.5B | 1333.1 | 74.9 | 70.5 | 29.3 | 30.9 | - | 9.51 | 0.63 | 80.09
Janus-Pro-1B | DeepSeek-LLM 1.5B | 1444.0 | 75.5 | 68.3 | 36.3 | 39.8 | - | 14.33‡ | 0.73 | 82.63
Janus-Pro-7B | DeepSeek-LLM 7B | 1567.1 | 79.2 | 72.1 | 41.0 | 50.0 | - | 13.48‡ | 0.80 | 84.19
MetaQuery-B | LLaVA-ov 0.5B | 1238.0 | 58.5 | 66.6 | 31.4 | 29.1 | 8.91 | 6.28 | 0.74† | 80.04
MetaQuery-L | Qwen2.5-VL 3B | 1574.3 | 78.6 | 73.8 | 53.1 | 63.2 | 8.87 | 6.35 | 0.78† | 81.10
MetaQuery-XL | Qwen2.5-VL 7B | 1685.2 | 83.5 | 76.9 | 58.6 | 66.6 | 8.69 | 6.02 | 0.80† | 82.05
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.322, + 0.884, + 0.365 + ], + "angle": 0, + "content": "Table 4 Quantitative results on multimodal understanding and generation benchmarks. We report the COCO FID with Stable Diffusion v1.5 (Rombach et al., 2021), and other metrics with Sana (Xie et al., 2025). † denotes rewritten prompts. ‡ denotes results tested by us under the same settings." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.39, + 0.885, + 0.693 + ], + "angle": 0, + "content": "Instruction tuning. Furthermore, in this work, we rethink the data curation process for instruction tuning in image generation. All current methods rely on expert models to generate target images from source images and instructions (Ge et al., 2024; Xiao et al., 2025; Hu et al., 2024a). However, this approach is limited in scalability and may introduce biases, as the available expert models cover only a narrow range of image transformations. Inspired by MagicLens (Zhang et al., 2024), we construct instruction-tuning data using naturally occurring image pairs in web corpora. These corpora contain rich multimodal contexts with interleaved text and images on related subjects or topics. These image pairs often exhibit meaningful associations and specific relationships spanning a broad spectrum, from direct visual similarities to more subtle semantic connections (as shown in Figure 4). Such naturally occurring image pairs provide excellent and diverse supervision signals for instruction tuning. Based on this observation, we developed a data construction pipeline that mines image pairs and leverages MLLMs to generate open-ended instructions that capture their inter-image relationships. First, we collect grouped images from mmc4 (Zhu et al., 2023) core fewer-faces subset, where each image is accompanied by a caption. Using SigLIP (Zhai et al., 2023), we cluster images with similar captions (allowing up to 6 images per group, with a similarity threshold of 0.5). In each group, the image with minimum average similarity to the others is designated as the target, while the remaining images serve as source images. This process yields a total of 2.4M image pairs. Finally, we employ Qwen2.5-VL 3B (Bai et al., 2025) to generate instructions for each pair, describing how to transform the source images into the target image (See Appendix A for the detailed MLLM prompt). We experimented with instruction-tuning our Base size model on the proposed 2.4M dataset for 3 epochs, using the same learning rate schedule as in pre-training and a batch size of 2048." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.716, + 0.277, + 0.736 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.748, + 0.885, + 0.9 + ], + "angle": 0, + "content": "In this section, we first evaluate MetaQuery on various multimodal understanding and text-to-image generation benchmarks (Section 5.1). We demonstrate that MetaQuery can be trained to reconstruct input images (Section 5.2). This image reconstruction capability can be easily transferred to perform image editing (Section 5.3). Furthermore, we show that MetaQuery can be instruction-tuned to perform zero-shot subject-driven generation (Section 5.4). By leveraging our approach for collecting instruction tuning data from naturally existing image pairs, we also reveal that MetaQuery can unlock novel capabilities like visual association and logo design (also in Section 5.4). 
Additionally, we demonstrate that MetaQuery can benefit from the internal knowledge and reasoning capabilities of the frozen MLLM, overcoming common failures exhibited by other generation models (Section 5.5). Finally, we discuss the impact of different MLLM backbones and compare MetaQuery's behavior with the baseline that uses MLLM last layer embeddings (Section 5.6)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.117, + 0.08, + 0.307, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.119, + 0.232, + 0.289, + 0.252 + ], + "angle": 0, + "content": "A hot air balloon in the shape of a heart. Grand Canyon" + }, + { + "type": "image", + "bbox": [ + 0.308, + 0.08, + 0.498, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.31, + 0.232, + 0.484, + 0.251 + ], + "angle": 0, + "content": "A sunken ship at the bottom of the ocean." + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.08, + 0.687, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.231, + 0.685, + 0.242 + ], + "angle": 0, + "content": "A British shorthair wearing sunglasses" + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.08, + 0.88, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.688, + 0.231, + 0.875, + 0.251 + ], + "angle": 0, + "content": "A butterfly lands directly on the nose of a German Shepherd." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.254, + 0.307, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.119, + 0.406, + 0.295, + 0.437 + ], + "angle": 0, + "content": "A close-up of honey being drizzled onto pancakes, the thick liquid flowing slowly and smoothly." + }, + { + "type": "image", + "bbox": [ + 0.308, + 0.254, + 0.498, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.31, + 0.406, + 0.491, + 0.426 + ], + "angle": 0, + "content": "The word 'START' written on a street surface." + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.254, + 0.688, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.406, + 0.684, + 0.426 + ], + "angle": 0, + "content": "A paper origami dragon riding a boat in waves." + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.254, + 0.88, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.689, + 0.405, + 0.861, + 0.435 + ], + "angle": 0, + "content": "An old rusted robot wearing pants and a jacket riding skis in a supermarket." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.439, + 0.308, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.119, + 0.59, + 0.298, + 0.631 + ], + "angle": 0, + "content": "A close-up of a painter's brush touching the canvas, with paint spreading and blending in a swirl of colors." + }, + { + "type": "image", + "bbox": [ + 0.308, + 0.439, + 0.498, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.309, + 0.59, + 0.491, + 0.631 + ], + "angle": 0, + "content": "A giant humanoid, made of fluffy blue cotton candy, stomping on the ground, and roaring to the sky, clear blue sky behind them." 
+ }, + { + "type": "image", + "bbox": [ + 0.498, + 0.439, + 0.688, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.59, + 0.665, + 0.631 + ], + "angle": 0, + "content": "Close-up of a bright blue parrot's feathers glittering in the light, showing its unique plumage and vibrant colors." + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.439, + 0.88, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.689, + 0.59, + 0.87, + 0.631 + ], + "angle": 0, + "content": "The reflection of a snowy mountain peak in a crystal-clear alpine lake, creating a perfect mirror image with a slight shimmering effect." + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.649, + 0.885, + 0.677 + ], + "angle": 0, + "content": "Figure 5 Qualitative results of MetaQuery. Prompts are from PartiPrompt (Yu et al., 2022), Sana (Xie et al., 2025) and Movie Gen Bench (Polyak et al., 2024)." + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.703, + 0.463, + 0.719 + ], + "angle": 0, + "content": "5.1 Image Understanding and Generation" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.727, + 0.886, + 0.894 + ], + "angle": 0, + "content": "As shown in Table 4, our model family demonstrates strong capabilities across both understanding and generation tasks. Benefiting from the flexible training approach that allows us to leverage arbitrary SOTA frozen MLLMs, all of our models in different sizes exhibit competitive performance on all understanding benchmarks (Fu et al., 2023; Liu et al., 2023; Li et al., 2023a; Yue et al., 2024; Yu et al., 2023). In terms of image generation, MetaQuery achieves SOTA visual quality on MJHQ-30K (Li et al., 2024b). Given the fact that MetaQuery works with frozen MLLMs, we can naturally connect with an arbitrary number of diffusion models. Since the base Sana-1.6B (Xie et al., 2025) model is already fine-tuned on aesthetic data, we adopt Stable Diffusion v1.5 (Rombach et al., 2021) for COCO FID evaluation. Our results suggest that after adapting it to powerful MLLMs, we can achieve improved visual quality as indicated by the COCO FID score of 8.69. This also establishes a new SOTA COCO FID score among all Stable Diffusion v1.5-based unified models including MetaMorph (Tong et al., 2024) (11.8) and Emu (Sun et al., 2024b) (11.66)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.15, + 0.078, + 0.266, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.173, + 0.354, + 0.241, + 0.364 + ], + "angle": 0, + "content": "Real Image" + }, + { + "type": "image", + "bbox": [ + 0.266, + 0.079, + 0.382, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.305, + 0.35, + 0.342, + 0.359 + ], + "angle": 0, + "content": "SEED" + }, + { + "type": "image_caption", + "bbox": [ + 0.277, + 0.36, + 0.371, + 0.37 + ], + "angle": 0, + "content": "(Ge et al., 2023)" + }, + { + "type": "image", + "bbox": [ + 0.382, + 0.079, + 0.497, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.429, + 0.349, + 0.456, + 0.358 + ], + "angle": 0, + "content": "Emu" + }, + { + "type": "image_caption", + "bbox": [ + 0.409, + 0.359, + 0.475, + 0.378 + ], + "angle": 0, + "content": "(Sun et al., 2024b)" + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.079, + 0.613, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.54, + 0.349, + 0.574, + 0.358 + ], + "angle": 0, + "content": "Emu2" + }, + { + "type": "image_caption", + "bbox": [ + 0.524, + 0.359, + 0.589, + 0.378 + ], + "angle": 0, + "content": "(Sun et al., 2024a)" + }, + { + "type": "image", + "bbox": [ + 0.613, + 0.08, + 0.726, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.647, + 0.349, + 0.694, + 0.359 + ], + "angle": 0, + "content": "GPT-4o" + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.36, + 0.714, + 0.37 + ], + "angle": 0, + "content": "(OpenAI, 2025)" + }, + { + "type": "image", + "bbox": [ + 0.726, + 0.08, + 0.843, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.752, + 0.355, + 0.822, + 0.365 + ], + "angle": 0, + "content": "MetaQuery-B" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.384, + 0.787, + 0.399 + ], + "angle": 0, + "content": "Figure 6 Image reconstruction results. Results of SEED, Emu, and Emu2 are from Sun et al. (2024a)." 
+ }, + { + "type": "image", + "bbox": [ + 0.116, + 0.413, + 0.307, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.12, + 0.565, + 0.212, + 0.576 + ], + "angle": 0, + "content": "Add a chef hat to the dog" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.413, + 0.307, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.564, + 0.309, + 0.581 + ], + "angle": 0, + "content": "There is a house in front of the mountain" + }, + { + "type": "image", + "bbox": [ + 0.307, + 0.413, + 0.401, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.313, + 0.565, + 0.397, + 0.576 + ], + "angle": 0, + "content": "Remove the 3-WAY sign" + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.413, + 0.496, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.564, + 0.49, + 0.581 + ], + "angle": 0, + "content": "Replace the dog with a golden retriever" + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.413, + 0.685, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.565, + 0.587, + 0.576 + ], + "angle": 0, + "content": "Change to cartoon style" + }, + { + "type": "image", + "bbox": [ + 0.592, + 0.413, + 0.781, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.565, + 0.686, + 0.576 + ], + "angle": 0, + "content": "Change it into linear style" + }, + { + "type": "image", + "bbox": [ + 0.686, + 0.413, + 0.781, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.688, + 0.564, + 0.781, + 0.58 + ], + "angle": 0, + "content": "Chenage the bird to a blue one" + }, + { + "type": "image", + "bbox": [ + 0.781, + 0.413, + 0.877, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.782, + 0.565, + 0.877, + 0.576 + ], + "angle": 0, + "content": "Replace the fries with salad" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.597, + 0.885, + 0.626 + ], + "angle": 0, + "content": "Figure 7 Image editing results. This capability can be easily transferred from image reconstruction after lightweight fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.652, + 0.888, + 0.866 + ], + "angle": 0, + "content": "In terms of prompt alignment, MetaQuery also achieves competitive performance on GenEval (Ghosh et al., 2023) and DPG-Bench (Hu et al., 2024b), beating all diffusion model-based approaches including Transfusion (Zhou et al., 2025) and JanusFlow (Ma et al., 2025). We note that there is a performance gap between MetaQuery and Janus-Pro (Chen et al., 2025), which auto-regressively generates image tokens. We suggest that this gap may be due to the different failure modes of diffusion models and auto-regressive models: diffusion models usually fail to correctly follow the prompt, while auto-regressive models may suffer from more visual artifacts, which are difficult to quantify by GenEval and DPG-Bench. We tested the MJHQ-30K FID score of Janus-Pro under the same setting as ours and found that, in terms of visual quality and artifact control, MetaQuery is significantly better than Janus-Pro (see Appendix B for visual comparison). Additionally, we find that MetaQuery achieves much better world knowledge reasoning capability than Janus-Pro, which we will elaborate on in Section 5.5. 
We also found that when scaling up the size of frozen LLMs, the generation quality and prompt alignment also improves. MetaQuery provides a simple and principled way for leveraging the most advanced multimodal LLMs within a unified modeling framework. We also provide qualitative results in Figure 5 to illustrate the text-to-image generation capability of MetaQuery." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.938, + 0.505, + 0.949 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.079, + 0.301, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.238, + 0.199, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.202, + 0.253, + 0.298, + 0.282 + ], + "angle": 0, + "content": "Top view of the same berry bowl" + }, + { + "type": "image", + "bbox": [ + 0.308, + 0.079, + 0.497, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.238, + 0.393, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.396, + 0.253, + 0.486, + 0.281 + ], + "angle": 0, + "content": "The same robot in Minecraft" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.079, + 0.691, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.229, + 0.588, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.23, + 0.68, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.29, + 0.685, + 0.303 + ], + "angle": 0, + "content": "The toy on the head of the cat" + }, + { + "type": "image", + "bbox": [ + 0.696, + 0.08, + 0.885, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.23, + 0.781, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.8, + 0.23, + 0.874, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.715, + 0.29, + 0.868, + 0.304 + ], + "angle": 0, + "content": "The dog wearing sunglasses" + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.313, + 0.303, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.463, + 0.199, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.202, + 0.471, + 0.297, + 0.515 + ], + "angle": 0, + "content": "The same model but a real one in New York city" + }, + { + "type": "image", + "bbox": [ + 0.308, + 0.313, + 0.497, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.318, + 0.463, + 0.393, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.396, + 0.471, + 0.486, + 0.515 + ], + "angle": 0, + "content": "The sky line view of the city from this building" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.313, + 0.691, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.463, + 0.587, + 0.521 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.59, + 0.478, + 0.666, + 0.508 + ], + "angle": 0, + "content": "The statue in the same city" + }, + { + "type": "image", + "bbox": [ + 0.714, + 0.342, + 0.864, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.705, + 0.463, + 0.78, + 0.521 + ], + "angle": 0, + "content": 
null + }, + { + "type": "image_caption", + "bbox": [ + 0.783, + 0.478, + 0.862, + 0.507 + ], + "angle": 0, + "content": "A logo for the same teapot" + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.542, + 0.885, + 0.571 + ], + "angle": 0, + "content": "Figure 8 Qualitative results for instruction tuning. Instruction-tuned MetaQuery achieves strong subject-driven capability (first row) and can even reason through the multimodal input to generate images (second row)." + }, + { + "type": "table", + "bbox": [ + 0.214, + 0.584, + 0.785, + 0.758 + ], + "angle": 0, + "content": "
| Methods | DINO Score ↑ | CLIP-I Score ↑ | CLIP-T Score ↑ |
| --- | --- | --- | --- |
| Real Images (Oracle) | 0.774 | 0.885 | - |
| *fine-tuning* | | | |
| Textual Inversion (Gal et al., 2023) | 0.569 | 0.780 | 0.255 |
| DreamBooth (Ruiz et al., 2023) | 0.668 | 0.803 | 0.305 |
| BLIP-Diffusion (Li et al., 2023b) | 0.670 | 0.805 | 0.302 |
| *zero-shot & test time tuning free* | | | |
| Re-Imagen (Chen et al., 2023) | 0.600 | 0.740 | 0.270 |
| BLIP-Diffusion (Li et al., 2023b) | 0.594 | 0.779 | 0.300 |
| Kosmos-G (Pan et al., 2024) | 0.694 | 0.847 | 0.287 |
| MetaQuery-B-Instruct | 0.737 | 0.852 | 0.301 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.768, + 0.625, + 0.782 + ], + "angle": 0, + "content": "Table 5 Subject-driven generation results on DreamBench (Ruiz et al., 2023)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.808, + 0.341, + 0.824 + ], + "angle": 0, + "content": "5.2 Image Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.832, + 0.886, + 0.907 + ], + "angle": 0, + "content": "We demonstrate that MetaQuery can be easily fine-tuned for image reconstruction tasks with a frozen MLLM (See Appendix C for more details). As shown in Figure 6, we compare our fine-tuned MetaQuery-B with existing diffusion autoencoders from various unified models, which reconstruct images from predicted visual features. Since these unified models are not explicitly fine-tuned for image reconstruction, their results are directly decoded from the vision encoder's output. Remarkably, even under this more constrained setup, our" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.079, + 0.62, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.079, + 0.882, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.419, + 0.887, + 0.492 + ], + "angle": 0, + "content": "Figure 9 MetaQuery leverages frozen MLLMs for reasoning- and knowledge-augmented generation, overcoming the failure cases encountered in the base Sana model. * denotes that the LLM last layer embeddings of input tokens are used for image generation; the model is in L size (Qwen2.5-VL 3B). This approach can be better than the base Sana model in some cases but fails to activate in-context learning to perform knowledge-augmented generation. Some of the test cases are from MetaMorph (Tong et al., 2024) and CommonsenseT2I (Fu et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.515, + 0.889, + 0.562 + ], + "angle": 0, + "content": "fine-tuned MetaQuery-B can still achieve competitive performance, matching the best existing open-source model Emu2 (Sun et al., 2024a). When compared with GPT-4o (OpenAI, 2025), our model also achieves comparable quality." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.579, + 0.275, + 0.596 + ], + "angle": 0, + "content": "5.3 Image Editing" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.604, + 0.888, + 0.665 + ], + "angle": 0, + "content": "As shown in Figure 7, we demonstrate that MetaQuery can transfer its image reconstruction capability to perform image editing. We keep the MLLM backbone frozen and fine-tune our pre-trained Base model for only 1,000 steps on publicly available image editing data. Qualitative results demonstrate that MetaQuery performs effectively in these image-editing scenarios." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.684, + 0.313, + 0.7 + ], + "angle": 0, + "content": "5.4 Instruction Tuning" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.707, + 0.888, + 0.829 + ], + "angle": 0, + "content": "We show that after being instruction-tuned on the proposed 2.4M dataset in Section 4, MetaQuery can achieve impressive zero-shot subject-driven generation performance, producing coherent results even with multiple highly customized subjects (the first row of Figure 8). 
Using various supervision signals, the instruction-tuned MetaQuery-B model surprisingly unlocks novel capabilities like visual association and logo design that go beyond copy-pasting (the second row of Figure 8). For example, in the first case, the model identifies the specific model of the input Porsche 911 car image, then correctly generates a novel front view for that model. In the second case, the model recognizes the input image of Rockefeller Center and imagines the view of New York City from the top of the Rockefeller Center." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.835, + 0.889, + 0.898 + ], + "angle": 0, + "content": "We also follow DreamBooth (Ruiz et al., 2023) by adopting DINO, CLIP-I, and CLIP-T scores to quantitatively evaluate our model on the DreamBench (Ruiz et al., 2023) dataset. As shown in Table 5, our MetaQuery-B-Instruct model achieves SOTA performance, outperforming existing models like Kosmos-G (Pan et al., 2024) that are explicitly trained on constructed substitution tasks for subject-driven generation." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.95 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.123, + 0.078, + 0.878, + 0.384 + ], + "angle": 0, + "content": "
| Methods | Cultural | Time | Space | Biology | Physics | Chemistry | Overall |
| --- | --- | --- | --- | --- | --- | --- | --- |
| GPT-4o** (OpenAI, 2025) | 0.94 | 0.64 | 0.98 | 0.93 | 0.98 | 0.95 | 0.89 |
| *Text-to-Image Models* | | | | | | | |
| SD-v1-5 (Rombach et al., 2021) | 0.34 | 0.35 | 0.32 | 0.28 | 0.29 | 0.21 | 0.32 |
| SD-XL (Podell et al., 2023) | 0.43 | 0.48 | 0.47 | 0.44 | 0.45 | 0.27 | 0.43 |
| PixArt-Alpha (Chen et al., 2024) | 0.45 | 0.50 | 0.48 | 0.49 | 0.56 | 0.34 | 0.47 |
| playground-v2.5 (Li et al., 2024b) | 0.49 | 0.58 | 0.55 | 0.43 | 0.48 | 0.33 | 0.49 |
| SD-3.5-large (Esser et al., 2024) | 0.44 | 0.50 | 0.58 | 0.44 | 0.52 | 0.31 | 0.46 |
| FLUX.1-dev (Labs, 2024) | 0.48 | 0.58 | 0.62 | 0.42 | 0.51 | 0.35 | 0.50 |
| *Unified Models* | | | | | | | |
| show-o-512 (Xie et al., 2024) | 0.28 | 0.40 | 0.48 | 0.30 | 0.46 | 0.30 | 0.35 |
| vila-u-7b-256 (Wu et al., 2025b) | 0.26 | 0.33 | 0.37 | 0.35 | 0.39 | 0.23 | 0.31 |
| Emu3 (Wang et al., 2024) | 0.34 | 0.45 | 0.48 | 0.41 | 0.45 | 0.27 | 0.39 |
| Janus-1.3B (Wu et al., 2025a) | 0.16 | 0.26 | 0.35 | 0.28 | 0.30 | 0.14 | 0.23 |
| JanusFlow-1.3B (Ma et al., 2025) | 0.13 | 0.26 | 0.28 | 0.20 | 0.19 | 0.11 | 0.18 |
| Janus-Pro-1B (Chen et al., 2025) | 0.20 | 0.28 | 0.45 | 0.24 | 0.32 | 0.16 | 0.26 |
| Janus-Pro-7B (Chen et al., 2025) | 0.30 | 0.37 | 0.49 | 0.36 | 0.42 | 0.26 | 0.35 |
| MetaQuery-B | 0.44 | 0.49 | 0.58 | 0.41 | 0.49 | 0.34 | 0.46 |
| MetaQuery-L | 0.56 | 0.57 | 0.62 | 0.48 | 0.63 | 0.42 | 0.55 |
| MetaQuery-XL | 0.56 | 0.55 | 0.62 | 0.49 | 0.63 | 0.41 | 0.55 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.394, + 0.889, + 0.45 + ], + "angle": 0, + "content": "Table 6 Comparison of world knowledge reasoning on WISE (Niu et al., 2025). The test cases in WISE are similar to the knowledge-augmented generation ones in Figure 9. MetaQuery achieves SOTA performance and significantly outperforms all other unified models. ** Results are evaluated by Yan et al. (2025) on a random subset of 200 out of 1000 samples." + }, + { + "type": "table", + "bbox": [ + 0.223, + 0.464, + 0.778, + 0.584 + ], + "angle": 0, + "content": "
| Methods | w/o Neg. Prompt | w/ Neg. Prompt |
| --- | --- | --- |
| DALL-E 3 (Ramesh et al., 2021) w/ rewrite | 40.17 | N/A |
| SD-XL (Podell et al., 2023) | 26.00 | 44.83 |
| SD-3-medium (Esser et al., 2024) | 26.17 | 47.17 |
| FLUX.1-dev (Labs, 2024) | 24.50 | 22.50 |
| Sana-1.6B (Xie et al., 2025) | 25.17 | 43.33 |
| MetaQuery-B | 27.33 | 51.50 |
| MetaQuery-L | 28.83 | 57.67 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.594, + 0.794, + 0.609 + ], + "angle": 0, + "content": "Table 7 Comparison of visual commonsense reasoning capability on CommonsenseT2I (Fu et al., 2024)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.634, + 0.58, + 0.65 + ], + "angle": 0, + "content": "5.5 Reasoning- and Knowledge-Augmented Generation" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.658, + 0.888, + 0.763 + ], + "angle": 0, + "content": "We show that the learnable queries can effectively leverage capabilities of the frozen LLM. This enables the model to better understand and follow complex prompts, including those requiring real-world knowledge and reasoning. As shown in Figure 9, for the left knowledge-augmented generation cases, MetaQuery-L can leverage world knowledge from the frozen MLLM and reason through the input question to generate the correct answer. For the right commonsense knowledge cases from CommonsenseT2I (Fu et al., 2024), the LLM provides better commonsense knowledge and enables MetaQuery to generate images that are consistent with the facts." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.771, + 0.888, + 0.892 + ], + "angle": 0, + "content": "To quantitatively evaluate MetaQuery's world knowledge reasoning capability, we employ the WISE (Niu et al., 2025) benchmark, which contains similar test cases to the knowledge-augmented generation examples shown in Figure 9. As demonstrated in Table 6, MetaQuery achieves SOTA performance, significantly outperforming all other unified models. Notably, before our work, existing unified models struggled to effectively leverage powerful MLLMs for reasoning and knowledge-augmented generation, resulting in inferior performance compared to text-to-image models. MetaQuery stands as the first unified model to successfully transfer the advanced capabilities of frozen MLLMs to image generation and exceed the performance of SOTA text-to-image models." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.18, + 0.079, + 0.817, + 0.142 + ], + "angle": 0, + "content": "
| LLM Backbones | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ | CommonsenseT2I ↑ |
| --- | --- | --- | --- | --- |
| Qwen2.5-3B | 6.20 | 0.79 | 81.34 | 56.00 |
| Qwen2.5-3B-Instruct | 6.36 | 0.79 | 81.12 | 54.33 |
| Qwen2.5-VL-3B-Instruct | 6.35 | 0.78 | 81.10 | 57.67 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.154, + 0.885, + 0.181 + ], + "angle": 0, + "content": "Table 8 Comparison across different LLM backbones. Image generation capability is mostly orthogonal to multimodal understanding capability." + }, + { + "type": "table", + "bbox": [ + 0.119, + 0.196, + 0.88, + 0.246 + ], + "angle": 0, + "content": "
| Methods | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ | WiScore ↑ | CommonsenseT2I ↑ |
| --- | --- | --- | --- | --- | --- |
| Ours-L w/ Last Layer Embed* | 6.41 | 0.78 | 81.23 | 0.48 | 52.83 |
| Ours-L w/ MetaQueries | 6.35 | 0.78 | 81.10 | 0.55 | 57.67 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.257, + 0.885, + 0.327 + ], + "angle": 0, + "content": "Table 9 Comparison between MetaQuery and LLM last layer embedding. * denotes that the LLM last layer embeddings of input tokens are used for image generation. We observe comparable performance between MetaQuery and LLM last layer embedding on visual quality and prompt alignment. However, MetaQuery can activate in-context learning to perform knowledge-augmented generation, yielding much better performance on commonsense reasoning and world knowledge reasoning." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.353, + 0.885, + 0.414 + ], + "angle": 0, + "content": "We also quantitatively evaluate MetaQuery's commonsense reasoning capability on the CommonsenseT2I benchmark (Fu et al., 2024) in Table 7. For simplicity, we use CLIP (Radford et al., 2021) as the evaluator following their original implementation. Results show that MetaQuery significantly improves the performance of the base Sana model, achieving SOTA performance." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.433, + 0.25, + 0.447 + ], + "angle": 0, + "content": "5.6 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.457, + 0.888, + 0.562 + ], + "angle": 0, + "content": "Comparison over different LLM backbones. As shown in Table 8, to test the impact of employing different LLM backbones for MetaQuery, we carefully select a family of backbone models: pre-trained LLM (Qwen2.5-3B), instruction-tuned LLM (Qwen2.5-3B-Instruct), and instruction-tuned MLLM (Qwen2.5-VL-3B-Instruct). Both instruction-tuned models are initialized with the first pre-trained model checkpoint. Experimental results show that instruction tuning can achieve better (multimodal) understanding capabilities. However, the improvements are orthogonal to image generation performance when employed to provide multimodal generation conditions." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.58, + 0.886, + 0.747 + ], + "angle": 0, + "content": "Comparison with using last layer embeddings. As shown in Table 1, our learnable queries approach achieves comparable image generation quality and prompt alignment to using the LLM's last layer embeddings of input tokens. However, the last layer embedding method essentially treats the decoder-only LLM as a text encoder, which inherently limits its in-context learning capabilities. While this approach does improve upon the base Sana model in some cases as demonstrated in Figure 9, it struggles with the knowledge-augmented generation cases shown in the same figure. These cases require the LLM to first process and answer input questions before generating corresponding images, demanding in-context learning beyond what text encoders typically provide. This performance gap is quantitatively confirmed in Table 9, where MetaQuery significantly outperforms the last layer embedding approach on both WiScore and CommonsenseT2I benchmarks. Integrated natively with the LLM, MetaQuery naturally leverages its in-context learning capabilities, enabling the model to reason through questions and generate appropriate images." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.77, + 0.26, + 0.787 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.803, + 0.886, + 0.909 + ], + "angle": 0, + "content": "We presented MetaQueries, a simple interface connecting MLLMs (for understanding) and diffusion decoders (for generation), effective even when the MLLM is frozen. 
This approach yields state-of-the-art understanding and generation performance with straightforward implementation. By enabling transfer between modalities, MetaQueries successfully channels MLLM knowledge and reasoning into multimodal generation. While effective, we hypothesize that bridging the remaining gap to leading proprietary systems may primarily involve further data scaling. We hope MetaQueries provides a powerful, accessible baseline for future unified multimodal model development." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.082, + 0.228, + 0.098 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.113, + 0.887, + 0.155 + ], + "angle": 0, + "content": "Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. In NeurIPS, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.161, + 0.885, + 0.192 + ], + "angle": 0, + "content": "Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.197, + 0.887, + 0.225 + ], + "angle": 0, + "content": "Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In CVPR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.233, + 0.885, + 0.261 + ], + "angle": 0, + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. In NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.269, + 0.885, + 0.31 + ], + "angle": 0, + "content": "Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. In ICLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.318, + 0.885, + 0.346 + ], + "angle": 0, + "content": "Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. In ICLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.353, + 0.885, + 0.393 + ], + "angle": 0, + "content": "Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.402, + 0.872, + 0.416 + ], + "angle": 0, + "content": "Jacob Devlin. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.423, + 0.885, + 0.451 + ], + "angle": 0, + "content": "Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. In ICLR, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.459, + 0.885, + 0.499 + ], + "angle": 0, + "content": "Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.508, + 0.885, + 0.548 + ], + "angle": 0, + "content": "Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.557, + 0.885, + 0.584 + ], + "angle": 0, + "content": "Xingyu Fu, Muyu He, Yujie Lu, William Yang Wang, and Dan Roth. Commonsense-t2i challenge: Can text-to-image generation models understand commonsense? In \\(COLM\\), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.592, + 0.885, + 0.62 + ], + "angle": 0, + "content": "Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. In ICLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.627, + 0.885, + 0.655 + ], + "angle": 0, + "content": "Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.662, + 0.885, + 0.703 + ], + "angle": 0, + "content": "Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.712, + 0.885, + 0.739 + ], + "angle": 0, + "content": "Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. In NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.747, + 0.885, + 0.774 + ], + "angle": 0, + "content": "Google. Experiment with gemini 2.0 flash native image generation, 2025. https://developers.googleblog.com/en/experiment-with-gemini-20-flash-native-image-generation/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.782, + 0.885, + 0.81 + ], + "angle": 0, + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In NeurIPS, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.817, + 0.885, + 0.845 + ], + "angle": 0, + "content": "Hexiang Hu, Kelvin CK Chan, Yu-Chuan Su, Wenhu Chen, Yandong Li, Kihyuk Sohn, Yang Zhao, Xue Ben, Boqing Gong, William Cohen, et al. Instruct-imagen: Image generation with multi-modal instruction. In CVPR, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.852, + 0.885, + 0.88 + ], + "angle": 0, + "content": "Xiwei Hu, Rui Wang, Yixiao Fang, Bin Fu, Pei Cheng, and Gang Yu. Ella: Equip diffusion models with llm for enhanced semantic alignment. arXiv preprint arXiv:2403.05135, 2024b." 
+ }, + { + "type": "list", + "bbox": [ + 0.111, + 0.113, + 0.887, + 0.88 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.081, + 0.885, + 0.11 + ], + "angle": 0, + "content": "Yang Jin, Kun Xu, Liwei Chen, Chao Liao, Jianchao Tan, Bin Chen, Chenyi Lei, An Liu, Chengru Song, Xiaoqiang Lei, et al. Unified language-vision pretraining with dynamic discrete visual tokenization. In ICLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.117, + 0.884, + 0.145 + ], + "angle": 0, + "content": "Jing Yu Koh, Daniel Fried, and Ruslan Salakhutdinov. Generating images with multimodal language models. In NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.153, + 0.331, + 0.167 + ], + "angle": 0, + "content": "Black Forest Labs. Flux.1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.174, + 0.885, + 0.202 + ], + "angle": 0, + "content": "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.209, + 0.884, + 0.237 + ], + "angle": 0, + "content": "Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.244, + 0.884, + 0.273 + ], + "angle": 0, + "content": "Daiqing Li, Aleks Kamko, Ehsan Akhgari, Ali Sabet, Linmiao Xu, and Suhail Doshi. Playground v2. 5: Three insights towards enhancing aesthetic quality in text-to-image generation. arXiv preprint arXiv:2402.17245, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.279, + 0.884, + 0.308 + ], + "angle": 0, + "content": "Dongxu Li, Junnan Li, and Steven CH Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. In NeurIPS, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.315, + 0.884, + 0.343 + ], + "angle": 0, + "content": "Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.35, + 0.832, + 0.365 + ], + "angle": 0, + "content": "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.371, + 0.887, + 0.412 + ], + "angle": 0, + "content": "Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.42, + 0.884, + 0.449 + ], + "angle": 0, + "content": "Bingqi Ma, Zhuofan Zong, Guanglu Song, Hongsheng Li, and Yu Liu. Exploring the role of large language models in prompt encoding for diffusion models. In NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.456, + 0.885, + 0.497 + ], + "angle": 0, + "content": "Yiyang Ma, Xingchao Liu, Xiaokang Chen, Wen Liu, Chengyue Wu, Zhiyu Wu, Zizheng Pan, Zhenda Xie, Haowei Zhang, Liang Zhao, et al. 
Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation. In CVPR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.505, + 0.885, + 0.545 + ], + "angle": 0, + "content": "Yuwei Niu, Munan Ning, Mengren Zheng, Bin Lin, Peng Jin, Jiaqi Liao, Kunpeng Ning, Bin Zhu, and Li Yuan. Wise: A world knowledge-informed semantic evaluation for text-to-image generation. arXiv preprint arXiv:2503.07265, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.554, + 0.856, + 0.569 + ], + "angle": 0, + "content": "OpenAI. Introducing 4o image generation, 2025. https://openai.com/index/introducing-4o-image-generation/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.575, + 0.884, + 0.604 + ], + "angle": 0, + "content": "Xichen Pan, Li Dong, Shaohan Huang, Zhiliang Peng, Wenhu Chen, and Furu Wei. Kosmos-g: Generating images in context with multimodal large language models. In ICLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.61, + 0.885, + 0.651 + ], + "angle": 0, + "content": "Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.659, + 0.885, + 0.701 + ], + "angle": 0, + "content": "Adam Polyak, Amit Zohar, Andrew Brown, Andros Tjandra, Animesh Sinha, Ann Lee, Apoorv Vyas, Bowen Shi, Chih-Yao Ma, Ching-Yao Chuang, et al. Movie gen: A cast of media foundation models. arXiv preprint arXiv:2410.13720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.709, + 0.885, + 0.75 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.757, + 0.885, + 0.786 + ], + "angle": 0, + "content": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.793, + 0.885, + 0.821 + ], + "angle": 0, + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.828, + 0.884, + 0.856 + ], + "angle": 0, + "content": "Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.863, + 0.885, + 0.891 + ], + "angle": 0, + "content": "Weijia Shi, Xiaochuang Han, Chunting Zhou, Weixin Liang, Xi Victoria Lin, Luke Zettlemoyer, and Lili Yu. Llamafusion: Adapting pretrained language models for multimodal generation. arXiv preprint arXiv:2412.15188, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.112, + 0.081, + 0.887, + 0.891 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.081, + 0.885, + 0.11 + ], + "angle": 0, + "content": "Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In \\(CVPR\\), 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.116, + 0.885, + 0.147 + ], + "angle": 0, + "content": "Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative pretraining in multimodality. In ICLR, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.152, + 0.887, + 0.168 + ], + "angle": 0, + "content": "Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.174, + 0.521, + 0.189 + ], + "angle": 0, + "content": "Qwen Team. Qwen2.5: A party of foundation models, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.195, + 0.888, + 0.238 + ], + "angle": 0, + "content": "Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.244, + 0.887, + 0.286 + ], + "angle": 0, + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.293, + 0.887, + 0.322 + ], + "angle": 0, + "content": "Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.328, + 0.887, + 0.37 + ], + "angle": 0, + "content": "Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. In CVPR, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.377, + 0.887, + 0.406 + ], + "angle": 0, + "content": "Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.413, + 0.887, + 0.454 + ], + "angle": 0, + "content": "Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. In ICLR, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.462, + 0.885, + 0.491 + ], + "angle": 0, + "content": "Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and Zheng Liu. Omnigen: Unified image generation. In CVPR, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.497, + 0.885, + 0.527 + ], + "angle": 0, + "content": "Enze Xie, Junsong Chen, Junyu Chen, Han Cai, Haotian Tang, Yujun Lin, Zhekai Zhang, Muyang Li, Ligeng Zhu, Yao Lu, et al. Sana: Efficient high-resolution image synthesis with linear diffusion transformers. In ICLR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.532, + 0.885, + 0.575 + ], + "angle": 0, + "content": "Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.581, + 0.885, + 0.623 + ], + "angle": 0, + "content": "Zhiyuan Yan, Junyan Ye, Weijia Li, Zilong Huang, Shenghai Yuan, Xiangyang He, Kaiqing Lin, Jun He, Conghui He, and Li Yuan. Gpt-imgeval: A comprehensive benchmark for diagnosing gpt4o in image generation. arXiv preprint arXiv:2504.02782, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.63, + 0.885, + 0.672 + ], + "angle": 0, + "content": "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. In TMLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.679, + 0.885, + 0.709 + ], + "angle": 0, + "content": "Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. \nMm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.715, + 0.885, + 0.757 + ], + "angle": 0, + "content": "Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.764, + 0.885, + 0.792 + ], + "angle": 0, + "content": "Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.799, + 0.885, + 0.828 + ], + "angle": 0, + "content": "Kai Zhang, Yi Luan, Hexiang Hu, Kenton Lee, Siyuan Qiao, Wenhu Chen, Yu Su, and Ming-Wei Chang. Magiclens: Self-supervised image retrieval with open-ended instructions. In ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.834, + 0.885, + 0.876 + ], + "angle": 0, + "content": "Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. In ICLR, 2025." + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.081, + 0.888, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.081, + 0.887, + 0.127 + ], + "angle": 0, + "content": "Wanrong Zhu, Jack Hessel, Anas Awadalla, Samir Yitzhak Gadre, Jesse Dodge, Alex Fang, Youngjae Yu, Ludwig Schmidt, William Yang Wang, and Yejin Choi. 
Multimodal C4: An open, billion-scale corpus of images interleaved with text. In NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.131, + 0.887, + 0.174 + ], + "angle": 0, + "content": "Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al. Lumina next: Making lumina-t2x stronger and faster with next-dit. arXiv preprint arXiv:2406.18583, 2024." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.081, + 0.887, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.938, + 0.509, + 0.949 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.111, + 0.077, + 0.251, + 0.102 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.124, + 0.363, + 0.14 + ], + "angle": 0, + "content": "A Data Curation Details" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.157, + 0.885, + 0.187 + ], + "angle": 0, + "content": "For the data curation part, we use Qwen/Qwen2-VL-7B-Instruct² as our MLLM, The system prompt we are using is:" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.195, + 0.859, + 0.225 + ], + "angle": 0, + "content": "Based on the provided of one or multiple source images, one target image, and their captions, create an interesting text prompt that can be used with the source images to generate the target image." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.226, + 0.343, + 0.24 + ], + "angle": 0, + "content": "This prompt should include:" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.241, + 0.858, + 0.27 + ], + "angle": 0, + "content": "- one general and unspecific similarity shared with the source images (same jersey top, similar axe, similar building, etc)." + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.272, + 0.506, + 0.286 + ], + "angle": 0, + "content": "- all differences that only the target image has." + }, + { + "type": "list", + "bbox": [ + 0.162, + 0.241, + 0.858, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.287, + 0.385, + 0.301 + ], + "angle": 0, + "content": "This prompt should NOT include:" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.302, + 0.858, + 0.331 + ], + "angle": 0, + "content": "- any specific details that would allow generating the target image independently without referencing the source images." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.332, + 0.858, + 0.362 + ], + "angle": 0, + "content": "Remember the prompt should be concise and short. The generation has to be done by combining the source images and text prompts." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.378, + 0.885, + 0.418 + ], + "angle": 0, + "content": "B Qualitative Comparison with SOTA Open-Source Model on Text-to-Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.434, + 0.885, + 0.479 + ], + "angle": 0, + "content": "We provide a qualitative comparison with Janus-Pro-7B (Chen et al., 2025) on MJHQ-30K (Li et al., 2024b) in Figure 10. We can see that MetaQuery-XL follows the prompt better and generates more visually appealing images than Janus-Pro-7B." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.503, + 0.341, + 0.523 + ], + "angle": 0, + "content": "C Training Objectives" + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.542, + 0.791, + 0.606 + ], + "angle": 0, + "content": "
| Objective | Rel. Wall Time | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ |
| --- | --- | --- | --- | --- |
| Text-to-Image | 1.0x | 7.43 | 0.56 | 75.35 |
| Image Reconstruction | 2.79x | 27.42 | 0.32 | 68.36 |
| Mix | 2.61x | 8.27 | 0.54 | 76.53 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.617, + 0.884, + 0.647 + ], + "angle": 0, + "content": "Table 10 Study on training objectives. Image reconstruction objective can be mixed with text-to-image objective to enable image reconstruction capabilities without harming visual quality and prompt alignment." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.66, + 0.885, + 0.734 + ], + "angle": 0, + "content": "We are using an MLLM for multimodal perception, besides the standard text-to-image objective, we can also use an image reconstruction objective to achieve alignment. In Table 10, we show that training with the text-to-image objective achieves much better performance than the image reconstruction objective. We demonstrate that a mix of both objectives can enable image reconstruction capabilities without being generally harmful to the T2I performance." + }, + { + "type": "page_footnote", + "bbox": [ + 0.13, + 0.9, + 0.466, + 0.913 + ], + "angle": 0, + "content": "\\(^{2}\\)https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.938, + 0.508, + 0.949 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.139, + 0.885, + 0.819 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.834, + 0.816, + 0.849 + ], + "angle": 0, + "content": "Figure 10 Qualitative comparison with Janus-Pro-7B (Chen et al., 2025) on MJHQ-30K (Li et al., 2024b)." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.938, + 0.509, + 0.95 + ], + "angle": 0, + "content": "19" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_origin.pdf b/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a2d6340027579b6b2d9c8c65c533cde7b974c2fd --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c09cce53e26691ee0fd0dfdc02ae68b7c0b020cebfbe6b67bf074a37ab777047 +size 12284181 diff --git a/data/2025/2504_06xxx/2504.06256/full.md b/data/2025/2504_06xxx/2504.06256/full.md new file mode 100644 index 0000000000000000000000000000000000000000..43bef6cdc558231d0df1a1c01650620c4ed03048 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/full.md @@ -0,0 +1,417 @@ +# Transfer between Modalities with MetaQueries + +Xichen Pan $^{1,2}$ , Satya Narayan Shukla $^{1,\dagger}$ , Aashu Singh $^{1}$ , Zhuokai Zhao $^{1}$ , Shlok Kumar Mishra $^{1}$ , Jialiang Wang $^{1}$ , Zhiyang Xu $^{1}$ , Jiuhai Chen $^{1}$ , Kunpeng Li $^{1}$ , Felix Juefei-Xu $^{1}$ , Ji Hou $^{1,\dagger}$ , Saining Xie $^{2,\dagger}$ + +$^{1}$ Meta, $^{2}$ New York University +$\dagger$ Equal advising + +Unified multimodal models aim to integrate understanding (text output) and generation (pixel output), but aligning these different modalities within a single architecture often demands complex training recipes and careful data balancing. We introduce MetaQueries, a set of learnable queries that act as an efficient interface between autoregressive multimodal LLMs (MLLMs) and diffusion models. MetaQueries connects the MLLM's latents to the diffusion decoder, enabling knowledge-augmented image generation by leveraging the MLLM's deep understanding and reasoning capabilities. 
Our method simplifies training, requiring only paired image-caption data and standard diffusion objectives. Notably, this transfer is effective even when the MLLM backbone remains frozen, thereby preserving its state-of-the-art multimodal understanding capabilities while achieving strong generative performance. Additionally, our method is flexible and can be easily instruction-tuned for advanced applications such as image editing and subject-driven generation. + +Date: April 9, 2025 + +Correspondence: satyanshukla@meta.com, jihou@meta.com, saining.xie@nyu.edu + +Project Page: https://xichenpan.com/metaquery + +Meta + +# 1 Introduction + +The quest for unified multimodal models capable of both deep understanding (typically resulting in textual outputs) and rich generation (resulting in pixel outputs) holds immense promise. Such systems could unlock synergistic capabilities (OpenAI, 2025; Google, 2025), where understanding informs generation and vice versa. However, effectively connecting these different output modalities poses considerable challenges—e.g. how do we effectively transfer the latent world knowledge from the autoregressive multimodal LLM to the image generator? Although significant progress has been made, most published approaches (Ge et al., 2024; Sun et al., 2024b; Tong et al., 2024; Jin et al., 2024; Liu et al., 2024a; Team, 2024a; Xie et al., 2024; Wang et al., 2024; Wu et al., 2025a; Chen et al., 2025; Dong et al., 2024; Zhou et al., 2025; Shi et al., 2024) rely on carefully tuning base multimodal LLMs (MLLMs) to handle both understanding and generation tasks. This involves complex architectural design, data/loss balancing, multiple training stages, and other complex training recipes—without these, optimizing one capability could compromise the other. + +In this paper, we aim to deliver the promise of unified models via a simpler philosophy: Render unto diffusion what is generative, and unto LLMs what is understanding. In other words, instead of building a monolithic system from scratch, we focus on effectively transferring capabilities between state-of-the-art, pre-trained models specialized for different output modalities. To operationalize this, we keep MLLMs frozen so they can focus on what they do best—understanding—while entrusting image generation to diffusion models. We then demonstrate that even under this frozen condition, the MLLM's inherent world knowledge, strong reasoning, and in-context learning capabilities can indeed be transferred to image generation, provided the right architectural bridge is in place. + +However, leveraging an MLLM—especially a frozen one—for both multimodal understanding and generation is far from straightforward. Although (frozen) LLMs have shown good performance as conditional text encoders in text-to-image generation (Zhuo et al., 2024; Xie et al., 2025; Ma et al., 2024), they are not compatible with many desired tasks in unified modeling, such as in-context learning or producing multimodal, interleaved output. The architectural bridge we design in this work is MetaQuery (Figure 1). MetaQuery feeds a set of + +![](images/70fdf417d4fb39bf516fdf05899650efd71ec63b281c5cfcbbf45619d8d2b2b4.jpg) +Figure 1 Overview of our model. Blue tokens maintain SOTA multimodal understanding; MetaQueries are learnable queries that directly applied to frozen MLLMs to query out conditions for generation. The model is tuned using only denoising objective with paired data. 
The generative diffusion models can be either frozen or further instruction-tuned for advanced generation tasks. + +learnable queries directly into a frozen MLLM to extract multimodal conditions for multimodal generation. Our experiments reveal that, even without fine-tuning or enabling bi-directional attention, the frozen LLM serves as a powerful feature resampler (Alayrac et al., 2022), producing high-quality conditions for multimodal generation. Training unified models with MetaQueries requires only a modest amount of paired image-caption data to connect these prompted conditions to any conditional diffusion model. Because the entire MLLM stays intact for understanding, the training objective remains the original denoising objective—just as efficient and stable as fine-tuning a diffusion model. + +More specifically, previous unified models aim to train a single autoregressive transformer backbone to jointly model $p(\text{text}, \text{pixels})$ . In contrast, we choose to use a token $\rightarrow$ [transformer] $\rightarrow$ [diffusion] $\rightarrow$ pixels paradigm, which might share a high-level philosophy with the concurrent GPT-4o image generation system, as hinted at by OpenAI (2025). This approach composes the MLLM's autoregressive prior with a powerful diffusion decoder, directly leveraging the frozen MLLM's strong capability in modeling compressed semantic representations, thus avoiding the more challenging task of directly generating pixels. + +To validate our approach, we conduct a series of controlled experiments, showing that MetaQuery1 outperforms the use of a frozen MLLM purely as a conditional text encoder for image generation. Moreover, MetaQuery can match the performance of fully tuning the MLLM backbone, yet it is significantly more efficient. We also systematically investigate the training strategy, including the number of tokens and architectural configurations. With just 25M publicly available image-caption pairs, we are able to train a family of unified models that not only preserves state-of-the-art (SOTA) performance in image understanding, but also achieves SOTA-level results in text-to-image generation across multiple benchmarks. + +The promise of unified modeling goes beyond handling multimodal understanding and text-to-image generation in parallel. A deeper synergy is expected—one that taps into advanced MLLM abilities like reasoning, internal knowledge, multimodal perception, and in-context learning to enhance generation. Our results show that our method draws on the frozen MLLM's commonsense knowledge, achieving SOTA visual-commonsense generation on the CommonsenseT2I benchmark (Fu et al., 2024). Our approach also harnesses the built-in reasoning and in-context learning capabilities of frozen MLLMs, producing images from complex prompts—such as generating the United States flag in response to "The national flag of the country where Yellowstone National Park is located." (See Figure 9 for examples.) We also benchmark this type of world knowledge reasoning capability on WISE (Niu et al., 2025) and demonstrate SOTA performance. + +Finally, by connecting, preserving, and enhancing multimodal input with MetaQueries and a frozen MLLM backbone, our model can be further instruction-tuned for advanced generation tasks such as image editing and subject-driven generation. 
We show that this can be achieved both efficiently and effectively using a scalable + +data curation pipeline that directly leverages naturally occurring image pairs from web corpora, instead of depending on human-created pairs or synthetically generated data (Brooks et al., 2023; Hu et al., 2024a; Xiao et al., 2025). This natural supervision surprisingly unlocks several new capabilities beyond subject-driven generation, such as visual association and logo design (see Figure 8 for examples). + +In summary, we explore a simple yet underexplored alternative to unified multimodal modeling. Our method, MetaQuery, bridges frozen MLLM backbones and diffusion models. Experiments show that this framework delivers all the capabilities once thought to require MLLM fine-tuning while being much easier to train. The main results and findings in this paper include: + +- With MetaQuery and frozen MLLM backbones, we maintain SOTA multimodal understanding performance while enabling SOTA-level multimodal generation. +MetaQuery can transfer the capabilities of MLLMs for reasoning- and knowledge-augmented image generation. +MetaQuery can extract highly detailed visual conditions beyond semantic similarity from frozen MLLMs, enabling image reconstruction and editing tasks. +- Our method can be easily instruction-tuned even with a frozen MLLM backbone, enabling advanced multimodal generation tasks like subject-driven generation. + +# 2 Related Work + +Unified understanding and generation models. Next-token prediction has proven to be an effective approach for training models to understand language (Devlin, 2019; Brown et al., 2020) and multimodal content (Liu et al., 2024b). Recently, the community has witnessed numerous efforts to extend the success of multimodal understanding (Liu et al., 2024b) to multimodal generation by training LLM backbones to generate images at the same time. However, unlike adapting text-only LLMs (Touvron et al., 2023) to understand multimodal content with one single next text token prediction objective (Liu et al., 2024b), generating multimodal content requires a different set of training objectives. SEED-X (Ge et al., 2024), Emu (Sun et al., 2024b), and MetaMorph (Tong et al., 2024) learn to regress image features; LaVIT (Jin et al., 2024), LWM (Liu et al., 2024a), Chameleon (Team, 2024a), Show-o (Xie et al., 2024), EMU3 (Wang et al., 2024), and Janus (Wu et al., 2025a; Chen et al., 2025) auto-regressively predict next visual tokens; and DreamLLM (Dong et al., 2024), Transfusion (Zhou et al., 2025) employ diffusion objectives. However, these approaches necessitate tuning LLMs for generating both modalities, naturally posing challenges in multi-task balancing. + +Unified models with frozen LLMs. Several studies have explored the use of frozen LLMs for multimodal understanding and generation. For instance, LMFusion (Shi et al., 2024) trains image generation expert feed-forward networks (FFNs) and query-key-value (QKV) modules in parallel with a frozen LLM backbone to deeply fuse input conditions and denoise visual outputs. However, this approach offers limited flexibility as it shares the same architecture as specific LLM backbones and requires training a separate set of generative modules for every single LLM backbone. This not only imposes more computational burden but also restricts the ability to leverage powerful pre-trained generative models. An earlier work, GILL (Koh et al., 2023), investigates feeding learnable tokens into frozen MLLMs. 
It employs a combined contrastive loss and regression loss for image retrieval and generation, rather than directly employing the denoising objective for more efficient training. Its application is restricted to contextual image generation and it does not systematically explore the impact of frozen MLLMs and learnable queries. + +# 3 MetaQuery + +In this work, we propose MetaQuery, which losslessly augments understanding-only MLLMs with multimodal generation capabilities while preserving their original architecture designs and parameters intact. We carefully analyze the impact of applying MetaQuery on image generation performance. Results show that a frozen MLLM can provide strong conditions for multimodal generation. + +
| Methods | # of Tokens | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ |
| --- | --- | --- | --- | --- |
| LLM last layer embedding* | - | 7.49 | 0.55 | 78.41 |
| Random queries | 64 | 8.59 | 0.35 | 54.81 |
| Learnable queries | 64 | 7.43 | 0.56 | 75.35 |
| Learnable queries | 512 | 7.34 | 0.56 | 78.43 |
+ +Table 1 Study on different conditions for image generation. * denotes the embeddings of input tokens. Learnable queries achieve comparable performance to using all hidden states and can even surpass them with more tokens. + +
| Methods | Train LLM | Train DiT | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ |
| --- | --- | --- | --- | --- | --- |
| MLLM tuning | ✓ | ✗ | 7.75 | 0.58 | 78.97 |
| E2E tuning | ✓ | ✓ | 6.28 | 0.61 | 79.39 |
| Frozen MLLM | ✗ | ✗ | 7.43 | 0.56 | 75.35 |
| Frozen MLLM | ✗ | ✓ | 6.06 | 0.61 | 76.66 |
+ +Table 2 Study on strategies for adapting MLLMs. The methods without training LLM do not suffer from multimodal understanding degradation. Frozen MLLM achieves comparable performance to full MLLM tuning, with slightly lower prompt alignment but slightly improved visual quality. + +# 3.1 Architecture + +MetaQuery bridges frozen MLLMs with diffusion models. We use randomly initialized learnable queries $\mathcal{Q} \in \mathbb{R}^{N \times D}$ to query out the conditions $\mathcal{C}$ for generation. $N$ is the number of queries and $D$ is the dimension of the queries, which is the same as the MLLM hidden dimension. For simplicity and compatibility, we continue to use causal masking for the entire sequence rather than specifically enabling full attention for $\mathcal{Q}$ . The conditions $\mathcal{C}$ are then fed into a trainable connector to align with the input space of text-to-image diffusion models. These models can be arbitrary as long as they have a conditional input interface; we simply replace its original condition with our $\mathcal{C}$ . The whole model is trained with the original generation objective on paired data. In this paper, we focus on image generation tasks, but the model can be easily extended to other modalities like audio, video, 3D, and more. + +# 3.2 Design Choices + +The proposed architecture involves two design choices: using learnable queries and keeping the MLLM backbone frozen. We explain the reasons why we adopted these choices and how they impact performance. For all experiments, unless otherwise specified, we use the same frozen LLaVA-OneVision-0.5B (Li et al., 2024a) MLLM backbone, frozen Sana-0.6B (Xie et al., 2025) diffusion model in 512 resolution, learnable queries with $N = 64$ tokens, and a connector with a 24-layer transformer encoder. All models are trained on 25M publicly available image caption pairs for 4 epochs. We report FID score (Heusel et al., 2017) on MJHQ-30K (Li et al., 2024b) for visual aesthetic quality, and GenEval (Ghosh et al., 2023) and DPG-Bench (Hu et al., 2024b) (both without prompt rewriting) for prompt alignment, respectively. + +Learnable queries. Many models like Lumina-Next (Zhuo et al., 2024), Sana (Xie et al., 2025), and Kosmos-G (Pan et al., 2024) use the (M)LLM's last layer embedding of input tokens as image generation conditions. However, this approach is not ideal for unified models as it is not compatible with many desired tasks in unified modeling, such as in-context learning or producing multimodal, interleaved output (we provide more discussion and comparison with MetaQuery in Section 5.6). As shown in Table 1, using learnable queries with just $N = 64$ tokens achieves image generation quality comparable to that of utilizing the last layer embedding of input tokens. While random queries produce acceptable FID scores, they struggle with prompt alignment, highlighting the importance of learnable queries. Additionally, since the last layer embedding setting naturally comes with a longer sequence length, we also tested learnable queries with $N = 512$ tokens, which further improves performance and even outperforms the last layer embedding approach. + +Frozen MLLM. Existing unified models train MLLMs to jointly model $p(\text{text}, \text{pixels})$ , resulting in a more complicated training process and even downgraded understanding performance. MetaQuery keeps the original + +![](images/26e5def00142de348936cfa334f6dcbef2aa8e4cd65e4ff90e11e7056c47e06b.jpg) +(a) Text-to-image results. 
+ +![](images/6cbcca96c2745b0fc235a258d5debaaacb8baf3df31684a63e481d46e4678b1c.jpg) +(b) Image reconstruction results. + +![](images/991b1ac03b36f5361d4e58d6868c9741411c1cc7e9963640e355b72a09945466.jpg) +Figure 2 Study on the scaling of token numbers. As the number of tokens increases, text-to-image prompt alignment and image reconstruction results consistently improve. +Figure 3 Visual samples for image reconstruction with different numbers of tokens. + +MLLM architecture and parameters intact to preserve SOTA understanding capabilities. However, for multimodal generation, a key concern is whether MetaQuery's performance with significantly fewer tunable parameters would be substantially worse than methods with full MLLM tuning. As shown in Table 2, frozen MLLMs achieve comparable performance to full MLLM tuning, with slightly lower prompt alignment but slightly improved visual quality. Tuning DiT can further improve performance for both settings. This suggests that MetaQuery is another possible training strategy, one that is simpler but also effective, as an alternative to fine-tuning the entire MLLM. + +# 3.3 Training Recipe + +Based on insights from our design choices, we further study key training options for the two main components of MetaQuery: learnable queries and connectors. This study examines the number of tokens and connector design. Unless otherwise specified, all experiments in this section use the same setup as described in Section 3.2. + +Number of tokens. Many works (Wu et al., 2023; Pan et al., 2024; Ge et al., 2024) have employed learnable queries for condition extraction. However, they either set the number of tokens to match the fixed input sequence length of the image decoder (e.g., $N = 77$ for the CLIP (Radford et al., 2021) text encoder in Stable Diffusion v1.5 (Rombach et al., 2021)), or use an arbitrary fixed number like $N = 64$ without further investigation. Given that modern diffusion models like Lumina-Next (Zhuo et al., 2024) and Sana (Xie
| Architecture | # of Layers | Dims | # of Params | Rel. Wall Time | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Proj-Enc | 6 | 2304 | 517M | 1.06x | 7.80 | 0.53 | 73.37 |
| Proj-Enc | 24 | 2304 | 2046M | 1.23x | 7.41 | 0.51 | 73.75 |
| Enc-Proj | 6 | 896 | 84M | 1x | 7.73 | 0.49 | 71.39 |
| Enc-Proj | 24 | 896 | 316M | 1.06x | 7.43 | 0.56 | 75.35 |
+ +Table 3 Study on connector design. Aligning the conditions first in the same dimension as the MLLM hidden states (Enc-Proj) is more effective and parameter-efficient. + +et al., 2025) naturally accept variable-length conditions, determining the optimal number of tokens for learnable queries is crucial. In Figure 2, we provide a careful study of the number of tokens and observe promising scalability of MetaQueries. For text-to-image generation, visual quality begins to converge after 64 tokens, while more tokens consistently yield better prompt alignment. This is more evident for long captions, as GenEval with rewritten prompts increases more rapidly as the number of tokens increases. For image reconstruction, we observe that more tokens consistently improve the quality of reconstructed images (visual samples can be found in Figure 3). In our later experiments, we set the number of tokens to $N = 256$ for all models, as it achieves a good balance between performance and efficiency. + +Connector design. The connector is another important component in MetaQuery. We use the same architecture as the Qwen2.5 (Team, 2024b) LLM, but enable bi-directional attention for the connector. We study two different designs: Projection Before Encoder (Proj-Enc) and Projection After Encoder (Enc-Proj). Proj-Enc first projects the conditions into the input dimension of the diffusion decoder, then uses a transformer encoder to align the conditions. On the other hand, Enc-Proj first uses a transformer encoder to align the conditions in the same dimension as the MLLM hidden states, then projects the conditions into the input dimension of the diffusion decoder. As shown in Table 3, the Enc-Proj design achieves better performance than the Proj-Enc design while having fewer parameters. + +# 4 Model Training + +We train MetaQuery in two stages: the pre-training stage and the instruction tuning stage. Both training stages keep MLLMs frozen and fine-tune learnable queries, connectors, and diffusion models. We use three different MLLM backbones for different sizes: Base (LLaVA-OneVision 0.5B (Li et al., 2024a)), Large (Qwen2.5-VL 3B (Bai et al., 2025)), and X-Large (Qwen2.5-VL 7B (Bai et al., 2025)). We set the number of tokens to $N = 256$ for all models, and utilize a 24-layer connector with Enc-Proj architecture. For image generation heads, we tested two different diffusion models: Stable Diffusion v1.5 (Rombach et al., 2021) and Sana-1.6B (Xie et al., 2025). + +![](images/71a1d9a265fd63ade28febee74e11c217a12bde87dcfcf180be066256678952a.jpg) +Figure 4 Overview of instruction tuning data curation pipeline. We group images from web corpora based on caption similarity using the SigLIP (Zhai et al., 2023) model, then construct instruction-tuning data from these image pairs using an MLLM. + +Pre-training. We pre-train our model on 25M publicly available image-caption pairs for 8 epochs with a learning rate of 1e-4 and a global batch size of 4096. The learning rate follows a cosine decay schedule with a 4,000-step warmup period before gradually decreasing to 1e-5. + +
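Both training stages tune only the learnable queries, the connector, and the diffusion head while the MLLM stays frozen. To make the interface of Sections 3.1-3.2 concrete, the PyTorch-style sketch below shows one way these pieces could be wired together; a plain transformer encoder stands in for the Qwen2.5-style connector blocks, and the module names, hidden sizes, and the `frozen_mllm` / `diffusion_head` call signatures are illustrative assumptions, not the authors' released implementation.

```python
import torch
import torch.nn as nn


class EncProjConnector(nn.Module):
    """Enc-Proj connector (Table 3): align conditions at the MLLM hidden size first,
    then project into the diffusion model's conditioning dimension."""

    def __init__(self, mllm_dim=896, diff_cond_dim=2304, num_layers=24, num_heads=8):
        super().__init__()
        layer = nn.TransformerEncoderLayer(d_model=mllm_dim, nhead=num_heads, batch_first=True)
        # Bi-directional attention over the query outputs, unlike the causal MLLM.
        self.encoder = nn.TransformerEncoder(layer, num_layers=num_layers)
        self.proj = nn.Linear(mllm_dim, diff_cond_dim)

    def forward(self, conditions):
        return self.proj(self.encoder(conditions))


class MetaQuerySketch(nn.Module):
    def __init__(self, frozen_mllm, diffusion_head, num_queries=256, mllm_dim=896):
        super().__init__()
        self.mllm = frozen_mllm
        for p in self.mllm.parameters():          # MLLM stays frozen; understanding is untouched
            p.requires_grad_(False)
        self.queries = nn.Parameter(torch.randn(num_queries, mllm_dim))  # learnable queries Q
        self.connector = EncProjConnector(mllm_dim=mllm_dim)
        self.diffusion_head = diffusion_head      # any conditional text-to-image diffusion model

    def forward(self, prompt_embeds, noisy_latents, timesteps):
        B = prompt_embeds.size(0)
        q = self.queries.unsqueeze(0).expand(B, -1, -1)
        # Append Q after the (multimodal) prompt tokens; causal masking is kept for the
        # whole sequence, so the queries can attend to everything before them.
        hidden = self.mllm(inputs_embeds=torch.cat([prompt_embeds, q], dim=1)).last_hidden_state
        conditions = hidden[:, -q.size(1):]       # outputs at the query positions = C
        cond = self.connector(conditions)         # map C into the diffusion conditioning space
        # Standard diffusion objective, with the original text condition replaced by C.
        return self.diffusion_head(noisy_latents, timesteps, encoder_hidden_states=cond)
```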
| Methods | Base (M)LLM | MME-P | MMB | SEED | MMMU | MM-Vet | COCO FID ↓ | MJHQ FID ↓ | GenEval ↑ | DPG-Bench ↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Emu | LLaMA 13B | - | - | - | - | - | 11.66 | - | - | - |
| DreamLLM | Vicuna 7B | - | - | - | - | 36.6 | 8.46 | - | - | - |
| Chameleon | From Scratch 7B | - | - | - | 22.4 | 8.3 | 26.74 | - | 0.39 | - |
| Show-o-512 | Phi-1.5 1.3B | 1097.2 | - | - | 26.7 | - | 9.24 | 15.18 | 0.68 | - |
| VILA-U | LLaMA-2 7B | 1401.8 | - | 59.0 | - | 33.5 | - | 7.69 | - | - |
| Emu3 | From Scratch 7B | - | 58.5 | 68.2 | 31.6 | 37.2 | 12.80 | - | 0.66† | 80.60 |
| MetaMorph | LLaMA-3 8B | - | 75.2 | 71.8 | - | - | 11.8 | - | - | - |
| TokenFlow-XL | Qwen-2.5 14B | 1551.1 | 76.8 | 72.6 | 43.2 | 48.2 | - | - | 0.63† | 73.38 |
| Transfusion | From Scratch 7B | - | - | - | - | - | 8.70 | - | 0.63 | - |
| LMFusion | LLaVA-Next 8B | 1603.7 | 72.1 | 72.5 | 41.7 | - | 8.20 | - | - | - |
| Janus | DeepSeek-LLM 1.5B | 1338.0 | 69.4 | 63.7 | 30.5 | 34.3 | 8.53 | 10.10 | 0.61 | - |
| JanusFlow | DeepSeek-LLM 1.5B | 1333.1 | 74.9 | 70.5 | 29.3 | 30.9 | - | 9.51 | 0.63 | 80.09 |
| Janus-Pro-1B | DeepSeek-LLM 1.5B | 1444.0 | 75.5 | 68.3 | 36.3 | 39.8 | - | 14.33‡ | 0.73 | 82.63 |
| Janus-Pro-7B | DeepSeek-LLM 7B | 1567.1 | 79.2 | 72.1 | 41.0 | 50.0 | - | 13.48‡ | 0.80 | 84.19 |
| MetaQuery-B | LLaVA-ov 0.5B | 1238.0 | 58.5 | 66.6 | 31.4 | 29.1 | 8.91 | 6.28 | 0.74† | 80.04 |
| MetaQuery-L | Qwen2.5-VL 3B | 1574.3 | 78.6 | 73.8 | 53.1 | 63.2 | 8.87 | 6.35 | 0.78† | 81.10 |
| MetaQuery-XL | Qwen2.5-VL 7B | 1685.2 | 83.5 | 76.9 | 58.6 | 66.6 | 8.69 | 6.02 | 0.80† | 82.05 |
+ +Table 4 Quantitative results on multimodal understanding and generation benchmarks. We report the COCO FID with Stable Diffusion v1.5 (Rombach et al., 2021), and other metrics with Sana (Xie et al., 2025). † denotes rewritten prompts. ‡ denotes results tested by us under the same settings. + +Instruction tuning. Furthermore, in this work, we rethink the data curation process for instruction tuning in image generation. All current methods rely on expert models to generate target images from source images and instructions (Ge et al., 2024; Xiao et al., 2025; Hu et al., 2024a). However, this approach is limited in scalability and may introduce biases, as the available expert models cover only a narrow range of image transformations. Inspired by MagicLens (Zhang et al., 2024), we construct instruction-tuning data using naturally occurring image pairs in web corpora. These corpora contain rich multimodal contexts with interleaved text and images on related subjects or topics. These image pairs often exhibit meaningful associations and specific relationships spanning a broad spectrum, from direct visual similarities to more subtle semantic connections (as shown in Figure 4). Such naturally occurring image pairs provide excellent and diverse supervision signals for instruction tuning. Based on this observation, we developed a data construction pipeline that mines image pairs and leverages MLLMs to generate open-ended instructions that capture their inter-image relationships. First, we collect grouped images from mmc4 (Zhu et al., 2023) core fewer-faces subset, where each image is accompanied by a caption. Using SigLIP (Zhai et al., 2023), we cluster images with similar captions (allowing up to 6 images per group, with a similarity threshold of 0.5). In each group, the image with minimum average similarity to the others is designated as the target, while the remaining images serve as source images. This process yields a total of 2.4M image pairs. Finally, we employ Qwen2.5-VL 3B (Bai et al., 2025) to generate instructions for each pair, describing how to transform the source images into the target image (See Appendix A for the detailed MLLM prompt). We experimented with instruction-tuning our Base size model on the proposed 2.4M dataset for 3 epochs, using the same learning rate schedule as in pre-training and a batch size of 2048. + +# 5 Experiments + +In this section, we first evaluate MetaQuery on various multimodal understanding and text-to-image generation benchmarks (Section 5.1). We demonstrate that MetaQuery can be trained to reconstruct input images (Section 5.2). This image reconstruction capability can be easily transferred to perform image editing (Section 5.3). Furthermore, we show that MetaQuery can be instruction-tuned to perform zero-shot subject-driven generation (Section 5.4). By leveraging our approach for collecting instruction tuning data from naturally existing image pairs, we also reveal that MetaQuery can unlock novel capabilities like visual association and logo design (also in Section 5.4). Additionally, we demonstrate that MetaQuery can benefit from the internal knowledge and reasoning capabilities of the frozen MLLM, overcoming common failures exhibited by other generation models (Section 5.5). Finally, we discuss the impact of different MLLM backbones and compare MetaQuery's behavior with the baseline that uses MLLM last layer embeddings (Section 5.6). 
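Before turning to the experiments, the grouping step of the instruction-tuning data pipeline above can be sketched roughly as follows. The paper specifies only SigLIP caption-embedding similarity with a 0.5 threshold, at most 6 images per group, and the minimum-average-similarity image as the target; the greedy grouping loop and the function names below are assumptions added purely for illustration.

```python
import numpy as np


def group_by_caption_similarity(caption_embs: np.ndarray, sim_thresh: float = 0.5, max_group: int = 6):
    """caption_embs: (N, D) L2-normalized SigLIP text embeddings, one per image caption.
    Greedily forms groups of mutually similar captions (the greedy strategy is an assumption)."""
    sims = caption_embs @ caption_embs.T
    unused = set(range(len(caption_embs)))
    groups = []
    for i in range(len(caption_embs)):
        if i not in unused:
            continue
        members = [j for j in sorted(unused) if sims[i, j] >= sim_thresh][:max_group]
        if len(members) >= 2:          # need at least one source image plus one target
            groups.append(members)
            unused -= set(members)
    return groups


def split_sources_and_target(group, caption_embs: np.ndarray):
    """Target = image with minimum average similarity to the others; the rest become sources."""
    sims = caption_embs[group] @ caption_embs[group].T
    avg_sim = (sims.sum(axis=1) - 1.0) / (len(group) - 1)   # drop self-similarity
    target = group[int(np.argmin(avg_sim))]
    sources = [idx for idx in group if idx != target]
    return sources, target
```

Each resulting (sources, target) pair is then passed to Qwen2.5-VL 3B with the Appendix A prompt to produce the open-ended instruction that maps the source images to the target image.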
+ +![](images/b9c3a23589c96c41551027759ee3efb1784846504d3b7874cd009fd5d0fedae2.jpg) +A hot air balloon in the shape of a heart. Grand Canyon + +![](images/d97f1701555c6678bd51e7b23d877a0a7ed770994c0ffded7c265bac83b8a108.jpg) +A British shorthair wearing sunglasses + +![](images/fe14b58523fd1199df8a35521fb8b1b3670ba32ccfbb5bcd196c0ff9f20299ca.jpg) + +![](images/4cd49de60a91872c7f33c2ed1240629712279838d61f4ad56a145108151af2af.jpg) +A butterfly lands directly on the nose of a German Shepherd. + +![](images/dde05b8ac87ef551f6c6722f0f3a69c9df1b0be719bc5033e26f38b356ace440.jpg) +A close-up of honey being drizzled onto pancakes, the thick liquid flowing slowly and smoothly. + +![](images/fcbdb08c63f65558cdf160396bedb8523d98a6851b524df3201eec1f27a037b8.jpg) +A sunken ship at the bottom of the ocean. +The word 'START' written on a street surface. + +![](images/b12ee3389eb59af291e4591659081ba05f6a90a9227a6beb350600174faf3f18.jpg) +A paper origami dragon riding a boat in waves. +An old rusted robot wearing pants and a jacket riding skis in a supermarket. + +![](images/36fc86f721567164cf07c1649bdde9d18e478995d36fbc4ff03ad2d2af643563.jpg) + +![](images/e8e1b7bf4cec9f8528f7381916efc241f178f796c22ba90ba0983c2557c0ac6c.jpg) +A close-up of a painter's brush touching the canvas, with paint spreading and blending in a swirl of colors. +Figure 5 Qualitative results of MetaQuery. Prompts are from PartiPrompt (Yu et al., 2022), Sana (Xie et al., 2025) and Movie Gen Bench (Polyak et al., 2024). + +![](images/31a647ae7331f834c474e220328e1e72996313388bcd70d7447ba4ad111f554a.jpg) +A giant humanoid, made of fluffy blue cotton candy, stomping on the ground, and roaring to the sky, clear blue sky behind them. + +![](images/af25337f48bf2a1fcba79b602b7512fb3badd1f589b46c51e3d25f9b094dac98.jpg) +Close-up of a bright blue parrot's feathers glittering in the light, showing its unique plumage and vibrant colors. +The reflection of a snowy mountain peak in a crystal-clear alpine lake, creating a perfect mirror image with a slight shimmering effect. + +![](images/7b499c68b1be62904daf47a04a892eb29c8a4c9f7d17cab06469846695a5b06a.jpg) + +# 5.1 Image Understanding and Generation + +As shown in Table 4, our model family demonstrates strong capabilities across both understanding and generation tasks. Benefiting from the flexible training approach that allows us to leverage arbitrary SOTA frozen MLLMs, all of our models in different sizes exhibit competitive performance on all understanding benchmarks (Fu et al., 2023; Liu et al., 2023; Li et al., 2023a; Yue et al., 2024; Yu et al., 2023). In terms of image generation, MetaQuery achieves SOTA visual quality on MJHQ-30K (Li et al., 2024b). Given the fact that MetaQuery works with frozen MLLMs, we can naturally connect with an arbitrary number of diffusion models. Since the base Sana-1.6B (Xie et al., 2025) model is already fine-tuned on aesthetic data, we adopt Stable Diffusion v1.5 (Rombach et al., 2021) for COCO FID evaluation. Our results suggest that after adapting it to powerful MLLMs, we can achieve improved visual quality as indicated by the COCO FID score of 8.69. This also establishes a new SOTA COCO FID score among all Stable Diffusion v1.5-based unified models including MetaMorph (Tong et al., 2024) (11.8) and Emu (Sun et al., 2024b) (11.66). 
+ +![](images/70b0361654016b250254d8a9163f2affb03ec516f759c8748b50254aae39b093.jpg) +Real Image + +![](images/a626c2ad7ce49892adf9b4655ab02d93bb5762fc0945595b273155aa85a7da62.jpg) +SEED +(Ge et al., 2023) + +![](images/f5b932ba4b3af60c9595d23c21d7eb8e63a7d357dde1449e8e9a53b30d301584.jpg) +Emu +(Sun et al., 2024b) + +![](images/edcb210930a495f6468a261995e8cd43331cc08b7917631b87941c67eb85b779.jpg) +Emu2 +(Sun et al., 2024a) + +![](images/1cc8421e582befa6179530b3cee34505907ff5676ee7a14c60dbe6b90cbebfea.jpg) +GPT-4o +(OpenAI, 2025) + +![](images/f7cb61c92aff1e1a951a059200c3802b85f7a6bad339597ac1ffbfbdec16a6fd.jpg) +MetaQuery-B + +![](images/34f8d42ab9a5c73eb8ad1887541fb0ba22d092eca4923276bffde665da809607.jpg) +Figure 6 Image reconstruction results. Results of SEED, Emu, and Emu2 are from Sun et al. (2024a). +Add a chef hat to the dog +There is a house in front of the mountain +Figure 7 Image editing results. This capability can be easily transferred from image reconstruction after lightweight fine-tuning. + +![](images/0c9fb1b5858a9ad2270627f7e7eb4d935d61f2172945f72b652a3578d4f08323.jpg) +Remove the 3-WAY sign + +![](images/9c6c074753e5c0db73a2544daf4a539eb7a42932749c7546b1c4f05818f8732f.jpg) +Replace the dog with a golden retriever + +![](images/77bc365456c7f1a324580189122792f8db5d207f06c8180d8e1bf41346dbbc5d.jpg) +Change to cartoon style +Change it into linear style + +![](images/55aca72edd9bc6fd2358694218fae4f8e6d7e4c0987fbd61f62661ac387ebd35.jpg) +Change the bird to a blue one + +![](images/4b3982cc5c2c19d0b2fd26b9f06c0bbf5ad2f7331e1a2829e8a6c44d7973e951.jpg) +Replace the fries with salad + +In terms of prompt alignment, MetaQuery also achieves competitive performance on GenEval (Ghosh et al., 2023) and DPG-Bench (Hu et al., 2024b), beating all diffusion model-based approaches including Transfusion (Zhou et al., 2025) and JanusFlow (Ma et al., 2025). We note that there is a performance gap between MetaQuery and Janus-Pro (Chen et al., 2025), which auto-regressively generates image tokens. We suggest that this gap may be due to the different failure modes of diffusion models and auto-regressive models: diffusion models usually fail to correctly follow the prompt, while auto-regressive models may suffer from more visual artifacts, which are difficult to quantify by GenEval and DPG-Bench. We tested the MJHQ-30K FID score of Janus-Pro under the same setting as ours and found that, in terms of visual quality and artifact control, MetaQuery is significantly better than Janus-Pro (see Appendix B for visual comparison). Additionally, we find that MetaQuery achieves much better world knowledge reasoning capability than Janus-Pro, which we will elaborate on in Section 5.5. We also find that when scaling up the size of the frozen LLM, generation quality and prompt alignment improve. MetaQuery provides a simple and principled way of leveraging the most advanced multimodal LLMs within a unified modeling framework. We also provide qualitative results in Figure 5 to illustrate the text-to-image generation capability of MetaQuery.
+ +![](images/8e80f20eaaa577eb8299d8509c1b636a00a6be83769d922edded5c3bdd3e26fc.jpg) + +![](images/47401bdfe62a96981c1feaecf18c9d323881824be0d85aa784265e9a19d202c9.jpg) +Top view of the same berry bowl + +![](images/4149a2a9d8a0180d46b942d93daa35e3ebf8ddc950c9a4f1d92f2c80d369bf94.jpg) + +![](images/61d5ab558a3fffaa336d26f1f2171b61a5a2ce74c40d4cd1af84fced70c45a4e.jpg) +The same robot in Minecraft + +![](images/a64fb9d0751c7bee00cfe32d761c4cf3cf5e293940406f5287724778e33f7dd8.jpg) + +![](images/0030c217861cbcb07db1e9bf4497c51ebe5ff2aa3fc53234babc79d9c4ca53eb.jpg) +The toy on the head of the cat + +![](images/c209fc3515bcd2eea2d99ebc2865c335d0d995899ce2bd7acc199d906ccb6ecc.jpg) + +![](images/ab4ad8e76c716680a874cb79f434e48a84893b5174e1de3b19378e6c89009ff3.jpg) + +![](images/7997eceab53dfc59027dfeeb9d00f0d54c438ad627f519543fa4528dc0b321de.jpg) +The dog wearing sunglasses + +![](images/4419bc1f599e2b5e75cfd27fe839c397d664930cb4e4ea9f79c6d864c52b521d.jpg) + +![](images/b3b8ac37fa24e66c37666f35b4a47270abc1ff2917320e9161e2464df3cd9ee6.jpg) + +![](images/924d212b61c2dbd9a97e97845fd03efdda618a18e51f97ff75229de657600cb8.jpg) +The same model but a real one in New York city + +![](images/aa994d8f438857841af137ce9cf69d2079d81944e8baa713cb1e310ebbc5a032.jpg) + +![](images/5d46cf09b50e4f98422bb851e2dd13b1734ea4bebc37a20eb08828e94d103b92.jpg) +The sky line view of the city from this building + +![](images/7d4c1fbb21fee1f8d29061754c600619607957874633c9de8a489aa2c276f85c.jpg) + +![](images/6ba2cccb890caaf3726ac0b89e0a6017e25a056bd986ad530823545b6cc653b2.jpg) +The statue in the same city +Figure 8 Qualitative results for instruction tuning. Instruction-tuned MetaQuery achieves strong subject-driven capability (first row) and can even reason through the multimodal input to generate images (second row). + +![](images/a84d0b3eab726eb969d63ec7f14b4cf156b8489ede5810f95269439a025f5c84.jpg) + +![](images/b77f0c069a70b297d343734d4179c4df607c59c0e70650ea18864234e7002424.jpg) +A logo for the same teapot + +
| Methods | DINO Score ↑ | CLIP-I Score ↑ | CLIP-T Score ↑ |
| --- | --- | --- | --- |
| Real Images (Oracle) | 0.774 | 0.885 | - |
| fine-tuning | | | |
| Textual Inversion (Gal et al., 2023) | 0.569 | 0.780 | 0.255 |
| DreamBooth (Ruiz et al., 2023) | 0.668 | 0.803 | 0.305 |
| BLIP-Diffusion (Li et al., 2023b) | 0.670 | 0.805 | 0.302 |
| zero-shot & test time tuning free | | | |
| Re-Imagen (Chen et al., 2023) | 0.600 | 0.740 | 0.270 |
| BLIP-Diffusion (Li et al., 2023b) | 0.594 | 0.779 | 0.300 |
| Kosmos-G (Pan et al., 2024) | 0.694 | 0.847 | 0.287 |
| MetaQuery-B-Instruct | 0.737 | 0.852 | 0.301 |
+ +Table 5 Subject-driven generation results on DreamBench (Ruiz et al., 2023). + +# 5.2 Image Reconstruction + +We demonstrate that MetaQuery can be easily fine-tuned for image reconstruction tasks with a frozen MLLM (See Appendix C for more details). As shown in Figure 6, we compare our fine-tuned MetaQuery-B with existing diffusion autoencoders from various unified models, which reconstruct images from predicted visual features. Since these unified models are not explicitly fine-tuned for image reconstruction, their results are directly decoded from the vision encoder's output. Remarkably, even under this more constrained setup, our fine-tuned MetaQuery-B can still achieve competitive performance, matching the best existing open-source model Emu2 (Sun et al., 2024a). When compared with GPT-4o (OpenAI, 2025), our model also achieves comparable quality. + +![](images/4073247d621f7d479da91cfa9b7a98561d024ab67b1715f504a15a138a4b5b54.jpg) +Figure 9 MetaQuery leverages frozen MLLMs for reasoning- and knowledge-augmented generation, overcoming the failure cases encountered in the base Sana model. * denotes that the LLM last layer embeddings of input tokens are used for image generation; the model is in L size (Qwen2.5-VL 3B). This approach can be better than the base Sana model in some cases but fails to activate in-context learning to perform knowledge-augmented generation. Some of the test cases are from MetaMorph (Tong et al., 2024) and CommonsenseT2I (Fu et al., 2024). + +![](images/cb12222fe568d3d46a55c40b1795a095b91866ad756d26fc3e7316d7bcc184af.jpg) + +# 5.3 Image Editing + +As shown in Figure 7, we demonstrate that MetaQuery can transfer its image reconstruction capability to perform image editing. We keep the MLLM backbone frozen and fine-tune our pre-trained Base model for only 1,000 steps on publicly available image editing data. Qualitative results demonstrate that MetaQuery performs effectively in these image-editing scenarios. + +# 5.4 Instruction Tuning + +We show that after being instruction-tuned on the proposed 2.4M dataset in Section 4, MetaQuery can achieve impressive zero-shot subject-driven generation performance, producing coherent results even with multiple highly customized subjects (the first row of Figure 8). Using various supervision signals, the instruction-tuned MetaQuery-B model surprisingly unlocks novel capabilities like visual association and logo design that go beyond copy-pasting (the second row of Figure 8). For example, in the first case, the model identifies the specific model of the input Porsche 911 car image, then correctly generates a novel front view for that model. In the second case, the model recognizes the input image of Rockefeller Center and imagines the view of New York City from the top of the Rockefeller Center. + +We also follow DreamBooth (Ruiz et al., 2023) by adopting DINO, CLIP-I, and CLIP-T scores to quantitatively evaluate our model on the DreamBench (Ruiz et al., 2023) dataset. As shown in Table 5, our MetaQuery-B-Instruct model achieves SOTA performance, outperforming existing models like Kosmos-G (Pan et al., 2024) that are explicitly trained on constructed substitution tasks for subject-driven generation.
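For reference, the CLIP-I and CLIP-T scores reported in Table 5 are cosine similarities in CLIP embedding space between the generated image and, respectively, the real subject images and the text prompt (the DINO score is computed analogously with DINO ViT features). A minimal sketch using the Hugging Face CLIP API is shown below; the checkpoint choice and pre-processing are assumptions and may differ from the exact DreamBench evaluation protocol.

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Illustrative checkpoint choice; DreamBooth's official protocol may use a different one.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")


@torch.no_grad()
def clip_i_score(generated: Image.Image, reference: Image.Image) -> float:
    """CLIP-I: cosine similarity between image embeddings of generated and reference images."""
    inputs = processor(images=[generated, reference], return_tensors="pt")
    feats = model.get_image_features(**inputs)
    feats = feats / feats.norm(dim=-1, keepdim=True)
    return float(feats[0] @ feats[1])


@torch.no_grad()
def clip_t_score(generated: Image.Image, prompt: str) -> float:
    """CLIP-T: cosine similarity between the generated image and its text prompt."""
    inputs = processor(text=[prompt], images=[generated], return_tensors="pt", padding=True)
    img = model.get_image_features(pixel_values=inputs["pixel_values"])
    txt = model.get_text_features(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
    img = img / img.norm(dim=-1, keepdim=True)
    txt = txt / txt.norm(dim=-1, keepdim=True)
    return float(img[0] @ txt[0])
```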
| Methods | Cultural | Time | Space | Biology | Physics | Chemistry | Overall |
| --- | --- | --- | --- | --- | --- | --- | --- |
| GPT-4o** (OpenAI, 2025) | 0.94 | 0.64 | 0.98 | 0.93 | 0.98 | 0.95 | 0.89 |
| Text-to-Image Models | | | | | | | |
| SD-v1-5 (Rombach et al., 2021) | 0.34 | 0.35 | 0.32 | 0.28 | 0.29 | 0.21 | 0.32 |
| SD-XL (Podell et al., 2023) | 0.43 | 0.48 | 0.47 | 0.44 | 0.45 | 0.27 | 0.43 |
| PixArt-Alpha (Chen et al., 2024) | 0.45 | 0.50 | 0.48 | 0.49 | 0.56 | 0.34 | 0.47 |
| playground-v2.5 (Li et al., 2024b) | 0.49 | 0.58 | 0.55 | 0.43 | 0.48 | 0.33 | 0.49 |
| SD-3.5-large (Esser et al., 2024) | 0.44 | 0.50 | 0.58 | 0.44 | 0.52 | 0.31 | 0.46 |
| FLUX.1-dev (Labs, 2024) | 0.48 | 0.58 | 0.62 | 0.42 | 0.51 | 0.35 | 0.50 |
| Unified Models | | | | | | | |
| show-o-512 (Xie et al., 2024) | 0.28 | 0.40 | 0.48 | 0.30 | 0.46 | 0.30 | 0.35 |
| vila-u-7b-256 (Wu et al., 2025b) | 0.26 | 0.33 | 0.37 | 0.35 | 0.39 | 0.23 | 0.31 |
| Emu3 (Wang et al., 2024) | 0.34 | 0.45 | 0.48 | 0.41 | 0.45 | 0.27 | 0.39 |
| Janus-1.3B (Wu et al., 2025a) | 0.16 | 0.26 | 0.35 | 0.28 | 0.30 | 0.14 | 0.23 |
| JanusFlow-1.3B (Ma et al., 2025) | 0.13 | 0.26 | 0.28 | 0.20 | 0.19 | 0.11 | 0.18 |
| Janus-Pro-1B (Chen et al., 2025) | 0.20 | 0.28 | 0.45 | 0.24 | 0.32 | 0.16 | 0.26 |
| Janus-Pro-7B (Chen et al., 2025) | 0.30 | 0.37 | 0.49 | 0.36 | 0.42 | 0.26 | 0.35 |
| MetaQuery-B | 0.44 | 0.49 | 0.58 | 0.41 | 0.49 | 0.34 | 0.46 |
| MetaQuery-L | 0.56 | 0.57 | 0.62 | 0.48 | 0.63 | 0.42 | 0.55 |
| MetaQuery-XL | 0.56 | 0.55 | 0.62 | 0.49 | 0.63 | 0.41 | 0.55 |
+ +Table 6 Comparison of world knowledge reasoning on WISE (Niu et al., 2025). The test cases in WISE are similar to the knowledge-augmented generation ones in Figure 9. MetaQuery achieves SOTA performance and significantly outperforms all other unified models. ** Results are evaluated by Yan et al. (2025) on a random subset of 200 out of 1000 samples. + +
| Methods | w/o Neg. Prompt | w/ Neg. Prompt |
| --- | --- | --- |
| DALL-E 3 (Ramesh et al., 2021) w/ rewrite | 40.17 | N/A |
| SD-XL (Podell et al., 2023) | 26.00 | 44.83 |
| SD-3-medium (Esser et al., 2024) | 26.17 | 47.17 |
| FLUX.1-dev (Labs, 2024) | 24.50 | 22.50 |
| Sana-1.6B (Xie et al., 2025) | 25.17 | 43.33 |
| MetaQuery-B | 27.33 | 51.50 |
| MetaQuery-L | 28.83 | 57.67 |
+ +Table 7 Comparison of visual commonsense reasoning capability on CommonsenseT2I (Fu et al., 2024). + +# 5.5 Reasoning- and Knowledge-Augmented Generation + +We show that the learnable queries can effectively leverage capabilities of the frozen LLM. This enables the model to better understand and follow complex prompts, including those requiring real-world knowledge and reasoning. As shown in Figure 9, for the left knowledge-augmented generation cases, MetaQuery-L can leverage world knowledge from the frozen MLLM and reason through the input question to generate the correct answer. For the right commonsense knowledge cases from CommonsenseT2I (Fu et al., 2024), the LLM provides better commonsense knowledge and enables MetaQuery to generate images that are consistent with the facts. + +To quantitatively evaluate MetaQuery's world knowledge reasoning capability, we employ the WISE (Niu et al., 2025) benchmark, which contains similar test cases to the knowledge-augmented generation examples shown in Figure 9. As demonstrated in Table 6, MetaQuery achieves SOTA performance, significantly outperforming all other unified models. Notably, before our work, existing unified models struggled to effectively leverage powerful MLLMs for reasoning and knowledge-augmented generation, resulting in inferior performance compared to text-to-image models. MetaQuery stands as the first unified model to successfully transfer the advanced capabilities of frozen MLLMs to image generation and exceed the performance of SOTA text-to-image models. + +
| LLM Backbones | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ | CommonsenseT2I ↑ |
| --- | --- | --- | --- | --- |
| Qwen2.5-3B | 6.20 | 0.79 | 81.34 | 56.00 |
| Qwen2.5-3B-Instruct | 6.36 | 0.79 | 81.12 | 54.33 |
| Qwen2.5-VL-3B-Instruct | 6.35 | 0.78 | 81.10 | 57.67 |
+ +Table 8 Comparison across different LLM backbones. Image generation capability is mostly orthogonal to multimodal understanding capability. + +
| Methods | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ | WiScore ↑ | CommonsenseT2I ↑ |
| --- | --- | --- | --- | --- | --- |
| Ours-L w/ Last Layer Embed* | 6.41 | 0.78 | 81.23 | 0.48 | 52.83 |
| Ours-L w/ MetaQueries | 6.35 | 0.78 | 81.10 | 0.55 | 57.67 |
+ +Table 9 Comparison between MetaQuery and LLM last layer embedding. * denotes that the LLM last layer embeddings of input tokens are used for image generation. We observe comparable performance between MetaQuery and LLM last layer embedding on visual quality and prompt alignment. However, MetaQuery can activate in-context learning to perform knowledge-augmented generation, yielding much better performance on commonsense reasoning and world knowledge reasoning. + +We also quantitatively evaluate MetaQuery's commonsense reasoning capability on the CommonsenseT2I benchmark (Fu et al., 2024) in Table 7. For simplicity, we use CLIP (Radford et al., 2021) as the evaluator following their original implementation. Results show that MetaQuery significantly improves the performance of the base Sana model, achieving SOTA performance. + +# 5.6 Discussion + +Comparison over different LLM backbones. As shown in Table 8, to test the impact of employing different LLM backbones for MetaQuery, we carefully select a family of backbone models: pre-trained LLM (Qwen2.5-3B), instruction-tuned LLM (Qwen2.5-3B-Instruct), and instruction-tuned MLLM (Qwen2.5-VL-3B-Instruct). Both instruction-tuned models are initialized with the first pre-trained model checkpoint. Experimental results show that instruction tuning can achieve better (multimodal) understanding capabilities. However, the improvements are orthogonal to image generation performance when employed to provide multimodal generation conditions. + +Comparison with using last layer embeddings. As shown in Table 1, our learnable queries approach achieves comparable image generation quality and prompt alignment to using the LLM's last layer embeddings of input tokens. However, the last layer embedding method essentially treats the decoder-only LLM as a text encoder, which inherently limits its in-context learning capabilities. While this approach does improve upon the base Sana model in some cases as demonstrated in Figure 9, it struggles with the knowledge-augmented generation cases shown in the same figure. These cases require the LLM to first process and answer input questions before generating corresponding images, demanding in-context learning beyond what text encoders typically provide. This performance gap is quantitatively confirmed in Table 9, where MetaQuery significantly outperforms the last layer embedding approach on both WiScore and CommonsenseT2I benchmarks. Integrated natively with the LLM, MetaQuery naturally leverages its in-context learning capabilities, enabling the model to reason through questions and generate appropriate images. + +# 6 Conclusion + +We presented MetaQueries, a simple interface connecting MLLMs (for understanding) and diffusion decoders (for generation), effective even when the MLLM is frozen. This approach yields state-of-the-art understanding and generation performance with straightforward implementation. By enabling transfer between modalities, MetaQueries successfully channels MLLM knowledge and reasoning into multimodal generation. While effective, we hypothesize that bridging the remaining gap to leading proprietary systems may primarily involve further data scaling. We hope MetaQueries provides a powerful, accessible baseline for future unified multimodal model development. + +# References + +Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. 
In NeurIPS, 2022. +Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. +Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In CVPR, 2023. +Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. In NeurIPS, 2020. +Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. In ICLR, 2024. +Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. In ICLR, 2023. +Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. +Jacob Devlin. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL, 2019. +Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. In ICLR, 2024. +Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In ICML, 2024. +Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. +Xingyu Fu, Muyu He, Yujie Lu, William Yang Wang, and Dan Roth. Commonsense-t2i challenge: Can text-to-image generation models understand commonsense? In $COLM$ , 2024. +Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. In ICLR, 2023. +Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023. +Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024. +Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. In NeurIPS, 2023. +Google. Experiment with gemini 2.0 flash native image generation, 2025. https://developers.googleblog.com/en/experiment-with-gemini-20-flash-native-image-generation/. +Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In NeurIPS, 2017. +Hexiang Hu, Kelvin CK Chan, Yu-Chuan Su, Wenhu Chen, Yandong Li, Kihyuk Sohn, Yang Zhao, Xue Ben, Boqing Gong, William Cohen, et al. Instruct-imagen: Image generation with multi-modal instruction. In CVPR, 2024a. +Xiwei Hu, Rui Wang, Yixiao Fang, Bin Fu, Pei Cheng, and Gang Yu. 
Ella: Equip diffusion models with llm for enhanced semantic alignment. arXiv preprint arXiv:2403.05135, 2024b. + +Yang Jin, Kun Xu, Liwei Chen, Chao Liao, Jianchao Tan, Bin Chen, Chenyi Lei, An Liu, Chengru Song, Xiaoqiang Lei, et al. Unified language-vision pretraining with dynamic discrete visual tokenization. In ICLR, 2024. +Jing Yu Koh, Daniel Fried, and Ruslan Salakhutdinov. Generating images with multimodal language models. In NeurIPS, 2023. +Black Forest Labs. Flux.1, 2024. +Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a. +Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023a. +Daiqing Li, Aleks Kamko, Ehsan Akhgari, Ali Sabet, Linmiao Xu, and Suhail Doshi. Playground v2. 5: Three insights towards enhancing aesthetic quality in text-to-image generation. arXiv preprint arXiv:2402.17245, 2024b. +Dongxu Li, Junnan Li, and Steven CH Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. In NeurIPS, 2023b. +Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024a. +Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2024b. +Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023. +Bingqi Ma, Zhuofan Zong, Guanglu Song, Hongsheng Li, and Yu Liu. Exploring the role of large language models in prompt encoding for diffusion models. In NeurIPS, 2024. +Yiyang Ma, Xingchao Liu, Xiaokang Chen, Wen Liu, Chengyue Wu, Zhiyu Wu, Zizheng Pan, Zhenda Xie, Haowei Zhang, Liang Zhao, et al. Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation. In CVPR, 2025. +Yuwei Niu, Munan Ning, Mengren Zheng, Bin Lin, Peng Jin, Jiaqi Liao, Kunpeng Ning, Bin Zhu, and Li Yuan. Wise: A world knowledge-informed semantic evaluation for text-to-image generation. arXiv preprint arXiv:2503.07265, 2025. +OpenAI. Introducing 4o image generation, 2025. https://openai.com/index/introducing-4o-image-generation/. +Xichen Pan, Li Dong, Shaohan Huang, Zhiliang Peng, Wenhu Chen, and Furu Wei. Kosmos-g: Generating images in context with multimodal large language models. In ICLR, 2024. +Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. +Adam Polyak, Amit Zohar, Andrew Brown, Andros Tjandra, Animesh Sinha, Ann Lee, Apoorv Vyas, Bowen Shi, Chih-Yao Ma, Ching-Yao Chuang, et al. Movie gen: A cast of media foundation models. arXiv preprint arXiv:2410.13720, 2024. +Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. +Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. 
In ICML, 2021. +Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2021. +Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023. +Weijia Shi, Xiaochuang Han, Chunting Zhou, Weixin Liang, Xi Victoria Lin, Luke Zettlemoyer, and Lili Yu. Llamafusion: Adapting pretrained language models for multimodal generation. arXiv preprint arXiv:2412.15188, 2024. + +Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In $CVPR$ , 2024a. +Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative pretraining in multimodality. In ICLR, 2024b. +Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024a. +Qwen Team. Qwen2.5: A party of foundation models, 2024b. +Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024. +Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. +Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. +Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. In CVPR, 2025a. +Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023. +Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. In ICLR, 2025b. +Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and Zheng Liu. Omnigen: Unified image generation. In CVPR, 2025. +Enze Xie, Junsong Chen, Junyu Chen, Han Cai, Haotian Tang, Yujun Lin, Zhekai Zhang, Muyang Li, Ligeng Zhu, Yao Lu, et al. Sana: Efficient high-resolution image synthesis with linear diffusion transformers. In ICLR, 2025. +Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024. +Zhiyuan Yan, Junyan Ye, Weijia Li, Zilong Huang, Shenghai Yuan, Xiangyang He, Kaiqing Lin, Jun He, Conghui He, and Li Yuan. Gpt-imgeval: A comprehensive benchmark for diagnosing gpt4o in image generation. arXiv preprint arXiv:2504.02782, 2025. +Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. 
Scaling autoregressive models for content-rich text-to-image generation. In TMLR, 2022. +Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. +Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023. +Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In CVPR, 2024. +Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023. +Kai Zhang, Yi Luan, Hexiang Hu, Kenton Lee, Siyuan Qiao, Wenhu Chen, Yu Su, and Ming-Wei Chang. Magiclens: Self-supervised image retrieval with open-ended instructions. In ICML, 2024. +Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. In ICLR, 2025. + +Wanrong Zhu, Jack Hessel, Anas Awadalla, Samir Yitzhak Gadre, Jesse Dodge, Alex Fang, Youngjae Yu, Ludwig Schmidt, William Yang Wang, and Yejin Choi. Multimodal C4: An open, billion-scale corpus of images interleaved with text. In NeurIPS, 2023. +Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al. Lumina next: Making lumina-t2x stronger and faster with next-dit. arXiv preprint arXiv:2406.18583, 2024. + +# Appendix + +# A Data Curation Details + +For the data curation part, we use Qwen/Qwen2-VL-7B-Instruct² as our MLLM, The system prompt we are using is: + +Based on the provided of one or multiple source images, one target image, and their captions, create an interesting text prompt that can be used with the source images to generate the target image. + +This prompt should include: + +- one general and unspecific similarity shared with the source images (same jersey top, similar axe, similar building, etc). +- all differences that only the target image has. + +This prompt should NOT include: + +- any specific details that would allow generating the target image independently without referencing the source images. + +Remember the prompt should be concise and short. The generation has to be done by combining the source images and text prompts. + +# B Qualitative Comparison with SOTA Open-Source Model on Text-to-Image Generation + +We provide a qualitative comparison with Janus-Pro-7B (Chen et al., 2025) on MJHQ-30K (Li et al., 2024b) in Figure 10. We can see that MetaQuery-XL follows the prompt better and generates more visually appealing images than Janus-Pro-7B. + +# C Training Objectives + +
| Objective | Rel. Wall Time | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ |
| --- | --- | --- | --- | --- |
| Text-to-Image | 1.0x | 7.43 | 0.56 | 75.35 |
| Image Reconstruction | 2.79x | 27.42 | 0.32 | 68.36 |
| Mix | 2.61x | 8.27 | 0.54 | 76.53 |
+ +Table 10 Study on training objectives. Image reconstruction objective can be mixed with text-to-image objective to enable image reconstruction capabilities without harming visual quality and prompt alignment. + +We are using an MLLM for multimodal perception, besides the standard text-to-image objective, we can also use an image reconstruction objective to achieve alignment. In Table 10, we show that training with the text-to-image objective achieves much better performance than the image reconstruction objective. We demonstrate that a mix of both objectives can enable image reconstruction capabilities without being generally harmful to the T2I performance. + +![](images/14bedde3fdc5db9a2502133501221e3d482ecd91d5259320c8bb8efeadfee1fd.jpg) +Figure 10 Qualitative comparison with Janus-Pro-7B (Chen et al., 2025) on MJHQ-30K (Li et al., 2024b). \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06256/images/0030c217861cbcb07db1e9bf4497c51ebe5ff2aa3fc53234babc79d9c4ca53eb.jpg b/data/2025/2504_06xxx/2504.06256/images/0030c217861cbcb07db1e9bf4497c51ebe5ff2aa3fc53234babc79d9c4ca53eb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f97c2f317259392bfe737fdb4c7c1ba53467332a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/0030c217861cbcb07db1e9bf4497c51ebe5ff2aa3fc53234babc79d9c4ca53eb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e026b44cd9785cda1ee68f5da773e1c71af817066c91e12d64b1c09274e00d3 +size 5137 diff --git a/data/2025/2504_06xxx/2504.06256/images/0c9fb1b5858a9ad2270627f7e7eb4d935d61f2172945f72b652a3578d4f08323.jpg b/data/2025/2504_06xxx/2504.06256/images/0c9fb1b5858a9ad2270627f7e7eb4d935d61f2172945f72b652a3578d4f08323.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f782eacb184d636045ea80d37b8a22bd6f002d4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/0c9fb1b5858a9ad2270627f7e7eb4d935d61f2172945f72b652a3578d4f08323.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2beb08c97af075d0dc4dd6f610e85264cf0cc041d7e829fd33b92a70f9ee8244 +size 13917 diff --git a/data/2025/2504_06xxx/2504.06256/images/12319674663acc97cbdaaa2425ad0014a35fc74e9a349cf01d441b15deb73c8a.jpg b/data/2025/2504_06xxx/2504.06256/images/12319674663acc97cbdaaa2425ad0014a35fc74e9a349cf01d441b15deb73c8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd8c553790c7ddcfb491926de002ce9926a1036f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/12319674663acc97cbdaaa2425ad0014a35fc74e9a349cf01d441b15deb73c8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:289d766a184504086725990ed008f2da97cf1486211f166837267ee32caa7b76 +size 38439 diff --git a/data/2025/2504_06xxx/2504.06256/images/14bedde3fdc5db9a2502133501221e3d482ecd91d5259320c8bb8efeadfee1fd.jpg b/data/2025/2504_06xxx/2504.06256/images/14bedde3fdc5db9a2502133501221e3d482ecd91d5259320c8bb8efeadfee1fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41d7d768fd5232069a23a48717d3bd9569868024 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/14bedde3fdc5db9a2502133501221e3d482ecd91d5259320c8bb8efeadfee1fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec39c91d9b0763ff958a1d4d5828ce50584fcdbb053e74c2384c044ac3cdd6cc +size 316143 diff --git a/data/2025/2504_06xxx/2504.06256/images/1cc8421e582befa6179530b3cee34505907ff5676ee7a14c60dbe6b90cbebfea.jpg 
b/data/2025/2504_06xxx/2504.06256/images/1cc8421e582befa6179530b3cee34505907ff5676ee7a14c60dbe6b90cbebfea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7abefec57f32b6a637440787ba964d3f09c2d8fd --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/1cc8421e582befa6179530b3cee34505907ff5676ee7a14c60dbe6b90cbebfea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d26c0ec873f15a9ef87a1fe296eacfc80ee0876634da878fab03ca25485bbf7f +size 22292 diff --git a/data/2025/2504_06xxx/2504.06256/images/26e5def00142de348936cfa334f6dcbef2aa8e4cd65e4ff90e11e7056c47e06b.jpg b/data/2025/2504_06xxx/2504.06256/images/26e5def00142de348936cfa334f6dcbef2aa8e4cd65e4ff90e11e7056c47e06b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a109ce641f378e540b8d7e7eff6b9a8581194752 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/26e5def00142de348936cfa334f6dcbef2aa8e4cd65e4ff90e11e7056c47e06b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e550411f4d9160a18e4f26d2affa1e1e3561a88f9f26e48547728b03840309e5 +size 37422 diff --git a/data/2025/2504_06xxx/2504.06256/images/31a647ae7331f834c474e220328e1e72996313388bcd70d7447ba4ad111f554a.jpg b/data/2025/2504_06xxx/2504.06256/images/31a647ae7331f834c474e220328e1e72996313388bcd70d7447ba4ad111f554a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..805863e2d6b5aa7791435404f80debf2e76b092a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/31a647ae7331f834c474e220328e1e72996313388bcd70d7447ba4ad111f554a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1157f91cb1162a7ab734ad95d63dcc3eb5076693a083b257d9a2b69b09a178cd +size 15336 diff --git a/data/2025/2504_06xxx/2504.06256/images/34f8d42ab9a5c73eb8ad1887541fb0ba22d092eca4923276bffde665da809607.jpg b/data/2025/2504_06xxx/2504.06256/images/34f8d42ab9a5c73eb8ad1887541fb0ba22d092eca4923276bffde665da809607.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20f89b7a827ede7c80d3bea4d3a3f73c7bb7eadb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/34f8d42ab9a5c73eb8ad1887541fb0ba22d092eca4923276bffde665da809607.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65975476b71dd6835f6b9afae10d74eb2cfc181037b33ba9b79f2a09c4aef891 +size 21660 diff --git a/data/2025/2504_06xxx/2504.06256/images/36fc86f721567164cf07c1649bdde9d18e478995d36fbc4ff03ad2d2af643563.jpg b/data/2025/2504_06xxx/2504.06256/images/36fc86f721567164cf07c1649bdde9d18e478995d36fbc4ff03ad2d2af643563.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d28693d3e9d1f7b23e66df305913a0ab2a7d901 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/36fc86f721567164cf07c1649bdde9d18e478995d36fbc4ff03ad2d2af643563.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:824ac336854d694fdea78d9fdafbd919bbd140c59f267376a7758050e47678ae +size 20998 diff --git a/data/2025/2504_06xxx/2504.06256/images/4073247d621f7d479da91cfa9b7a98561d024ab67b1715f504a15a138a4b5b54.jpg b/data/2025/2504_06xxx/2504.06256/images/4073247d621f7d479da91cfa9b7a98561d024ab67b1715f504a15a138a4b5b54.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b60ac922e16324ab52bbcba2cf59e1fbe53b727d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/4073247d621f7d479da91cfa9b7a98561d024ab67b1715f504a15a138a4b5b54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4db555a9ad8d4dd8102f38b2fcf385a8bcbe62d99903a7a5ee20dc3915098335 +size 124787 diff --git a/data/2025/2504_06xxx/2504.06256/images/4149a2a9d8a0180d46b942d93daa35e3ebf8ddc950c9a4f1d92f2c80d369bf94.jpg b/data/2025/2504_06xxx/2504.06256/images/4149a2a9d8a0180d46b942d93daa35e3ebf8ddc950c9a4f1d92f2c80d369bf94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e510a597fb665f8aae55600a29a261a2af04683 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/4149a2a9d8a0180d46b942d93daa35e3ebf8ddc950c9a4f1d92f2c80d369bf94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac6b1d69d7accb58961ea8f9743a3ff393ab2787e2e917b13697ff5faafa6cd3 +size 17995 diff --git a/data/2025/2504_06xxx/2504.06256/images/4419bc1f599e2b5e75cfd27fe839c397d664930cb4e4ea9f79c6d864c52b521d.jpg b/data/2025/2504_06xxx/2504.06256/images/4419bc1f599e2b5e75cfd27fe839c397d664930cb4e4ea9f79c6d864c52b521d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98ec8d6b29a95f343e6eaaaaeb37425d9594340d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/4419bc1f599e2b5e75cfd27fe839c397d664930cb4e4ea9f79c6d864c52b521d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f31237edb63459849dd17ca430bf7d6e1814276f8fb5f7406200b25b2256524e +size 3895 diff --git a/data/2025/2504_06xxx/2504.06256/images/44c2d287d692818c451c8e0a355531754e120c05a69103809eff8e26775d92fd.jpg b/data/2025/2504_06xxx/2504.06256/images/44c2d287d692818c451c8e0a355531754e120c05a69103809eff8e26775d92fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..615bb583136242f262413fef442dacdd1f998438 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/44c2d287d692818c451c8e0a355531754e120c05a69103809eff8e26775d92fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c036d20957eae995f53d9f4a1fd96422ef2426e7822afe2f0d0994e07345c08b +size 61588 diff --git a/data/2025/2504_06xxx/2504.06256/images/47401bdfe62a96981c1feaecf18c9d323881824be0d85aa784265e9a19d202c9.jpg b/data/2025/2504_06xxx/2504.06256/images/47401bdfe62a96981c1feaecf18c9d323881824be0d85aa784265e9a19d202c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb59bdb49dbf38f0ba3029f209de7be7930e118a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/47401bdfe62a96981c1feaecf18c9d323881824be0d85aa784265e9a19d202c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c356ae57ae7561fad097f72a1e81a9cd28e64ef98a9d4573a25c42da450b3d8 +size 3230 diff --git a/data/2025/2504_06xxx/2504.06256/images/47fde3a154dfb5fd97836334bedd3adfe43814936d49863da6f7b62e487a3a46.jpg b/data/2025/2504_06xxx/2504.06256/images/47fde3a154dfb5fd97836334bedd3adfe43814936d49863da6f7b62e487a3a46.jpg new file mode 100644 index 0000000000000000000000000000000000000000..deb821b15d4cd8bd81b6d9bde5d1d467ef482340 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/47fde3a154dfb5fd97836334bedd3adfe43814936d49863da6f7b62e487a3a46.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1833d9f72336ef0469f9622e161952b64d82dcb7bbe53bf349c1a74ee50a4ae4 +size 104309 diff --git a/data/2025/2504_06xxx/2504.06256/images/4b3982cc5c2c19d0b2fd26b9f06c0bbf5ad2f7331e1a2829e8a6c44d7973e951.jpg b/data/2025/2504_06xxx/2504.06256/images/4b3982cc5c2c19d0b2fd26b9f06c0bbf5ad2f7331e1a2829e8a6c44d7973e951.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a88d18d3268f3ba6e84145ae797ea98397d7f10b --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06256/images/4b3982cc5c2c19d0b2fd26b9f06c0bbf5ad2f7331e1a2829e8a6c44d7973e951.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65b09c181112a5af8fff41a2d558886eed43fe43cb28b417683f76fd08ae584f +size 13170 diff --git a/data/2025/2504_06xxx/2504.06256/images/4cd49de60a91872c7f33c2ed1240629712279838d61f4ad56a145108151af2af.jpg b/data/2025/2504_06xxx/2504.06256/images/4cd49de60a91872c7f33c2ed1240629712279838d61f4ad56a145108151af2af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bae7dbe18fcb48fa2fd47eba3f41c2ebee9981ac --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/4cd49de60a91872c7f33c2ed1240629712279838d61f4ad56a145108151af2af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:841d2e2a751bb03740cbd14f138490cfdf235dc86fc0aee47acea52ca8dc46b2 +size 16251 diff --git a/data/2025/2504_06xxx/2504.06256/images/55aca72edd9bc6fd2358694218fae4f8e6d7e4c0987fbd61f62661ac387ebd35.jpg b/data/2025/2504_06xxx/2504.06256/images/55aca72edd9bc6fd2358694218fae4f8e6d7e4c0987fbd61f62661ac387ebd35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ef889d254f58c7e07923af38993fffe81022f1d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/55aca72edd9bc6fd2358694218fae4f8e6d7e4c0987fbd61f62661ac387ebd35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3a97695db9f52b4a1c50ec539a2366f3222d628bcf556ef05957fba06123bf8 +size 21376 diff --git a/data/2025/2504_06xxx/2504.06256/images/5d46cf09b50e4f98422bb851e2dd13b1734ea4bebc37a20eb08828e94d103b92.jpg b/data/2025/2504_06xxx/2504.06256/images/5d46cf09b50e4f98422bb851e2dd13b1734ea4bebc37a20eb08828e94d103b92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2fbba7cbadf9ea0ad0d332c11f810f16b6fa5460 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/5d46cf09b50e4f98422bb851e2dd13b1734ea4bebc37a20eb08828e94d103b92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65ac1a1a480d080523265f1256f0e7fada7d98db10530394162ba1325388cd98 +size 6029 diff --git a/data/2025/2504_06xxx/2504.06256/images/61d5ab558a3fffaa336d26f1f2171b61a5a2ce74c40d4cd1af84fced70c45a4e.jpg b/data/2025/2504_06xxx/2504.06256/images/61d5ab558a3fffaa336d26f1f2171b61a5a2ce74c40d4cd1af84fced70c45a4e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9604f19ec2b4d40aa6f70ced5151a4dbcdfa5266 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/61d5ab558a3fffaa336d26f1f2171b61a5a2ce74c40d4cd1af84fced70c45a4e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf1adf6c05f9f362106d4dfae766ea59d75c9c5a5c0b123c4d3d3af73c4fb153 +size 5160 diff --git a/data/2025/2504_06xxx/2504.06256/images/6ba2cccb890caaf3726ac0b89e0a6017e25a056bd986ad530823545b6cc653b2.jpg b/data/2025/2504_06xxx/2504.06256/images/6ba2cccb890caaf3726ac0b89e0a6017e25a056bd986ad530823545b6cc653b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..507ff7bdd8f7569eda94c915ad812007e4915dce --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/6ba2cccb890caaf3726ac0b89e0a6017e25a056bd986ad530823545b6cc653b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f0e68752bd8132daab411eb09bb4101ee0535a8fd8f1fcb9bce6d8fc79076dd +size 4669 diff --git a/data/2025/2504_06xxx/2504.06256/images/6cbcca96c2745b0fc235a258d5debaaacb8baf3df31684a63e481d46e4678b1c.jpg 
b/data/2025/2504_06xxx/2504.06256/images/6cbcca96c2745b0fc235a258d5debaaacb8baf3df31684a63e481d46e4678b1c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f208048df84af71889203bd8d8e9010fc1ee53f7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/6cbcca96c2745b0fc235a258d5debaaacb8baf3df31684a63e481d46e4678b1c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01395e899aac2889ab609a1311298c612367e3fe531c665f73db97191953b640 +size 39773 diff --git a/data/2025/2504_06xxx/2504.06256/images/70b0361654016b250254d8a9163f2affb03ec516f759c8748b50254aae39b093.jpg b/data/2025/2504_06xxx/2504.06256/images/70b0361654016b250254d8a9163f2affb03ec516f759c8748b50254aae39b093.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8897cf60c246788d3d45318562f83547f9d6f25 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/70b0361654016b250254d8a9163f2affb03ec516f759c8748b50254aae39b093.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f99aa227294a8a3cc9281bf61e64c5b485e066151a0a23300f10b93045a5f3e +size 24722 diff --git a/data/2025/2504_06xxx/2504.06256/images/70fdf417d4fb39bf516fdf05899650efd71ec63b281c5cfcbbf45619d8d2b2b4.jpg b/data/2025/2504_06xxx/2504.06256/images/70fdf417d4fb39bf516fdf05899650efd71ec63b281c5cfcbbf45619d8d2b2b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..264b7a3228b0031752840c2a9cbd29dc84b54a46 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/70fdf417d4fb39bf516fdf05899650efd71ec63b281c5cfcbbf45619d8d2b2b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4102cafc796f9cb75a385075d61be4659f8c054116b8899cfb3f4a985af26752 +size 71468 diff --git a/data/2025/2504_06xxx/2504.06256/images/71a1d9a265fd63ade28febee74e11c217a12bde87dcfcf180be066256678952a.jpg b/data/2025/2504_06xxx/2504.06256/images/71a1d9a265fd63ade28febee74e11c217a12bde87dcfcf180be066256678952a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc39d6f64ba7ccd3b9f4bbae61fc3edf1851e1f4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/71a1d9a265fd63ade28febee74e11c217a12bde87dcfcf180be066256678952a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e99aa77db3485300719631a9021d65b4770c7777dc08995a4783513a56b33ae9 +size 78852 diff --git a/data/2025/2504_06xxx/2504.06256/images/77bc365456c7f1a324580189122792f8db5d207f06c8180d8e1bf41346dbbc5d.jpg b/data/2025/2504_06xxx/2504.06256/images/77bc365456c7f1a324580189122792f8db5d207f06c8180d8e1bf41346dbbc5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..776188773f835ffcf509a7e3cadc6fb85dbe62df --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/77bc365456c7f1a324580189122792f8db5d207f06c8180d8e1bf41346dbbc5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84f0c1f16064c040c0f92c951db2919c0e16b506419895c44775f4f41962019a +size 25873 diff --git a/data/2025/2504_06xxx/2504.06256/images/7997eceab53dfc59027dfeeb9d00f0d54c438ad627f519543fa4528dc0b321de.jpg b/data/2025/2504_06xxx/2504.06256/images/7997eceab53dfc59027dfeeb9d00f0d54c438ad627f519543fa4528dc0b321de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b69ea92014fe2c41699483f1f4b72d361776e32f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/7997eceab53dfc59027dfeeb9d00f0d54c438ad627f519543fa4528dc0b321de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:33ba53e85cb036621edd15a6272503bd99a199d6c8ba707840ac18aeb9f45772 +size 4155 diff --git a/data/2025/2504_06xxx/2504.06256/images/7aae1bbc607ab3c1b39cc159c1512802e81ed3d35ad05074e80e97c48e4e8080.jpg b/data/2025/2504_06xxx/2504.06256/images/7aae1bbc607ab3c1b39cc159c1512802e81ed3d35ad05074e80e97c48e4e8080.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b13238e54763e4d54d1bec1e25ad431fc3e48a4f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/7aae1bbc607ab3c1b39cc159c1512802e81ed3d35ad05074e80e97c48e4e8080.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b774675071d030a7b83982d77999d914335cade2296fa84560f67c086e9e9b75 +size 35677 diff --git a/data/2025/2504_06xxx/2504.06256/images/7b499c68b1be62904daf47a04a892eb29c8a4c9f7d17cab06469846695a5b06a.jpg b/data/2025/2504_06xxx/2504.06256/images/7b499c68b1be62904daf47a04a892eb29c8a4c9f7d17cab06469846695a5b06a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04483ebdafd9e355238bcd40391f89e55091c67d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/7b499c68b1be62904daf47a04a892eb29c8a4c9f7d17cab06469846695a5b06a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b589890b36b045d13a5ebac6136da95a8932989dcd7c7006fe3708933ada7d8 +size 22415 diff --git a/data/2025/2504_06xxx/2504.06256/images/7d4c1fbb21fee1f8d29061754c600619607957874633c9de8a489aa2c276f85c.jpg b/data/2025/2504_06xxx/2504.06256/images/7d4c1fbb21fee1f8d29061754c600619607957874633c9de8a489aa2c276f85c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2075b010bc23a93bdc043c7fd1fe02a4be26444e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/7d4c1fbb21fee1f8d29061754c600619607957874633c9de8a489aa2c276f85c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac55271db0c73d1f123a07131b2753d59f5d1af5b94e60fc4fb1ced0b521568f +size 10492 diff --git a/data/2025/2504_06xxx/2504.06256/images/8e80f20eaaa577eb8299d8509c1b636a00a6be83769d922edded5c3bdd3e26fc.jpg b/data/2025/2504_06xxx/2504.06256/images/8e80f20eaaa577eb8299d8509c1b636a00a6be83769d922edded5c3bdd3e26fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ca91d06cb45da83bf398faf9befc6c7aede85d2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/8e80f20eaaa577eb8299d8509c1b636a00a6be83769d922edded5c3bdd3e26fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:288d42c53580b88226de9d28d1aefc5b95753565d93a465fab14c1754d373047 +size 13582 diff --git a/data/2025/2504_06xxx/2504.06256/images/924d212b61c2dbd9a97e97845fd03efdda618a18e51f97ff75229de657600cb8.jpg b/data/2025/2504_06xxx/2504.06256/images/924d212b61c2dbd9a97e97845fd03efdda618a18e51f97ff75229de657600cb8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e981af3ebbf4645646615b510b37321bed846d1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/924d212b61c2dbd9a97e97845fd03efdda618a18e51f97ff75229de657600cb8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c4717d516d78f99389195615a10c7461980ac5bcec08f6f326286762339ab89 +size 4149 diff --git a/data/2025/2504_06xxx/2504.06256/images/991b1ac03b36f5361d4e58d6868c9741411c1cc7e9963640e355b72a09945466.jpg b/data/2025/2504_06xxx/2504.06256/images/991b1ac03b36f5361d4e58d6868c9741411c1cc7e9963640e355b72a09945466.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43a0cac6e664b2ea6b4c65243d5f6837fc2f886c --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06256/images/991b1ac03b36f5361d4e58d6868c9741411c1cc7e9963640e355b72a09945466.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ebb2ada8be3625d9f58bb8113fe993dac3991b9b3f8279734716d48cddfeddd +size 124093 diff --git a/data/2025/2504_06xxx/2504.06256/images/9c147ce456e995372bd45563a786652f18b43eeb6d52c666af84093298afa877.jpg b/data/2025/2504_06xxx/2504.06256/images/9c147ce456e995372bd45563a786652f18b43eeb6d52c666af84093298afa877.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8677acdb757b321aa95bbe500e4a07f2f82eb92d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/9c147ce456e995372bd45563a786652f18b43eeb6d52c666af84093298afa877.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c7a174e61431f55cb0817cc6971d255fbb9a0a478e42bccf9e20fd8c355b5eb +size 11401 diff --git a/data/2025/2504_06xxx/2504.06256/images/9c6c074753e5c0db73a2544daf4a539eb7a42932749c7546b1c4f05818f8732f.jpg b/data/2025/2504_06xxx/2504.06256/images/9c6c074753e5c0db73a2544daf4a539eb7a42932749c7546b1c4f05818f8732f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83a505c6ed9334f41b4e216a15baf33b58a83250 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/9c6c074753e5c0db73a2544daf4a539eb7a42932749c7546b1c4f05818f8732f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7a15dd4b6d38948fe22a74450f0ce1ec92efe7e62371e0063be281ab0c76842 +size 11671 diff --git a/data/2025/2504_06xxx/2504.06256/images/9f32362161c9b5ac0fc53f7dbadac96f6648ff2c8824c55b33ce850f06278d39.jpg b/data/2025/2504_06xxx/2504.06256/images/9f32362161c9b5ac0fc53f7dbadac96f6648ff2c8824c55b33ce850f06278d39.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1a28196906d0e7d92aa65ea68228396cb0e96d1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/9f32362161c9b5ac0fc53f7dbadac96f6648ff2c8824c55b33ce850f06278d39.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acd67d26bef0b2f7fcb7a7a62538f481b353a10be6c14f845cacd3700f92588e +size 22649 diff --git a/data/2025/2504_06xxx/2504.06256/images/a626c2ad7ce49892adf9b4655ab02d93bb5762fc0945595b273155aa85a7da62.jpg b/data/2025/2504_06xxx/2504.06256/images/a626c2ad7ce49892adf9b4655ab02d93bb5762fc0945595b273155aa85a7da62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd503ed3a53585d2e01c3eaec7e5c16043f9a4aa --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/a626c2ad7ce49892adf9b4655ab02d93bb5762fc0945595b273155aa85a7da62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:579774388c4435d8d391567f1a4a4f8c48f36bbd3a544cb1f33bbdee9b10d5d4 +size 22990 diff --git a/data/2025/2504_06xxx/2504.06256/images/a64fb9d0751c7bee00cfe32d761c4cf3cf5e293940406f5287724778e33f7dd8.jpg b/data/2025/2504_06xxx/2504.06256/images/a64fb9d0751c7bee00cfe32d761c4cf3cf5e293940406f5287724778e33f7dd8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e931dfc0c9c6e079a70257a2ff14326479ee69e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/a64fb9d0751c7bee00cfe32d761c4cf3cf5e293940406f5287724778e33f7dd8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46d837ba875db77c2ea6f7b9d425403e6f1c689cfd2b16f412ba7670255628ef +size 15764 diff --git a/data/2025/2504_06xxx/2504.06256/images/a84d0b3eab726eb969d63ec7f14b4cf156b8489ede5810f95269439a025f5c84.jpg 
b/data/2025/2504_06xxx/2504.06256/images/a84d0b3eab726eb969d63ec7f14b4cf156b8489ede5810f95269439a025f5c84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b7cb0299d62168a71fae449b1f08a882a2f12f8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/a84d0b3eab726eb969d63ec7f14b4cf156b8489ede5810f95269439a025f5c84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aff7bca5cc494cc8ad7941fbab9227de152533e94cef457757bd8c5889143d74 +size 6525 diff --git a/data/2025/2504_06xxx/2504.06256/images/aa994d8f438857841af137ce9cf69d2079d81944e8baa713cb1e310ebbc5a032.jpg b/data/2025/2504_06xxx/2504.06256/images/aa994d8f438857841af137ce9cf69d2079d81944e8baa713cb1e310ebbc5a032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75255cd1bc234cd92056033dbf7fc460501b178f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/aa994d8f438857841af137ce9cf69d2079d81944e8baa713cb1e310ebbc5a032.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0733dd9a8d40dac31e7bb6e79d21d9595c166952b7033cbf183d94c310ce418 +size 21006 diff --git a/data/2025/2504_06xxx/2504.06256/images/ab4ad8e76c716680a874cb79f434e48a84893b5174e1de3b19378e6c89009ff3.jpg b/data/2025/2504_06xxx/2504.06256/images/ab4ad8e76c716680a874cb79f434e48a84893b5174e1de3b19378e6c89009ff3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ce890f71041e7540cbd6498d30a150ec5913300 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/ab4ad8e76c716680a874cb79f434e48a84893b5174e1de3b19378e6c89009ff3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:625bca4a1ca17eb45aa5b361d8c46da414ab9c45147a05c0d75c57a19745f70b +size 13784 diff --git a/data/2025/2504_06xxx/2504.06256/images/af25337f48bf2a1fcba79b602b7512fb3badd1f589b46c51e3d25f9b094dac98.jpg b/data/2025/2504_06xxx/2504.06256/images/af25337f48bf2a1fcba79b602b7512fb3badd1f589b46c51e3d25f9b094dac98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ba3ec4227de1582ebb001f470bb79d17cb1d5c9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/af25337f48bf2a1fcba79b602b7512fb3badd1f589b46c51e3d25f9b094dac98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46807ee0c37c48164304853e19990a18979d9a1622e2629641eecd910c2b1e97 +size 23334 diff --git a/data/2025/2504_06xxx/2504.06256/images/b12ee3389eb59af291e4591659081ba05f6a90a9227a6beb350600174faf3f18.jpg b/data/2025/2504_06xxx/2504.06256/images/b12ee3389eb59af291e4591659081ba05f6a90a9227a6beb350600174faf3f18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4e2f6d9a0820f3b12e03d18cf8e00001455f2bc --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/b12ee3389eb59af291e4591659081ba05f6a90a9227a6beb350600174faf3f18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a312701c98b4d0cbaf3ba3a0ac97ecdcde1f7dd060be8ac1b433c0b6ce471b8c +size 20711 diff --git a/data/2025/2504_06xxx/2504.06256/images/b3b8ac37fa24e66c37666f35b4a47270abc1ff2917320e9161e2464df3cd9ee6.jpg b/data/2025/2504_06xxx/2504.06256/images/b3b8ac37fa24e66c37666f35b4a47270abc1ff2917320e9161e2464df3cd9ee6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc1250a4ba6f37fcb1926f954f787ad3126a867c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/b3b8ac37fa24e66c37666f35b4a47270abc1ff2917320e9161e2464df3cd9ee6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:56bc8a54f08d1090d3132df1ea9646881baa3e7d0d87ed81128d2ef7adfae1b8 +size 18679 diff --git a/data/2025/2504_06xxx/2504.06256/images/b77f0c069a70b297d343734d4179c4df607c59c0e70650ea18864234e7002424.jpg b/data/2025/2504_06xxx/2504.06256/images/b77f0c069a70b297d343734d4179c4df607c59c0e70650ea18864234e7002424.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be5eea298ada80ce9cd4d56ef15e751213df7435 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/b77f0c069a70b297d343734d4179c4df607c59c0e70650ea18864234e7002424.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69eb441947bcb219a909d537e82a7608fff99b97d106733ac1ddb3a2a405c60e +size 3212 diff --git a/data/2025/2504_06xxx/2504.06256/images/b9c3a23589c96c41551027759ee3efb1784846504d3b7874cd009fd5d0fedae2.jpg b/data/2025/2504_06xxx/2504.06256/images/b9c3a23589c96c41551027759ee3efb1784846504d3b7874cd009fd5d0fedae2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..579898ab7a8d35ea843a53ad3323acb96208f0e3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/b9c3a23589c96c41551027759ee3efb1784846504d3b7874cd009fd5d0fedae2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6344e229d7c1f418047d33ff2d897ff17a02a55def8c844a1c06a274eace1e65 +size 20945 diff --git a/data/2025/2504_06xxx/2504.06256/images/bfd2befb3965d454b9427d56799e66522902a3b508eda96ad2dd99909ec45430.jpg b/data/2025/2504_06xxx/2504.06256/images/bfd2befb3965d454b9427d56799e66522902a3b508eda96ad2dd99909ec45430.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc7d093075252f1e4a47799762f3506f8c3ba67e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/bfd2befb3965d454b9427d56799e66522902a3b508eda96ad2dd99909ec45430.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2be102e67a583f74719b1a3dcdb40d9b72fa5d6f35bad778368c2ddcf71ee61 +size 145988 diff --git a/data/2025/2504_06xxx/2504.06256/images/c209fc3515bcd2eea2d99ebc2865c335d0d995899ce2bd7acc199d906ccb6ecc.jpg b/data/2025/2504_06xxx/2504.06256/images/c209fc3515bcd2eea2d99ebc2865c335d0d995899ce2bd7acc199d906ccb6ecc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1bee750d17012526939280e1e04666f283ba4ff9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/c209fc3515bcd2eea2d99ebc2865c335d0d995899ce2bd7acc199d906ccb6ecc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d22760413f4cc085f1cb64c6e29fa4d10b847c94c3188f0e509200290af86e6 +size 4277 diff --git a/data/2025/2504_06xxx/2504.06256/images/c2baf0f2aa9245073fed0d9b72888184994aaa31dccb83be0fee93ec0a386d19.jpg b/data/2025/2504_06xxx/2504.06256/images/c2baf0f2aa9245073fed0d9b72888184994aaa31dccb83be0fee93ec0a386d19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f553ef67357ccbb5d39ff9c98577125c1535343 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/c2baf0f2aa9245073fed0d9b72888184994aaa31dccb83be0fee93ec0a386d19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf9c342048278877d8a172a05fda27e0df021ba6f741907a6fd1a79567027c47 +size 10048 diff --git a/data/2025/2504_06xxx/2504.06256/images/cb12222fe568d3d46a55c40b1795a095b91866ad756d26fc3e7316d7bcc184af.jpg b/data/2025/2504_06xxx/2504.06256/images/cb12222fe568d3d46a55c40b1795a095b91866ad756d26fc3e7316d7bcc184af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a91f3262881cb81eabf673ad4c72ad8aa43a7ca2 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06256/images/cb12222fe568d3d46a55c40b1795a095b91866ad756d26fc3e7316d7bcc184af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a26fc8e4ee3678297cf6b57f2262626f9e4f1bd2adc0e15b1c538211569a1ec2 +size 37345 diff --git a/data/2025/2504_06xxx/2504.06256/images/ce8bf82f52247f065d1f56aaad4e4693afc7e3ffbf3a357566646853e081a9b8.jpg b/data/2025/2504_06xxx/2504.06256/images/ce8bf82f52247f065d1f56aaad4e4693afc7e3ffbf3a357566646853e081a9b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eeb33b558de6b53022511a26bdedc262eb3f9ef8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/ce8bf82f52247f065d1f56aaad4e4693afc7e3ffbf3a357566646853e081a9b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f32679e830f49af042fe8fbe5c7a2c87f8c15d2ccc3228a388f9f28c9a14406e +size 28624 diff --git a/data/2025/2504_06xxx/2504.06256/images/d97f1701555c6678bd51e7b23d877a0a7ed770994c0ffded7c265bac83b8a108.jpg b/data/2025/2504_06xxx/2504.06256/images/d97f1701555c6678bd51e7b23d877a0a7ed770994c0ffded7c265bac83b8a108.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38cef1f78c72d4b9db2b5ef4a8f01468da3fe01d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/d97f1701555c6678bd51e7b23d877a0a7ed770994c0ffded7c265bac83b8a108.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98ea14a6b35fec2675d093abcdf78eda8141c42d7fcb3d8b1b26a7e936b6d46c +size 13320 diff --git a/data/2025/2504_06xxx/2504.06256/images/dde05b8ac87ef551f6c6722f0f3a69c9df1b0be719bc5033e26f38b356ace440.jpg b/data/2025/2504_06xxx/2504.06256/images/dde05b8ac87ef551f6c6722f0f3a69c9df1b0be719bc5033e26f38b356ace440.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5b52f61989a6211736a90f36b7df469bdaffa75 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/dde05b8ac87ef551f6c6722f0f3a69c9df1b0be719bc5033e26f38b356ace440.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1723c9bfd3e176dfed99731dca2cb336ab88f2f07ad25aa63c2a4193a2ff373 +size 16085 diff --git a/data/2025/2504_06xxx/2504.06256/images/e30e2bc1074dc3fbe3a878833ad91c7795ce47ef394f738cf6e0ce70967c24f7.jpg b/data/2025/2504_06xxx/2504.06256/images/e30e2bc1074dc3fbe3a878833ad91c7795ce47ef394f738cf6e0ce70967c24f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..266b7f2a24e4fbb3518f196fb2646818f97b08fd --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/e30e2bc1074dc3fbe3a878833ad91c7795ce47ef394f738cf6e0ce70967c24f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74c479e233a37f2ef9df0c3d87f08956d42f7f7b84b0112b931a67b00c335a71 +size 26124 diff --git a/data/2025/2504_06xxx/2504.06256/images/e8e1b7bf4cec9f8528f7381916efc241f178f796c22ba90ba0983c2557c0ac6c.jpg b/data/2025/2504_06xxx/2504.06256/images/e8e1b7bf4cec9f8528f7381916efc241f178f796c22ba90ba0983c2557c0ac6c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5e9db8799935808d1be429cf3092d5463a76683 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/e8e1b7bf4cec9f8528f7381916efc241f178f796c22ba90ba0983c2557c0ac6c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a76d0324f10ec778e345a84d0bcef200121b298f1822d3d2379483ab5b043d0 +size 16839 diff --git a/data/2025/2504_06xxx/2504.06256/images/edcb210930a495f6468a261995e8cd43331cc08b7917631b87941c67eb85b779.jpg 
b/data/2025/2504_06xxx/2504.06256/images/edcb210930a495f6468a261995e8cd43331cc08b7917631b87941c67eb85b779.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a8693d5c832d60ea87da2df359e534bd8288cab --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/edcb210930a495f6468a261995e8cd43331cc08b7917631b87941c67eb85b779.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe0f8bb36a5987198787941bc04b30c9dddcd98430a8ac7d45129092415aaff4 +size 26272 diff --git a/data/2025/2504_06xxx/2504.06256/images/f5b932ba4b3af60c9595d23c21d7eb8e63a7d357dde1449e8e9a53b30d301584.jpg b/data/2025/2504_06xxx/2504.06256/images/f5b932ba4b3af60c9595d23c21d7eb8e63a7d357dde1449e8e9a53b30d301584.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d775db73e5e097037fecf51ac4f12dea4bd2fac --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/f5b932ba4b3af60c9595d23c21d7eb8e63a7d357dde1449e8e9a53b30d301584.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:657e07124a014c990fae9de92e3ea2f0e138edf769ef3ba68a0cbc117d177bc5 +size 26376 diff --git a/data/2025/2504_06xxx/2504.06256/images/f604c38dcdbffc2511f059b98a51508f48de17764a2bbbc28a2ac4936d985f9d.jpg b/data/2025/2504_06xxx/2504.06256/images/f604c38dcdbffc2511f059b98a51508f48de17764a2bbbc28a2ac4936d985f9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fb69cc7b066fb08899772f284594240ff552f7e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/f604c38dcdbffc2511f059b98a51508f48de17764a2bbbc28a2ac4936d985f9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:695f6cf0e1bdfca173cd0bbc5d5748a0dcf45de2147ed8f5477ee60de4e05bc1 +size 26331 diff --git a/data/2025/2504_06xxx/2504.06256/images/f7cb61c92aff1e1a951a059200c3802b85f7a6bad339597ac1ffbfbdec16a6fd.jpg b/data/2025/2504_06xxx/2504.06256/images/f7cb61c92aff1e1a951a059200c3802b85f7a6bad339597ac1ffbfbdec16a6fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3f8f770fccbfcf59bc4d8ba2a60c96914338379 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/f7cb61c92aff1e1a951a059200c3802b85f7a6bad339597ac1ffbfbdec16a6fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5a6d47f322e9f97914d570721509f85696c112c9131f0c74892712ce37b860a +size 24982 diff --git a/data/2025/2504_06xxx/2504.06256/images/f9420a60fcb422ba7b6a72e7f07a960eb7ba70e86790181846908582736eb2a3.jpg b/data/2025/2504_06xxx/2504.06256/images/f9420a60fcb422ba7b6a72e7f07a960eb7ba70e86790181846908582736eb2a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f777b181e62c6a04bfc90877466273fee869982c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/f9420a60fcb422ba7b6a72e7f07a960eb7ba70e86790181846908582736eb2a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c583b8f87fe10e60fa14009d55d9ab220a2863ee6f4ede3072e7963a1dde47a +size 27033 diff --git a/data/2025/2504_06xxx/2504.06256/images/fcbdb08c63f65558cdf160396bedb8523d98a6851b524df3201eec1f27a037b8.jpg b/data/2025/2504_06xxx/2504.06256/images/fcbdb08c63f65558cdf160396bedb8523d98a6851b524df3201eec1f27a037b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c95728b618008bc37fe91851d71fe35cfd4f4c7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/fcbdb08c63f65558cdf160396bedb8523d98a6851b524df3201eec1f27a037b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:97713ef3ae8cb9fc98663c3632b1e538f60a8d767a8ce583b06cb31a155ee9e9 +size 17739 diff --git a/data/2025/2504_06xxx/2504.06256/images/fe14b58523fd1199df8a35521fb8b1b3670ba32ccfbb5bcd196c0ff9f20299ca.jpg b/data/2025/2504_06xxx/2504.06256/images/fe14b58523fd1199df8a35521fb8b1b3670ba32ccfbb5bcd196c0ff9f20299ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..315329772c9e53ca83e4ae1473b201d28c226ea3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/images/fe14b58523fd1199df8a35521fb8b1b3670ba32ccfbb5bcd196c0ff9f20299ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19c9fa400c1527aaa797fa86066e1634fd26aa8c463c6dd33599b7aee07ccf0d +size 20501 diff --git a/data/2025/2504_06xxx/2504.06256/layout.json b/data/2025/2504_06xxx/2504.06256/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3ce927672628ea8595a0e4ab3e2b59ad43f9730b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06256/layout.json @@ -0,0 +1,11313 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 85, + 79, + 495, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 79, + 495, + 99 + ], + "spans": [ + { + "bbox": [ + 85, + 79, + 495, + 99 + ], + "type": "text", + "content": "Transfer between Modalities with MetaQueries" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "spans": [ + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": "Xichen Pan" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Satya Narayan Shukla" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1,\\dagger}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Aashu Singh" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Zhuokai Zhao" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Shlok Kumar Mishra" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Jialiang Wang" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Zhiyang Xu" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Jiuhai Chen" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Kunpeng Li" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Felix Juefei-Xu" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 
102, + 515, + 128 + ], + "type": "text", + "content": ", Ji Hou" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{1,\\dagger}" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "text", + "content": ", Saining Xie" + }, + { + "bbox": [ + 83, + 102, + 515, + 128 + ], + "type": "inline_equation", + "content": "^{2,\\dagger}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 132, + 216, + 157 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 85, + 132, + 216, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 132, + 216, + 144 + ], + "spans": [ + { + "bbox": [ + 85, + 132, + 216, + 144 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 85, + 132, + 216, + 144 + ], + "type": "text", + "content": "Meta, " + }, + { + "bbox": [ + 85, + 132, + 216, + 144 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 85, + 132, + 216, + 144 + ], + "type": "text", + "content": "New York University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 85, + 145, + 151, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 145, + 151, + 157 + ], + "spans": [ + { + "bbox": [ + 85, + 145, + 151, + 157 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 85, + 145, + 151, + 157 + ], + "type": "text", + "content": " Equal advising" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 171, + 526, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 171, + 526, + 303 + ], + "spans": [ + { + "bbox": [ + 83, + 171, + 526, + 303 + ], + "type": "text", + "content": "Unified multimodal models aim to integrate understanding (text output) and generation (pixel output), but aligning these different modalities within a single architecture often demands complex training recipes and careful data balancing. We introduce MetaQueries, a set of learnable queries that act as an efficient interface between autoregressive multimodal LLMs (MLLMs) and diffusion models. MetaQueries connects the MLLM's latents to the diffusion decoder, enabling knowledge-augmented image generation by leveraging the MLLM's deep understanding and reasoning capabilities. Our method simplifies training, requiring only paired image-caption data and standard diffusion objectives. Notably, this transfer is effective even when the MLLM backbone remains frozen, thereby preserving its state-of-the-art multimodal understanding capabilities while achieving strong generative performance. Additionally, our method is flexible and can be easily instruction-tuned for advanced applications such as image editing and subject-driven generation." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 85, + 318, + 166, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 318, + 166, + 329 + ], + "spans": [ + { + "bbox": [ + 85, + 318, + 166, + 329 + ], + "type": "text", + "content": "Date: April 9, 2025" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 85, + 331, + 414, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 331, + 414, + 341 + ], + "spans": [ + { + "bbox": [ + 85, + 331, + 414, + 341 + ], + "type": "text", + "content": "Correspondence: satyanshukla@meta.com, jihou@meta.com, saining.xie@nyu.edu" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 342, + 285, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 342, + 285, + 354 + ], + "spans": [ + { + "bbox": [ + 85, + 342, + 285, + 354 + ], + "type": "text", + "content": "Project Page: https://xichenpan.com/metaquery" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 480, + 342, + 526, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 480, + 342, + 526, + 354 + ], + "spans": [ + { + "bbox": [ + 480, + 342, + 526, + 354 + ], + "type": "text", + "content": "Meta" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 388, + 166, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 388, + 166, + 402 + ], + "spans": [ + { + "bbox": [ + 67, + 388, + 166, + 402 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 66, + 414, + 543, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 414, + 543, + 547 + ], + "spans": [ + { + "bbox": [ + 66, + 414, + 543, + 547 + ], + "type": "text", + "content": "The quest for unified multimodal models capable of both deep understanding (typically resulting in textual outputs) and rich generation (resulting in pixel outputs) holds immense promise. Such systems could unlock synergistic capabilities (OpenAI, 2025; Google, 2025), where understanding informs generation and vice versa. However, effectively connecting these different output modalities poses considerable challenges—e.g. how do we effectively transfer the latent world knowledge from the autoregressive multimodal LLM to the image generator? Although significant progress has been made, most published approaches (Ge et al., 2024; Sun et al., 2024b; Tong et al., 2024; Jin et al., 2024; Liu et al., 2024a; Team, 2024a; Xie et al., 2024; Wang et al., 2024; Wu et al., 2025a; Chen et al., 2025; Dong et al., 2024; Zhou et al., 2025; Shi et al., 2024) rely on carefully tuning base multimodal LLMs (MLLMs) to handle both understanding and generation tasks. This involves complex architectural design, data/loss balancing, multiple training stages, and other complex training recipes—without these, optimizing one capability could compromise the other." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 66, + 552, + 543, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 552, + 543, + 649 + ], + "spans": [ + { + "bbox": [ + 66, + 552, + 543, + 649 + ], + "type": "text", + "content": "In this paper, we aim to deliver the promise of unified models via a simpler philosophy: Render unto diffusion what is generative, and unto LLMs what is understanding. 
In other words, instead of building a monolithic system from scratch, we focus on effectively transferring capabilities between state-of-the-art, pre-trained models specialized for different output modalities. To operationalize this, we keep MLLMs frozen so they can focus on what they do best—understanding—while entrusting image generation to diffusion models. We then demonstrate that even under this frozen condition, the MLLM's inherent world knowledge, strong reasoning, and in-context learning capabilities can indeed be transferred to image generation, provided the right architectural bridge is in place." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 66, + 654, + 544, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 654, + 544, + 715 + ], + "spans": [ + { + "bbox": [ + 66, + 654, + 544, + 715 + ], + "type": "text", + "content": "However, leveraging an MLLM—especially a frozen one—for both multimodal understanding and generation is far from straightforward. Although (frozen) LLMs have shown good performance as conditional text encoders in text-to-image generation (Zhuo et al., 2024; Xie et al., 2025; Ma et al., 2024), they are not compatible with many desired tasks in unified modeling, such as in-context learning or producing multimodal, interleaved output. The architectural bridge we design in this work is MetaQuery (Figure 1). MetaQuery feeds a set of" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.06256v1 [cs.CV] 8 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 751 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 93, + 62, + 518, + 222 + ], + "blocks": [ + { + "bbox": [ + 93, + 62, + 518, + 222 + ], + "lines": [ + { + "bbox": [ + 93, + 62, + 518, + 222 + ], + "spans": [ + { + "bbox": [ + 93, + 62, + 518, + 222 + ], + "type": "image", + "image_path": "70fdf417d4fb39bf516fdf05899650efd71ec63b281c5cfcbbf45619d8d2b2b4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 234, + 544, + 281 + ], + "lines": [ + { + "bbox": [ + 67, + 234, + 544, + 281 + ], + "spans": [ + { + "bbox": [ + 67, + 234, + 544, + 281 + ], + "type": "text", + "content": "Figure 1 Overview of our model. Blue tokens maintain SOTA multimodal understanding; MetaQueries are learnable queries that directly applied to frozen MLLMs to query out conditions for generation. The model is tuned using only denoising objective with paired data. The generative diffusion models can be either frozen or further instruction-tuned for advanced generation tasks." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 299, + 543, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 299, + 543, + 384 + ], + "spans": [ + { + "bbox": [ + 67, + 299, + 543, + 384 + ], + "type": "text", + "content": "learnable queries directly into a frozen MLLM to extract multimodal conditions for multimodal generation. Our experiments reveal that, even without fine-tuning or enabling bi-directional attention, the frozen LLM serves as a powerful feature resampler (Alayrac et al., 2022), producing high-quality conditions for multimodal generation. Training unified models with MetaQueries requires only a modest amount of paired image-caption data to connect these prompted conditions to any conditional diffusion model. Because the entire MLLM stays intact for understanding, the training objective remains the original denoising objective—just as efficient and stable as fine-tuning a diffusion model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "spans": [ + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "text", + "content": "More specifically, previous unified models aim to train a single autoregressive transformer backbone to jointly model " + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "inline_equation", + "content": "p(\\text{text}, \\text{pixels})" + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "text", + "content": ". In contrast, we choose to use a token " + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "text", + "content": " [transformer] " + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "text", + "content": " [diffusion] " + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 67, + 389, + 543, + 462 + ], + "type": "text", + "content": " pixels paradigm, which might share a high-level philosophy with the concurrent GPT-4o image generation system, as hinted at by OpenAI (2025). This approach composes the MLLM's autoregressive prior with a powerful diffusion decoder, directly leveraging the frozen MLLM's strong capability in modeling compressed semantic representations, thus avoiding the more challenging task of directly generating pixels." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 467, + 543, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 467, + 543, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 467, + 543, + 552 + ], + "type": "text", + "content": "To validate our approach, we conduct a series of controlled experiments, showing that MetaQuery1 outperforms the use of a frozen MLLM purely as a conditional text encoder for image generation. Moreover, MetaQuery can match the performance of fully tuning the MLLM backbone, yet it is significantly more efficient. We also systematically investigate the training strategy, including the number of tokens and architectural configurations. 
With just 25M publicly available image-caption pairs, we are able to train a family of unified models that not only preserves state-of-the-art (SOTA) performance in image understanding, but also achieves SOTA-level results in text-to-image generation across multiple benchmarks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 556, + 543, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 556, + 543, + 666 + ], + "spans": [ + { + "bbox": [ + 67, + 556, + 543, + 666 + ], + "type": "text", + "content": "The promise of unified modeling goes beyond handling multimodal understanding and text-to-image generation in parallel. A deeper synergy is expected—one that taps into advanced MLLM abilities like reasoning, internal knowledge, multimodal perception, and in-context learning to enhance generation. Our results show that our method draws on the frozen MLLM's commonsense knowledge, achieving SOTA visual-commonsense generation on the CommonsenseT2I benchmark (Fu et al., 2024). Our approach also harnesses the built-in reasoning and in-context learning capabilities of frozen MLLMs, producing images from complex prompts—such as generating the United States flag in response to \"The national flag of the country where Yellowstone National Park is located.\" (See Figure 9 for examples.) We also benchmark this type of world knowledge reasoning capability on WISE (Niu et al., 2025) and demonstrate SOTA performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 670, + 543, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 670, + 543, + 707 + ], + "spans": [ + { + "bbox": [ + 67, + 670, + 543, + 707 + ], + "type": "text", + "content": "Finally, by connecting, preserving, and enhancing multimodal input with MetaQueries and a frozen MLLM backbone, our model can be further instruction-tuned for advanced generation tasks such as image editing and subject-driven generation. We show that this can be achieved both efficiently and effectively using a scalable" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 79, + 712, + 319, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 712, + 319, + 723 + ], + "spans": [ + { + "bbox": [ + 79, + 712, + 319, + 723 + ], + "type": "text", + "content": "1For simplicity, we also use MetaQuery to represent our method." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 64, + 543, + 113 + ], + "spans": [ + { + "bbox": [ + 67, + 64, + 543, + 113 + ], + "type": "text", + "content": "data curation pipeline that directly leverages naturally occurring image pairs from web corpora, instead of depending on human-created pairs or synthetically generated data (Brooks et al., 2023; Hu et al., 2024a; Xiao et al., 2025). This natural supervision surprisingly unlocks several new capabilities beyond subject-driven generation, such as visual association and logo design (see Figure 8 for examples)." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 118, + 543, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 118, + 543, + 166 + ], + "spans": [ + { + "bbox": [ + 67, + 118, + 543, + 166 + ], + "type": "text", + "content": "In summary, we explore a simple yet underexplored alternative to unified multimodal modeling. Our method, MetaQuery, bridges frozen MLLM backbones and diffusion models. Experiments show that this framework delivers all the capabilities once thought to require MLLM fine-tuning while being much easier to train. The main results and findings in this paper include:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 82, + 171, + 541, + 284 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 82, + 171, + 541, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 171, + 541, + 194 + ], + "spans": [ + { + "bbox": [ + 82, + 171, + 541, + 194 + ], + "type": "text", + "content": "- With MetaQuery and frozen MLLM backbones, we maintain SOTA multimodal understanding performance while enabling SOTA-level multimodal generation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 82, + 201, + 540, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 201, + 540, + 224 + ], + "spans": [ + { + "bbox": [ + 82, + 201, + 540, + 224 + ], + "type": "text", + "content": "MetaQuery can transfer the capabilities of MLLMs for reasoning- and knowledge-augmented image generation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 232, + 541, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 232, + 541, + 254 + ], + "spans": [ + { + "bbox": [ + 82, + 232, + 541, + 254 + ], + "type": "text", + "content": "MetaQuery can extract highly detailed visual conditions beyond semantic similarity from frozen MLLMs, enabling image reconstruction and editing tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 261, + 540, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 261, + 540, + 284 + ], + "spans": [ + { + "bbox": [ + 82, + 261, + 540, + 284 + ], + "type": "text", + "content": "- Our method can be easily instruction-tuned even with a frozen MLLM backbone, enabling advanced multimodal generation tasks like subject-driven generation." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 304, + 174, + 318 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 304, + 174, + 318 + ], + "spans": [ + { + "bbox": [ + 67, + 304, + 174, + 318 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 329, + 544, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 329, + 544, + 474 + ], + "spans": [ + { + "bbox": [ + 67, + 329, + 544, + 474 + ], + "type": "text", + "content": "Unified understanding and generation models. Next-token prediction has proven to be an effective approach for training models to understand language (Devlin, 2019; Brown et al., 2020) and multimodal content (Liu et al., 2024b). Recently, the community has witnessed numerous efforts to extend the success of multimodal understanding (Liu et al., 2024b) to multimodal generation by training LLM backbones to generate images at the same time. 
However, unlike adapting text-only LLMs (Touvron et al., 2023) to understand multimodal content with one single next text token prediction objective (Liu et al., 2024b), generating multimodal content requires a different set of training objectives. SEED-X (Ge et al., 2024), Emu (Sun et al., 2024b), and MetaMorph (Tong et al., 2024) learn to regress image features; LaVIT (Jin et al., 2024), LWM (Liu et al., 2024a), Chameleon (Team, 2024a), Show-o (Xie et al., 2024), EMU3 (Wang et al., 2024), and Janus (Wu et al., 2025a; Chen et al., 2025) auto-regressively predict next visual tokens; and DreamLLM (Dong et al., 2024), Transfusion (Zhou et al., 2025) employ diffusion objectives. However, these approaches necessitate tuning LLMs for generating both modalities, naturally posing challenges in multi-task balancing." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 487, + 543, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 487, + 543, + 620 + ], + "spans": [ + { + "bbox": [ + 67, + 487, + 543, + 620 + ], + "type": "text", + "content": "Unified models with frozen LLMs. Several studies have explored the use of frozen LLMs for multimodal understanding and generation. For instance, LMFusion (Shi et al., 2024) trains image generation expert feed-forward networks (FFNs) and query-key-value (QKV) modules in parallel with a frozen LLM backbone to deeply fuse input conditions and denoise visual outputs. However, this approach offers limited flexibility as it shares the same architecture as specific LLM backbones and requires training a separate set of generative modules for every single LLM backbone. This not only imposes more computational burden but also restricts the ability to leverage powerful pre-trained generative models. An earlier work, GILL (Koh et al., 2023), investigates feeding learnable tokens into frozen MLLMs. It employs a combined contrastive loss and regression loss for image retrieval and generation, rather than directly employing the denoising objective for more efficient training. Its application is restricted to contextual image generation and it does not systematically explore the impact of frozen MLLMs and learnable queries." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 637, + 160, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 637, + 160, + 653 + ], + "spans": [ + { + "bbox": [ + 67, + 637, + 160, + 653 + ], + "type": "text", + "content": "3 MetaQuery" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 662, + 542, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 662, + 542, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 662, + 542, + 711 + ], + "type": "text", + "content": "In this work, we propose MetaQuery, which losslessly augments understanding-only MLLMs with multimodal generation capabilities while preserving their original architecture designs and parameters intact. We carefully analyze the impact of applying MetaQuery on image generation performance. Results show that a frozen MLLM can provide strong conditions for multimodal generation." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 121, + 62, + 491, + 125 + ], + "blocks": [ + { + "bbox": [ + 121, + 62, + 491, + 125 + ], + "lines": [ + { + "bbox": [ + 121, + 62, + 491, + 125 + ], + "spans": [ + { + "bbox": [ + 121, + 62, + 491, + 125 + ], + "type": "table", + "html": "
Methods# of TokensMJHQ-30K FID ↓GenEval ↑DPG-Bench ↑
LLM last layer embedding*-7.490.5578.41
Random queries648.590.3554.81
Learnable queries647.430.5675.35
Learnable queries5127.340.5678.43
", + "image_path": "ce8bf82f52247f065d1f56aaad4e4693afc7e3ffbf3a357566646853e081a9b8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 126, + 165, + 485, + 228 + ], + "blocks": [ + { + "bbox": [ + 67, + 132, + 541, + 156 + ], + "lines": [ + { + "bbox": [ + 67, + 132, + 541, + 156 + ], + "spans": [ + { + "bbox": [ + 67, + 132, + 541, + 156 + ], + "type": "text", + "content": "Table 1 Study on different conditions for image generation. * denotes the embeddings of input tokens. Learnable queries achieve comparable performance to using all hidden states and can even surpass them with more tokens." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 126, + 165, + 485, + 228 + ], + "lines": [ + { + "bbox": [ + 126, + 165, + 485, + 228 + ], + "spans": [ + { + "bbox": [ + 126, + 165, + 485, + 228 + ], + "type": "table", + "html": "
MethodsTrain LLMTrain DiTMJHQ-30K FID ↓GenEval ↑DPG-Bench ↑
MLLM tuningX7.750.5878.97
E2E tuning6.280.6179.39
Frozen MLLMXX7.430.5675.35
Frozen MLLMX6.060.6176.66
", + "image_path": "f604c38dcdbffc2511f059b98a51508f48de17764a2bbbc28a2ac4936d985f9d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 236, + 542, + 270 + ], + "lines": [ + { + "bbox": [ + 67, + 236, + 542, + 270 + ], + "spans": [ + { + "bbox": [ + 67, + 236, + 542, + 270 + ], + "type": "text", + "content": "Table 2 Study on strategies for adapting MLLMs. The methods without training LLM do not suffer from multimodal understanding degradation. Frozen MLLM achieves comparable performance to full MLLM tuning, with slightly lower prompt alignment but slightly improved visual quality." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 289, + 161, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 289, + 161, + 301 + ], + "spans": [ + { + "bbox": [ + 67, + 289, + 161, + 301 + ], + "type": "text", + "content": "3.1 Architecture" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "content": "MetaQuery bridges frozen MLLMs with diffusion models. We use randomly initialized learnable queries " + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{Q} \\in \\mathbb{R}^{N \\times D}" + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "content": " to query out the conditions " + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "content": " for generation. " + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "content": " is the number of queries and " + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "content": " is the dimension of the queries, which is the same as the MLLM hidden dimension. For simplicity and compatibility, we continue to use causal masking for the entire sequence rather than specifically enabling full attention for " + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "content": ". The conditions " + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "content": " are then fed into a trainable connector to align with the input space of text-to-image diffusion models. These models can be arbitrary as long as they have a conditional input interface; we simply replace its original condition with our " + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 67, + 308, + 542, + 417 + ], + "type": "text", + "content": ". The whole model is trained with the original generation objective on paired data. In this paper, we focus on image generation tasks, but the model can be easily extended to other modalities like audio, video, 3D, and more." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 431, + 174, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 431, + 174, + 443 + ], + "spans": [ + { + "bbox": [ + 67, + 431, + 174, + 443 + ], + "type": "text", + "content": "3.2 Design Choices" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 451, + 543, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 451, + 543, + 548 + ], + "spans": [ + { + "bbox": [ + 67, + 451, + 543, + 548 + ], + "type": "text", + "content": "The proposed architecture involves two design choices: using learnable queries and keeping the MLLM backbone frozen. We explain the reasons why we adopted these choices and how they impact performance. For all experiments, unless otherwise specified, we use the same frozen LLaVA-OneVision-0.5B (Li et al., 2024a) MLLM backbone, frozen Sana-0.6B (Xie et al., 2025) diffusion model in 512 resolution, learnable queries with " + }, + { + "bbox": [ + 67, + 451, + 543, + 548 + ], + "type": "inline_equation", + "content": "N = 64" + }, + { + "bbox": [ + 67, + 451, + 543, + 548 + ], + "type": "text", + "content": " tokens, and a connector with a 24-layer transformer encoder. All models are trained on 25M publicly available image caption pairs for 4 epochs. We report FID score (Heusel et al., 2017) on MJHQ-30K (Li et al., 2024b) for visual aesthetic quality, and GenEval (Ghosh et al., 2023) and DPG-Bench (Hu et al., 2024b) (both without prompt rewriting) for prompt alignment, respectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 560, + 542, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 560, + 542, + 681 + ], + "spans": [ + { + "bbox": [ + 67, + 560, + 542, + 681 + ], + "type": "text", + "content": "Learnable queries. Many models like Lumina-Next (Zhuo et al., 2024), Sana (Xie et al., 2025), and Kosmos-G (Pan et al., 2024) use the (M)LLM's last layer embedding of input tokens as image generation conditions. However, this approach is not ideal for unified models as it is not compatible with many desired tasks in unified modeling, such as in-context learning or producing multimodal, interleaved output (we provide more discussion and comparison with MetaQuery in Section 5.6). As shown in Table 1, using learnable queries with just " + }, + { + "bbox": [ + 67, + 560, + 542, + 681 + ], + "type": "inline_equation", + "content": "N = 64" + }, + { + "bbox": [ + 67, + 560, + 542, + 681 + ], + "type": "text", + "content": " tokens achieves image generation quality comparable to that of utilizing the last layer embedding of input tokens. While random queries produce acceptable FID scores, they struggle with prompt alignment, highlighting the importance of learnable queries. Additionally, since the last layer embedding setting naturally comes with a longer sequence length, we also tested learnable queries with " + }, + { + "bbox": [ + 67, + 560, + 542, + 681 + ], + "type": "inline_equation", + "content": "N = 512" + }, + { + "bbox": [ + 67, + 560, + 542, + 681 + ], + "type": "text", + "content": " tokens, which further improves performance and even outperforms the last layer embedding approach." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 693, + 541, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 693, + 541, + 718 + ], + "spans": [ + { + "bbox": [ + 67, + 693, + 541, + 718 + ], + "type": "text", + "content": "Frozen MLLM. 
Existing unified models train MLLMs to jointly model " + }, + { + "bbox": [ + 67, + 693, + 541, + 718 + ], + "type": "inline_equation", + "content": "p(\\text{text}, \\text{pixels})" + }, + { + "bbox": [ + 67, + 693, + 541, + 718 + ], + "type": "text", + "content": ", resulting in a more complicated training process and even downgraded understanding performance. MetaQuery keeps the original" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 67, + 303, + 240 + ], + "blocks": [ + { + "bbox": [ + 70, + 67, + 303, + 240 + ], + "lines": [ + { + "bbox": [ + 70, + 67, + 303, + 240 + ], + "spans": [ + { + "bbox": [ + 70, + 67, + 303, + 240 + ], + "type": "image", + "image_path": "26e5def00142de348936cfa334f6dcbef2aa8e4cd65e4ff90e11e7056c47e06b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 248, + 240, + 259 + ], + "lines": [ + { + "bbox": [ + 135, + 248, + 240, + 259 + ], + "spans": [ + { + "bbox": [ + 135, + 248, + 240, + 259 + ], + "type": "text", + "content": "(a) Text-to-image results." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 62, + 541, + 242 + ], + "blocks": [ + { + "bbox": [ + 309, + 62, + 541, + 242 + ], + "lines": [ + { + "bbox": [ + 309, + 62, + 541, + 242 + ], + "spans": [ + { + "bbox": [ + 309, + 62, + 541, + 242 + ], + "type": "image", + "image_path": "6cbcca96c2745b0fc235a258d5debaaacb8baf3df31684a63e481d46e4678b1c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 358, + 249, + 492, + 259 + ], + "lines": [ + { + "bbox": [ + 358, + 249, + 492, + 259 + ], + "spans": [ + { + "bbox": [ + 358, + 249, + 492, + 259 + ], + "type": "text", + "content": "(b) Image reconstruction results." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 69, + 301, + 542, + 453 + ], + "blocks": [ + { + "bbox": [ + 67, + 269, + 541, + 292 + ], + "lines": [ + { + "bbox": [ + 67, + 269, + 541, + 292 + ], + "spans": [ + { + "bbox": [ + 67, + 269, + 541, + 292 + ], + "type": "text", + "content": "Figure 2 Study on the scaling of token numbers. As the number of tokens increases, text-to-image prompt alignment and image reconstruction results consistently improve." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 69, + 301, + 542, + 453 + ], + "lines": [ + { + "bbox": [ + 69, + 301, + 542, + 453 + ], + "spans": [ + { + "bbox": [ + 69, + 301, + 542, + 453 + ], + "type": "image", + "image_path": "991b1ac03b36f5361d4e58d6868c9741411c1cc7e9963640e355b72a09945466.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 459, + 403, + 472 + ], + "lines": [ + { + "bbox": [ + 67, + 459, + 403, + 472 + ], + "spans": [ + { + "bbox": [ + 67, + 459, + 403, + 472 + ], + "type": "text", + "content": "Figure 3 Visual samples for image reconstruction with different numbers of tokens." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 491, + 541, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 491, + 541, + 575 + ], + "spans": [ + { + "bbox": [ + 67, + 491, + 541, + 575 + ], + "type": "text", + "content": "MLLM architecture and parameters intact to preserve SOTA understanding capabilities. However, for multimodal generation, a key concern is whether MetaQuery's performance with significantly fewer tunable parameters would be substantially worse than methods with full MLLM tuning. As shown in Table 2, frozen MLLMs achieve comparable performance to full MLLM tuning, with slightly lower prompt alignment but slightly improved visual quality. Tuning DiT can further improve performance for both settings. This suggests that MetaQuery is another possible training strategy, one that is simpler but also effective, as an alternative to fine-tuning the entire MLLM." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 590, + 175, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 175, + 604 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 175, + 604 + ], + "type": "text", + "content": "3.3 Training Recipe" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 609, + 541, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 609, + 541, + 646 + ], + "spans": [ + { + "bbox": [ + 67, + 609, + 541, + 646 + ], + "type": "text", + "content": "Based on insights from our design choices, we further study key training options for the two main components of MetaQuery: learnable queries and connectors. This study examines the number of tokens and connector design. Unless otherwise specified, all experiments in this section use the same setup as described in Section 3.2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 659, + 541, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 659, + 541, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 659, + 541, + 719 + ], + "type": "text", + "content": "Number of tokens. Many works (Wu et al., 2023; Pan et al., 2024; Ge et al., 2024) have employed learnable queries for condition extraction. However, they either set the number of tokens to match the fixed input sequence length of the image decoder (e.g., " + }, + { + "bbox": [ + 67, + 659, + 541, + 719 + ], + "type": "inline_equation", + "content": "N = 77" + }, + { + "bbox": [ + 67, + 659, + 541, + 719 + ], + "type": "text", + "content": " for the CLIP (Radford et al., 2021) text encoder in Stable Diffusion v1.5 (Rombach et al., 2021)), or use an arbitrary fixed number like " + }, + { + "bbox": [ + 67, + 659, + 541, + 719 + ], + "type": "inline_equation", + "content": "N = 64" + }, + { + "bbox": [ + 67, + 659, + 541, + 719 + ], + "type": "text", + "content": " without further investigation. 
Given that modern diffusion models like Lumina-Next (Zhuo et al., 2024) and Sana (Xie" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 75, + 62, + 535, + 125 + ], + "blocks": [ + { + "bbox": [ + 75, + 62, + 535, + 125 + ], + "lines": [ + { + "bbox": [ + 75, + 62, + 535, + 125 + ], + "spans": [ + { + "bbox": [ + 75, + 62, + 535, + 125 + ], + "type": "table", + "html": "
Architecture# of LayersDims# of ParamsRel. Wall TimeMJHQ-30K FID ↓GenEval ↑DPG-Bench ↑
Proj-Enc62304517M1.06x7.800.5373.37
Proj-Enc2423042046M1.23x7.410.5173.75
Enc-Proj689684M1x7.730.4971.39
Enc-Proj24896316M1.06x7.430.5675.35
", + "image_path": "7aae1bbc607ab3c1b39cc159c1512802e81ed3d35ad05074e80e97c48e4e8080.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 132, + 541, + 155 + ], + "lines": [ + { + "bbox": [ + 67, + 132, + 541, + 155 + ], + "spans": [ + { + "bbox": [ + 67, + 132, + 541, + 155 + ], + "type": "text", + "content": "Table 3 Study on connector design. Aligning the conditions first in the same dimension as the MLLM hidden states (Enc-Proj) is more effective and parameter-efficient." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 175, + 543, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 175, + 543, + 272 + ], + "spans": [ + { + "bbox": [ + 67, + 175, + 543, + 272 + ], + "type": "text", + "content": "et al., 2025) naturally accept variable-length conditions, determining the optimal number of tokens for learnable queries is crucial. In Figure 2, we provide a careful study of the number of tokens and observe promising scalability of MetaQueries. For text-to-image generation, visual quality begins to converge after 64 tokens, while more tokens consistently yield better prompt alignment. This is more evident for long captions, as GenEval with rewritten prompts increases more rapidly as the number of tokens increases. For image reconstruction, we observe that more tokens consistently improve the quality of reconstructed images (visual samples can be found in Figure 3). In our later experiments, we set the number of tokens to " + }, + { + "bbox": [ + 67, + 175, + 543, + 272 + ], + "type": "inline_equation", + "content": "N = 256" + }, + { + "bbox": [ + 67, + 175, + 543, + 272 + ], + "type": "text", + "content": " for all models, as it achieves a good balance between performance and efficiency." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 285, + 543, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 543, + 381 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 543, + 381 + ], + "type": "text", + "content": "Connector design. The connector is another important component in MetaQuery. We use the same architecture as the Qwen2.5 (Team, 2024b) LLM, but enable bi-directional attention for the connector. We study two different designs: Projection Before Encoder (Proj-Enc) and Projection After Encoder (Enc-Proj). Proj-Enc first projects the conditions into the input dimension of the diffusion decoder, then uses a transformer encoder to align the conditions. On the other hand, Enc-Proj first uses a transformer encoder to align the conditions in the same dimension as the MLLM hidden states, then projects the conditions into the input dimension of the diffusion decoder. As shown in Table 3, the Enc-Proj design achieves better performance than the Proj-Enc design while having fewer parameters." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 399, + 181, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 399, + 181, + 415 + ], + "spans": [ + { + "bbox": [ + 67, + 399, + 181, + 415 + ], + "type": "text", + "content": "4 Model Training" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 425, + 230, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 425, + 230, + 665 + ], + "spans": [ + { + "bbox": [ + 67, + 425, + 230, + 665 + ], + "type": "text", + "content": "We train MetaQuery in two stages: the pre-training stage and the instruction tuning stage. Both training stages keep MLLMs frozen and fine-tune learnable queries, connectors, and diffusion models. We use three different MLLM backbones for different sizes: Base (LLaVA-OneVision 0.5B (Li et al., 2024a)), Large (Qwen2.5-VL 3B (Bai et al., 2025)), and X-Large (Qwen2.5-VL 7B (Bai et al., 2025)). We set the number of tokens to " + }, + { + "bbox": [ + 67, + 425, + 230, + 665 + ], + "type": "inline_equation", + "content": "N = 256" + }, + { + "bbox": [ + 67, + 425, + 230, + 665 + ], + "type": "text", + "content": " for all models, and utilize a 24-layer connector with Enc-Proj architecture. For image generation heads, we tested two different diffusion models: Stable Diffusion v1.5 (Rombach et al., 2021) and Sana-1.6B (Xie et al., 2025)." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 244, + 422, + 539, + 616 + ], + "blocks": [ + { + "bbox": [ + 244, + 422, + 539, + 616 + ], + "lines": [ + { + "bbox": [ + 244, + 422, + 539, + 616 + ], + "spans": [ + { + "bbox": [ + 244, + 422, + 539, + 616 + ], + "type": "image", + "image_path": "71a1d9a265fd63ade28febee74e11c217a12bde87dcfcf180be066256678952a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 241, + 623, + 541, + 667 + ], + "lines": [ + { + "bbox": [ + 241, + 623, + 541, + 667 + ], + "spans": [ + { + "bbox": [ + 241, + 623, + 541, + 667 + ], + "type": "text", + "content": "Figure 4 Overview of instruction tuning data curation pipeline. We group images from web corpora based on caption similarity using the SigLIP (Zhai et al., 2023) model, then construct instruction-tuning data from these image pairs using an MLLM." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 677, + 542, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 677, + 542, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 677, + 542, + 715 + ], + "type": "text", + "content": "Pre-training. We pre-train our model on 25M publicly available image-caption pairs for 8 epochs with a learning rate of 1e-4 and a global batch size of 4096. The learning rate follows a cosine decay schedule with a 4,000-step warmup period before gradually decreasing to 1e-5." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 77, + 62, + 534, + 246 + ], + "blocks": [ + { + "bbox": [ + 77, + 62, + 534, + 246 + ], + "lines": [ + { + "bbox": [ + 77, + 62, + 534, + 246 + ], + "spans": [ + { + "bbox": [ + 77, + 62, + 534, + 246 + ], + "type": "table", + "html": "
MethodsBase (M)LLMMME-PMMBSEEDMMMUMM-VetCOCO FID ↓MJHQ FID ↓GenEval ↑DPG-Bench ↑
EmuLLaMA 13B-----11.66---
DreamLLMVicuna 7B----36.68.46---
ChameleonFrom Scratch 7B---22.48.326.74-0.39-
Show-o-512Phi-1.5 1.3B1097.2--26.7-9.2415.180.68-
VILA-ULLaMA-2 7B1401.8-59.0-33.5-7.69--
Emu3From Scratch 7B-58.568.231.637.212.80-0.66†80.60
MetaMorphLLaMA-3 8B-75.271.8--11.8---
TokenFlow-XLQwen-2.5 14B1551.176.872.643.248.2--0.63†73.38
TransfusionFrom Scratch 7B-----8.70-0.63-
LMFusionLLaVA-Next 8B1603.772.172.541.7-8.20---
JanusDeepSeek-LLM 1.5B1338.069.463.730.534.38.5310.100.61-
JanusFlowDeepSeek-LLM 1.5B1333.174.970.529.330.9-9.510.6380.09
Janus-Pro-1BDeepSeek-LLM 1.5B1444.075.568.336.339.8-14.33‡0.7382.63
Janus-Pro-7BDeepSeek-LLM 7B1567.179.272.141.050.0-13.48‡0.8084.19
MetaQuery-BLLaVA-ov 0.5B1238.058.566.631.429.18.916.280.74†80.04
MetaQuery-LQwen2.5-VL 3B1574.378.673.853.163.28.876.350.78†81.10
MetaQuery-XLQwen2.5-VL 7B1685.283.576.958.666.68.696.020.80†82.05
", + "image_path": "47fde3a154dfb5fd97836334bedd3adfe43814936d49863da6f7b62e487a3a46.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 255, + 541, + 289 + ], + "lines": [ + { + "bbox": [ + 67, + 255, + 541, + 289 + ], + "spans": [ + { + "bbox": [ + 67, + 255, + 541, + 289 + ], + "type": "text", + "content": "Table 4 Quantitative results on multimodal understanding and generation benchmarks. We report the COCO FID with Stable Diffusion v1.5 (Rombach et al., 2021), and other metrics with Sana (Xie et al., 2025). † denotes rewritten prompts. ‡ denotes results tested by us under the same settings." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 308, + 541, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 541, + 548 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 541, + 548 + ], + "type": "text", + "content": "Instruction tuning. Furthermore, in this work, we rethink the data curation process for instruction tuning in image generation. All current methods rely on expert models to generate target images from source images and instructions (Ge et al., 2024; Xiao et al., 2025; Hu et al., 2024a). However, this approach is limited in scalability and may introduce biases, as the available expert models cover only a narrow range of image transformations. Inspired by MagicLens (Zhang et al., 2024), we construct instruction-tuning data using naturally occurring image pairs in web corpora. These corpora contain rich multimodal contexts with interleaved text and images on related subjects or topics. These image pairs often exhibit meaningful associations and specific relationships spanning a broad spectrum, from direct visual similarities to more subtle semantic connections (as shown in Figure 4). Such naturally occurring image pairs provide excellent and diverse supervision signals for instruction tuning. Based on this observation, we developed a data construction pipeline that mines image pairs and leverages MLLMs to generate open-ended instructions that capture their inter-image relationships. First, we collect grouped images from mmc4 (Zhu et al., 2023) core fewer-faces subset, where each image is accompanied by a caption. Using SigLIP (Zhai et al., 2023), we cluster images with similar captions (allowing up to 6 images per group, with a similarity threshold of 0.5). In each group, the image with minimum average similarity to the others is designated as the target, while the remaining images serve as source images. This process yields a total of 2.4M image pairs. Finally, we employ Qwen2.5-VL 3B (Bai et al., 2025) to generate instructions for each pair, describing how to transform the source images into the target image (See Appendix A for the detailed MLLM prompt). We experimented with instruction-tuning our Base size model on the proposed 2.4M dataset for 3 epochs, using the same learning rate schedule as in pre-training and a batch size of 2048." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 567, + 169, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 567, + 169, + 582 + ], + "spans": [ + { + "bbox": [ + 67, + 567, + 169, + 582 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 592, + 541, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 592, + 541, + 712 + ], + "spans": [ + { + "bbox": [ + 66, + 592, + 541, + 712 + ], + "type": "text", + "content": "In this section, we first evaluate MetaQuery on various multimodal understanding and text-to-image generation benchmarks (Section 5.1). We demonstrate that MetaQuery can be trained to reconstruct input images (Section 5.2). This image reconstruction capability can be easily transferred to perform image editing (Section 5.3). Furthermore, we show that MetaQuery can be instruction-tuned to perform zero-shot subject-driven generation (Section 5.4). By leveraging our approach for collecting instruction tuning data from naturally existing image pairs, we also reveal that MetaQuery can unlock novel capabilities like visual association and logo design (also in Section 5.4). Additionally, we demonstrate that MetaQuery can benefit from the internal knowledge and reasoning capabilities of the frozen MLLM, overcoming common failures exhibited by other generation models (Section 5.5). Finally, we discuss the impact of different MLLM backbones and compare MetaQuery's behavior with the baseline that uses MLLM last layer embeddings (Section 5.6)." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 63, + 187, + 180 + ], + "blocks": [ + { + "bbox": [ + 71, + 63, + 187, + 180 + ], + "lines": [ + { + "bbox": [ + 71, + 63, + 187, + 180 + ], + "spans": [ + { + "bbox": [ + 71, + 63, + 187, + 180 + ], + "type": "image", + "image_path": "b9c3a23589c96c41551027759ee3efb1784846504d3b7874cd009fd5d0fedae2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 72, + 183, + 176, + 199 + ], + "lines": [ + { + "bbox": [ + 72, + 183, + 176, + 199 + ], + "spans": [ + { + "bbox": [ + 72, + 183, + 176, + 199 + ], + "type": "text", + "content": "A hot air balloon in the shape of a heart. 
Grand Canyon" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 188, + 63, + 304, + 180 + ], + "blocks": [ + { + "bbox": [ + 188, + 63, + 304, + 180 + ], + "lines": [ + { + "bbox": [ + 188, + 63, + 304, + 180 + ], + "spans": [ + { + "bbox": [ + 188, + 63, + 304, + 180 + ], + "type": "image", + "image_path": "d97f1701555c6678bd51e7b23d877a0a7ed770994c0ffded7c265bac83b8a108.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 182, + 419, + 191 + ], + "lines": [ + { + "bbox": [ + 304, + 182, + 419, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 419, + 191 + ], + "type": "text", + "content": "A British shorthair wearing sunglasses" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 304, + 63, + 420, + 181 + ], + "blocks": [ + { + "bbox": [ + 304, + 63, + 420, + 181 + ], + "lines": [ + { + "bbox": [ + 304, + 63, + 420, + 181 + ], + "spans": [ + { + "bbox": [ + 304, + 63, + 420, + 181 + ], + "type": "image", + "image_path": "fe14b58523fd1199df8a35521fb8b1b3670ba32ccfbb5bcd196c0ff9f20299ca.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 420, + 63, + 538, + 182 + ], + "blocks": [ + { + "bbox": [ + 420, + 63, + 538, + 182 + ], + "lines": [ + { + "bbox": [ + 420, + 63, + 538, + 182 + ], + "spans": [ + { + "bbox": [ + 420, + 63, + 538, + 182 + ], + "type": "image", + "image_path": "4cd49de60a91872c7f33c2ed1240629712279838d61f4ad56a145108151af2af.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 421, + 182, + 535, + 198 + ], + "lines": [ + { + "bbox": [ + 421, + 182, + 535, + 198 + ], + "spans": [ + { + "bbox": [ + 421, + 182, + 535, + 198 + ], + "type": "text", + "content": "A butterfly lands directly on the nose of a German Shepherd." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 71, + 201, + 187, + 319 + ], + "blocks": [ + { + "bbox": [ + 71, + 201, + 187, + 319 + ], + "lines": [ + { + "bbox": [ + 71, + 201, + 187, + 319 + ], + "spans": [ + { + "bbox": [ + 71, + 201, + 187, + 319 + ], + "type": "image", + "image_path": "dde05b8ac87ef551f6c6722f0f3a69c9df1b0be719bc5033e26f38b356ace440.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 72, + 321, + 180, + 346 + ], + "lines": [ + { + "bbox": [ + 72, + 321, + 180, + 346 + ], + "spans": [ + { + "bbox": [ + 72, + 321, + 180, + 346 + ], + "type": "text", + "content": "A close-up of honey being drizzled onto pancakes, the thick liquid flowing slowly and smoothly." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 188, + 201, + 304, + 319 + ], + "blocks": [ + { + "bbox": [ + 189, + 183, + 296, + 198 + ], + "lines": [ + { + "bbox": [ + 189, + 183, + 296, + 198 + ], + "spans": [ + { + "bbox": [ + 189, + 183, + 296, + 198 + ], + "type": "text", + "content": "A sunken ship at the bottom of the ocean." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 188, + 201, + 304, + 319 + ], + "lines": [ + { + "bbox": [ + 188, + 201, + 304, + 319 + ], + "spans": [ + { + "bbox": [ + 188, + 201, + 304, + 319 + ], + "type": "image", + "image_path": "fcbdb08c63f65558cdf160396bedb8523d98a6851b524df3201eec1f27a037b8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 189, + 321, + 300, + 337 + ], + "lines": [ + { + "bbox": [ + 189, + 321, + 300, + 337 + ], + "spans": [ + { + "bbox": [ + 189, + 321, + 300, + 337 + ], + "type": "text", + "content": "The word 'START' written on a street surface." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 304, + 201, + 421, + 319 + ], + "blocks": [ + { + "bbox": [ + 304, + 201, + 421, + 319 + ], + "lines": [ + { + "bbox": [ + 304, + 201, + 421, + 319 + ], + "spans": [ + { + "bbox": [ + 304, + 201, + 421, + 319 + ], + "type": "image", + "image_path": "b12ee3389eb59af291e4591659081ba05f6a90a9227a6beb350600174faf3f18.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 321, + 418, + 337 + ], + "lines": [ + { + "bbox": [ + 306, + 321, + 418, + 337 + ], + "spans": [ + { + "bbox": [ + 306, + 321, + 418, + 337 + ], + "type": "text", + "content": "A paper origami dragon riding a boat in waves." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 421, + 320, + 526, + 344 + ], + "lines": [ + { + "bbox": [ + 421, + 320, + 526, + 344 + ], + "spans": [ + { + "bbox": [ + 421, + 320, + 526, + 344 + ], + "type": "text", + "content": "An old rusted robot wearing pants and a jacket riding skis in a supermarket." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 421, + 201, + 538, + 319 + ], + "blocks": [ + { + "bbox": [ + 421, + 201, + 538, + 319 + ], + "lines": [ + { + "bbox": [ + 421, + 201, + 538, + 319 + ], + "spans": [ + { + "bbox": [ + 421, + 201, + 538, + 319 + ], + "type": "image", + "image_path": "36fc86f721567164cf07c1649bdde9d18e478995d36fbc4ff03ad2d2af643563.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 71, + 347, + 188, + 464 + ], + "blocks": [ + { + "bbox": [ + 71, + 347, + 188, + 464 + ], + "lines": [ + { + "bbox": [ + 71, + 347, + 188, + 464 + ], + "spans": [ + { + "bbox": [ + 71, + 347, + 188, + 464 + ], + "type": "image", + "image_path": "e8e1b7bf4cec9f8528f7381916efc241f178f796c22ba90ba0983c2557c0ac6c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 72, + 467, + 182, + 499 + ], + "lines": [ + { + "bbox": [ + 72, + 467, + 182, + 499 + ], + "spans": [ + { + "bbox": [ + 72, + 467, + 182, + 499 + ], + "type": "text", + "content": "A close-up of a painter's brush touching the canvas, with paint spreading and blending in a swirl of colors." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 514, + 541, + 536 + ], + "lines": [ + { + "bbox": [ + 67, + 514, + 541, + 536 + ], + "spans": [ + { + "bbox": [ + 67, + 514, + 541, + 536 + ], + "type": "text", + "content": "Figure 5 Qualitative results of MetaQuery. Prompts are from PartiPrompt (Yu et al., 2022), Sana (Xie et al., 2025) and Movie Gen Bench (Polyak et al., 2024)." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 188, + 347, + 304, + 464 + ], + "blocks": [ + { + "bbox": [ + 188, + 347, + 304, + 464 + ], + "lines": [ + { + "bbox": [ + 188, + 347, + 304, + 464 + ], + "spans": [ + { + "bbox": [ + 188, + 347, + 304, + 464 + ], + "type": "image", + "image_path": "31a647ae7331f834c474e220328e1e72996313388bcd70d7447ba4ad111f554a.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 189, + 467, + 300, + 499 + ], + "lines": [ + { + "bbox": [ + 189, + 467, + 300, + 499 + ], + "spans": [ + { + "bbox": [ + 189, + 467, + 300, + 499 + ], + "type": "text", + "content": "A giant humanoid, made of fluffy blue cotton candy, stomping on the ground, and roaring to the sky, clear blue sky behind them." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 304, + 347, + 421, + 464 + ], + "blocks": [ + { + "bbox": [ + 304, + 347, + 421, + 464 + ], + "lines": [ + { + "bbox": [ + 304, + 347, + 421, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 347, + 421, + 464 + ], + "type": "image", + "image_path": "af25337f48bf2a1fcba79b602b7512fb3badd1f589b46c51e3d25f9b094dac98.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 467, + 406, + 499 + ], + "lines": [ + { + "bbox": [ + 305, + 467, + 406, + 499 + ], + "spans": [ + { + "bbox": [ + 305, + 467, + 406, + 499 + ], + "type": "text", + "content": "Close-up of a bright blue parrot's feathers glittering in the light, showing its unique plumage and vibrant colors." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 421, + 467, + 532, + 499 + ], + "lines": [ + { + "bbox": [ + 421, + 467, + 532, + 499 + ], + "spans": [ + { + "bbox": [ + 421, + 467, + 532, + 499 + ], + "type": "text", + "content": "The reflection of a snowy mountain peak in a crystal-clear alpine lake, creating a perfect mirror image with a slight shimmering effect." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 421, + 347, + 538, + 464 + ], + "blocks": [ + { + "bbox": [ + 421, + 347, + 538, + 464 + ], + "lines": [ + { + "bbox": [ + 421, + 347, + 538, + 464 + ], + "spans": [ + { + "bbox": [ + 421, + 347, + 538, + 464 + ], + "type": "image", + "image_path": "7b499c68b1be62904daf47a04a892eb29c8a4c9f7d17cab06469846695a5b06a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 67, + 556, + 283, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 556, + 283, + 569 + ], + "spans": [ + { + "bbox": [ + 67, + 556, + 283, + 569 + ], + "type": "text", + "content": "5.1 Image Understanding and Generation" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 67, + 575, + 542, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 575, + 542, + 708 + ], + "spans": [ + { + "bbox": [ + 67, + 575, + 542, + 708 + ], + "type": "text", + "content": "As shown in Table 4, our model family demonstrates strong capabilities across both understanding and generation tasks. 
Benefiting from the flexible training approach that allows us to leverage arbitrary SOTA frozen MLLMs, all of our models in different sizes exhibit competitive performance on all understanding benchmarks (Fu et al., 2023; Liu et al., 2023; Li et al., 2023a; Yue et al., 2024; Yu et al., 2023). In terms of image generation, MetaQuery achieves SOTA visual quality on MJHQ-30K (Li et al., 2024b). Given the fact that MetaQuery works with frozen MLLMs, we can naturally connect with an arbitrary number of diffusion models. Since the base Sana-1.6B (Xie et al., 2025) model is already fine-tuned on aesthetic data, we adopt Stable Diffusion v1.5 (Rombach et al., 2021) for COCO FID evaluation. Our results suggest that after adapting it to powerful MLLMs, we can achieve improved visual quality as indicated by the COCO FID score of 8.69. This also establishes a new SOTA COCO FID score among all Stable Diffusion v1.5-based unified models including MetaMorph (Tong et al., 2024) (11.8) and Emu (Sun et al., 2024b) (11.66)." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 91, + 61, + 162, + 274 + ], + "blocks": [ + { + "bbox": [ + 91, + 61, + 162, + 274 + ], + "lines": [ + { + "bbox": [ + 91, + 61, + 162, + 274 + ], + "spans": [ + { + "bbox": [ + 91, + 61, + 162, + 274 + ], + "type": "image", + "image_path": "70b0361654016b250254d8a9163f2affb03ec516f759c8748b50254aae39b093.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 280, + 147, + 288 + ], + "lines": [ + { + "bbox": [ + 105, + 280, + 147, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 147, + 288 + ], + "type": "text", + "content": "Real Image" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 162, + 62, + 233, + 274 + ], + "blocks": [ + { + "bbox": [ + 162, + 62, + 233, + 274 + ], + "lines": [ + { + "bbox": [ + 162, + 62, + 233, + 274 + ], + "spans": [ + { + "bbox": [ + 162, + 62, + 233, + 274 + ], + "type": "image", + "image_path": "a626c2ad7ce49892adf9b4655ab02d93bb5762fc0945595b273155aa85a7da62.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 186, + 277, + 209, + 284 + ], + "lines": [ + { + "bbox": [ + 186, + 277, + 209, + 284 + ], + "spans": [ + { + "bbox": [ + 186, + 277, + 209, + 284 + ], + "type": "text", + "content": "SEED" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 169, + 285, + 227, + 293 + ], + "lines": [ + { + "bbox": [ + 169, + 285, + 227, + 293 + ], + "spans": [ + { + "bbox": [ + 169, + 285, + 227, + 293 + ], + "type": "text", + "content": "(Ge et al., 2023)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 233, + 62, + 304, + 274 + ], + "blocks": [ + { + "bbox": [ + 233, + 62, + 304, + 274 + ], + "lines": [ + { + "bbox": [ + 233, + 62, + 304, + 274 + ], + "spans": [ + { + "bbox": [ + 233, + 62, + 304, + 274 + ], + "type": "image", + "image_path": 
"f5b932ba4b3af60c9595d23c21d7eb8e63a7d357dde1449e8e9a53b30d301584.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 262, + 276, + 279, + 283 + ], + "lines": [ + { + "bbox": [ + 262, + 276, + 279, + 283 + ], + "spans": [ + { + "bbox": [ + 262, + 276, + 279, + 283 + ], + "type": "text", + "content": "Emu" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 250, + 284, + 290, + 299 + ], + "lines": [ + { + "bbox": [ + 250, + 284, + 290, + 299 + ], + "spans": [ + { + "bbox": [ + 250, + 284, + 290, + 299 + ], + "type": "text", + "content": "(Sun et al., 2024b)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 304, + 62, + 375, + 274 + ], + "blocks": [ + { + "bbox": [ + 304, + 62, + 375, + 274 + ], + "lines": [ + { + "bbox": [ + 304, + 62, + 375, + 274 + ], + "spans": [ + { + "bbox": [ + 304, + 62, + 375, + 274 + ], + "type": "image", + "image_path": "edcb210930a495f6468a261995e8cd43331cc08b7917631b87941c67eb85b779.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 330, + 276, + 351, + 283 + ], + "lines": [ + { + "bbox": [ + 330, + 276, + 351, + 283 + ], + "spans": [ + { + "bbox": [ + 330, + 276, + 351, + 283 + ], + "type": "text", + "content": "Emu2" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 320, + 284, + 360, + 299 + ], + "lines": [ + { + "bbox": [ + 320, + 284, + 360, + 299 + ], + "spans": [ + { + "bbox": [ + 320, + 284, + 360, + 299 + ], + "type": "text", + "content": "(Sun et al., 2024a)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 375, + 63, + 444, + 274 + ], + "blocks": [ + { + "bbox": [ + 375, + 63, + 444, + 274 + ], + "lines": [ + { + "bbox": [ + 375, + 63, + 444, + 274 + ], + "spans": [ + { + "bbox": [ + 375, + 63, + 444, + 274 + ], + "type": "image", + "image_path": "1cc8421e582befa6179530b3cee34505907ff5676ee7a14c60dbe6b90cbebfea.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 395, + 276, + 424, + 284 + ], + "lines": [ + { + "bbox": [ + 395, + 276, + 424, + 284 + ], + "spans": [ + { + "bbox": [ + 395, + 276, + 424, + 284 + ], + "type": "text", + "content": "GPT-4o" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 382, + 285, + 436, + 293 + ], + "lines": [ + { + "bbox": [ + 382, + 285, + 436, + 293 + ], + "spans": [ + { + "bbox": [ + 382, + 285, + 436, + 293 + ], + "type": "text", + "content": "(OpenAI, 2025)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 444, + 63, + 515, + 274 + ], + "blocks": [ + { + "bbox": [ + 444, + 63, + 515, + 274 + ], + "lines": [ + { + "bbox": [ + 444, + 63, + 515, + 274 + ], + "spans": [ + { + "bbox": [ + 444, + 63, + 515, + 274 + ], + "type": "image", + "image_path": "f7cb61c92aff1e1a951a059200c3802b85f7a6bad339597ac1ffbfbdec16a6fd.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 460, + 281, + 503, + 289 + ], + "lines": [ + { + "bbox": [ + 460, + 281, + 503, + 289 + ], + "spans": [ + { + "bbox": [ + 460, + 281, + 503, + 289 + ], + "type": "text", + "content": "MetaQuery-B" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, 
+ { + "type": "image", + "bbox": [ + 70, + 327, + 187, + 445 + ], + "blocks": [ + { + "bbox": [ + 67, + 304, + 481, + 316 + ], + "lines": [ + { + "bbox": [ + 67, + 304, + 481, + 316 + ], + "spans": [ + { + "bbox": [ + 67, + 304, + 481, + 316 + ], + "type": "text", + "content": "Figure 6 Image reconstruction results. Results of SEED, Emu, and Emu2 are from Sun et al. (2024a)." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 70, + 327, + 187, + 445 + ], + "lines": [ + { + "bbox": [ + 70, + 327, + 187, + 445 + ], + "spans": [ + { + "bbox": [ + 70, + 327, + 187, + 445 + ], + "type": "image", + "image_path": "34f8d42ab9a5c73eb8ad1887541fb0ba22d092eca4923276bffde665da809607.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 73, + 447, + 129, + 456 + ], + "lines": [ + { + "bbox": [ + 73, + 447, + 129, + 456 + ], + "spans": [ + { + "bbox": [ + 73, + 447, + 129, + 456 + ], + "type": "text", + "content": "Add a chef hat to the dog" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 446, + 189, + 460 + ], + "lines": [ + { + "bbox": [ + 130, + 446, + 189, + 460 + ], + "spans": [ + { + "bbox": [ + 130, + 446, + 189, + 460 + ], + "type": "text", + "content": "There is a house in front of the mountain" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 472, + 541, + 495 + ], + "lines": [ + { + "bbox": [ + 67, + 472, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 67, + 472, + 541, + 495 + ], + "type": "text", + "content": "Figure 7 Image editing results. This capability can be easily transferred from image reconstruction after lightweight fine-tuning." + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 129, + 327, + 187, + 445 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 327, + 187, + 445 + ], + "spans": [ + { + "bbox": [ + 129, + 327, + 187, + 445 + ], + "type": "image", + "image_path": "9c147ce456e995372bd45563a786652f18b43eeb6d52c666af84093298afa877.jpg" + } + ] + } + ], + "index": 19, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 187, + 327, + 245, + 445 + ], + "blocks": [ + { + "bbox": [ + 187, + 327, + 245, + 445 + ], + "lines": [ + { + "bbox": [ + 187, + 327, + 245, + 445 + ], + "spans": [ + { + "bbox": [ + 187, + 327, + 245, + 445 + ], + "type": "image", + "image_path": "0c9fb1b5858a9ad2270627f7e7eb4d935d61f2172945f72b652a3578d4f08323.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 191, + 447, + 242, + 456 + ], + "lines": [ + { + "bbox": [ + 191, + 447, + 242, + 456 + ], + "spans": [ + { + "bbox": [ + 191, + 447, + 242, + 456 + ], + "type": "text", + "content": "Remove the 3-WAY sign" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 245, + 327, + 303, + 445 + ], + "blocks": [ + { + "bbox": [ + 245, + 327, + 303, + 445 + ], + "lines": [ + { + "bbox": [ + 245, + 327, + 303, + 445 + ], + "spans": [ + { + "bbox": [ + 245, + 327, + 303, + 445 + ], + "type": "image", + "image_path": "9c6c074753e5c0db73a2544daf4a539eb7a42932749c7546b1c4f05818f8732f.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 251, + 446, + 299, + 460 + ], + "lines": [ + { + "bbox": [ + 251, + 446, + 299, + 460 + ], + "spans": [ + { + "bbox": [ + 251, + 446, + 299, + 460 
+ ], + "type": "text", + "content": "Replace the dog with a golden retriever" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 303, + 327, + 419, + 445 + ], + "blocks": [ + { + "bbox": [ + 303, + 327, + 419, + 445 + ], + "lines": [ + { + "bbox": [ + 303, + 327, + 419, + 445 + ], + "spans": [ + { + "bbox": [ + 303, + 327, + 419, + 445 + ], + "type": "image", + "image_path": "77bc365456c7f1a324580189122792f8db5d207f06c8180d8e1bf41346dbbc5d.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 447, + 359, + 456 + ], + "lines": [ + { + "bbox": [ + 307, + 447, + 359, + 456 + ], + "spans": [ + { + "bbox": [ + 307, + 447, + 359, + 456 + ], + "type": "text", + "content": "Change to cartoon style" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 364, + 447, + 419, + 456 + ], + "lines": [ + { + "bbox": [ + 364, + 447, + 419, + 456 + ], + "spans": [ + { + "bbox": [ + 364, + 447, + 419, + 456 + ], + "type": "text", + "content": "Change it into linear style" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 362, + 327, + 477, + 445 + ], + "blocks": [ + { + "bbox": [ + 362, + 327, + 477, + 445 + ], + "lines": [ + { + "bbox": [ + 362, + 327, + 477, + 445 + ], + "spans": [ + { + "bbox": [ + 362, + 327, + 477, + 445 + ], + "type": "image", + "image_path": "55aca72edd9bc6fd2358694218fae4f8e6d7e4c0987fbd61f62661ac387ebd35.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 421, + 446, + 477, + 459 + ], + "lines": [ + { + "bbox": [ + 421, + 446, + 477, + 459 + ], + "spans": [ + { + "bbox": [ + 421, + 446, + 477, + 459 + ], + "type": "text", + "content": "Chenage the bird to a blue one" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "bbox": [ + 419, + 327, + 477, + 445 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 327, + 477, + 445 + ], + "spans": [ + { + "bbox": [ + 419, + 327, + 477, + 445 + ], + "type": "image", + "image_path": "c2baf0f2aa9245073fed0d9b72888184994aaa31dccb83be0fee93ec0a386d19.jpg" + } + ] + } + ], + "index": 29, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 477, + 327, + 536, + 445 + ], + "blocks": [ + { + "bbox": [ + 477, + 327, + 536, + 445 + ], + "lines": [ + { + "bbox": [ + 477, + 327, + 536, + 445 + ], + "spans": [ + { + "bbox": [ + 477, + 327, + 536, + 445 + ], + "type": "image", + "image_path": "4b3982cc5c2c19d0b2fd26b9f06c0bbf5ad2f7331e1a2829e8a6c44d7973e951.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 478, + 447, + 536, + 456 + ], + "lines": [ + { + "bbox": [ + 478, + 447, + 536, + 456 + ], + "spans": [ + { + "bbox": [ + 478, + 447, + 536, + 456 + ], + "type": "text", + "content": "Replace the fries with salad" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "bbox": [ + 67, + 516, + 543, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 516, + 543, + 685 + ], + "spans": [ + { + "bbox": [ + 67, + 516, + 543, + 685 + ], + "type": "text", + "content": "In terms of prompt alignment, MetaQuery also achieves competitive performance on GenEval (Ghosh et al., 2023) and DPG-Bench (Hu et al., 2024b), beating all diffusion model-based approaches including 
Transfusion (Zhou et al., 2025) and JanusFlow (Ma et al., 2025). We note that there is a performance gap between MetaQuery and Janus-Pro (Chen et al., 2025), which auto-regressively generates image tokens. We suggest that this gap may be due to the different failure modes of diffusion models and auto-regressive models: diffusion models usually fail to correctly follow the prompt, while auto-regressive models may suffer from more visual artifacts, which are difficult to quantify by GenEval and DPG-Bench. We tested the MJHQ-30K FID score of Janus-Pro under the same setting as ours and found that, in terms of visual quality and artifact control, MetaQuery is significantly better than Janus-Pro (see Appendix B for visual comparison). Additionally, we find that MetaQuery achieves much better world knowledge reasoning capability than Janus-Pro, which we will elaborate on in Section 5.5. We also found that when scaling up the size of frozen LLMs, the generation quality and prompt alignment also improves. MetaQuery provides a simple and principled way for leveraging the most advanced multimodal LLMs within a unified modeling framework. We also provide qualitative results in Figure 5 to illustrate the text-to-image generation capability of MetaQuery." + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 751 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 62, + 184, + 178 + ], + "blocks": [ + { + "bbox": [ + 70, + 62, + 184, + 178 + ], + "lines": [ + { + "bbox": [ + 70, + 62, + 184, + 178 + ], + "spans": [ + { + "bbox": [ + 70, + 62, + 184, + 178 + ], + "type": "image", + "image_path": "8e80f20eaaa577eb8299d8509c1b636a00a6be83769d922edded5c3bdd3e26fc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 76, + 188, + 121, + 234 + ], + "blocks": [ + { + "bbox": [ + 76, + 188, + 121, + 234 + ], + "lines": [ + { + "bbox": [ + 76, + 188, + 121, + 234 + ], + "spans": [ + { + "bbox": [ + 76, + 188, + 121, + 234 + ], + "type": "image", + "image_path": "47401bdfe62a96981c1feaecf18c9d323881824be0d85aa784265e9a19d202c9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 200, + 182, + 223 + ], + "lines": [ + { + "bbox": [ + 123, + 200, + 182, + 223 + ], + "spans": [ + { + "bbox": [ + 123, + 200, + 182, + 223 + ], + "type": "text", + "content": "Top view of the same berry bowl" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 188, + 62, + 304, + 178 + ], + "blocks": [ + { + "bbox": [ + 188, + 62, + 304, + 178 + ], + "lines": [ + { + "bbox": [ + 188, + 62, + 304, + 178 + ], + "spans": [ + { + "bbox": [ + 188, + 62, + 304, + 178 + ], + "type": "image", + "image_path": "4149a2a9d8a0180d46b942d93daa35e3ebf8ddc950c9a4f1d92f2c80d369bf94.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 194, + 188, + 240, + 234 + ], + "blocks": [ + { + "bbox": [ + 194, + 188, + 240, + 234 + ], + "lines": [ + { + "bbox": [ + 194, + 188, + 240, + 234 + ], + "spans": [ + { + 
"bbox": [ + 194, + 188, + 240, + 234 + ], + "type": "image", + "image_path": "61d5ab558a3fffaa336d26f1f2171b61a5a2ce74c40d4cd1af84fced70c45a4e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 242, + 200, + 297, + 222 + ], + "lines": [ + { + "bbox": [ + 242, + 200, + 297, + 222 + ], + "spans": [ + { + "bbox": [ + 242, + 200, + 297, + 222 + ], + "type": "text", + "content": "The same robot in Minecraft" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 307, + 62, + 422, + 178 + ], + "blocks": [ + { + "bbox": [ + 307, + 62, + 422, + 178 + ], + "lines": [ + { + "bbox": [ + 307, + 62, + 422, + 178 + ], + "spans": [ + { + "bbox": [ + 307, + 62, + 422, + 178 + ], + "type": "image", + "image_path": "a64fb9d0751c7bee00cfe32d761c4cf3cf5e293940406f5287724778e33f7dd8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 313, + 181, + 359, + 227 + ], + "blocks": [ + { + "bbox": [ + 313, + 181, + 359, + 227 + ], + "lines": [ + { + "bbox": [ + 313, + 181, + 359, + 227 + ], + "spans": [ + { + "bbox": [ + 313, + 181, + 359, + 227 + ], + "type": "image", + "image_path": "0030c217861cbcb07db1e9bf4497c51ebe5ff2aa3fc53234babc79d9c4ca53eb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 229, + 419, + 239 + ], + "lines": [ + { + "bbox": [ + 313, + 229, + 419, + 239 + ], + "spans": [ + { + "bbox": [ + 313, + 229, + 419, + 239 + ], + "type": "text", + "content": "The toy on the head of the cat" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 370, + 182, + 416, + 228 + ], + "blocks": [ + { + "bbox": [ + 370, + 182, + 416, + 228 + ], + "lines": [ + { + "bbox": [ + 370, + 182, + 416, + 228 + ], + "spans": [ + { + "bbox": [ + 370, + 182, + 416, + 228 + ], + "type": "image", + "image_path": "c209fc3515bcd2eea2d99ebc2865c335d0d995899ce2bd7acc199d906ccb6ecc.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 425, + 63, + 541, + 178 + ], + "blocks": [ + { + "bbox": [ + 425, + 63, + 541, + 178 + ], + "lines": [ + { + "bbox": [ + 425, + 63, + 541, + 178 + ], + "spans": [ + { + "bbox": [ + 425, + 63, + 541, + 178 + ], + "type": "image", + "image_path": "ab4ad8e76c716680a874cb79f434e48a84893b5174e1de3b19378e6c89009ff3.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 432, + 182, + 477, + 228 + ], + "blocks": [ + { + "bbox": [ + 432, + 182, + 477, + 228 + ], + "lines": [ + { + "bbox": [ + 432, + 182, + 477, + 228 + ], + "spans": [ + { + "bbox": [ + 432, + 182, + 477, + 228 + ], + "type": "image", + "image_path": "7997eceab53dfc59027dfeeb9d00f0d54c438ad627f519543fa4528dc0b321de.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 437, + 229, + 531, + 240 + ], + "lines": [ + { + "bbox": [ + 437, + 229, + 531, + 240 + ], + "spans": [ + { + "bbox": [ + 437, + 229, + 531, + 240 + ], + "type": "text", + "content": "The dog wearing sunglasses" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 489, + 182, + 534, + 228 + ], + "blocks": [ + { + "bbox": [ + 489, + 182, + 534, + 228 + ], + 
"lines": [ + { + "bbox": [ + 489, + 182, + 534, + 228 + ], + "spans": [ + { + "bbox": [ + 489, + 182, + 534, + 228 + ], + "type": "image", + "image_path": "4419bc1f599e2b5e75cfd27fe839c397d664930cb4e4ea9f79c6d864c52b521d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 69, + 247, + 185, + 363 + ], + "blocks": [ + { + "bbox": [ + 69, + 247, + 185, + 363 + ], + "lines": [ + { + "bbox": [ + 69, + 247, + 185, + 363 + ], + "spans": [ + { + "bbox": [ + 69, + 247, + 185, + 363 + ], + "type": "image", + "image_path": "b3b8ac37fa24e66c37666f35b4a47270abc1ff2917320e9161e2464df3cd9ee6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 76, + 366, + 121, + 412 + ], + "blocks": [ + { + "bbox": [ + 76, + 366, + 121, + 412 + ], + "lines": [ + { + "bbox": [ + 76, + 366, + 121, + 412 + ], + "spans": [ + { + "bbox": [ + 76, + 366, + 121, + 412 + ], + "type": "image", + "image_path": "924d212b61c2dbd9a97e97845fd03efdda618a18e51f97ff75229de657600cb8.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 373, + 181, + 407 + ], + "lines": [ + { + "bbox": [ + 123, + 373, + 181, + 407 + ], + "spans": [ + { + "bbox": [ + 123, + 373, + 181, + 407 + ], + "type": "text", + "content": "The same model but a real one in New York city" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 188, + 247, + 304, + 363 + ], + "blocks": [ + { + "bbox": [ + 188, + 247, + 304, + 363 + ], + "lines": [ + { + "bbox": [ + 188, + 247, + 304, + 363 + ], + "spans": [ + { + "bbox": [ + 188, + 247, + 304, + 363 + ], + "type": "image", + "image_path": "aa994d8f438857841af137ce9cf69d2079d81944e8baa713cb1e310ebbc5a032.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 194, + 366, + 240, + 412 + ], + "blocks": [ + { + "bbox": [ + 194, + 366, + 240, + 412 + ], + "lines": [ + { + "bbox": [ + 194, + 366, + 240, + 412 + ], + "spans": [ + { + "bbox": [ + 194, + 366, + 240, + 412 + ], + "type": "image", + "image_path": "5d46cf09b50e4f98422bb851e2dd13b1734ea4bebc37a20eb08828e94d103b92.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 242, + 373, + 297, + 407 + ], + "lines": [ + { + "bbox": [ + 242, + 373, + 297, + 407 + ], + "spans": [ + { + "bbox": [ + 242, + 373, + 297, + 407 + ], + "type": "text", + "content": "The sky line view of the city from this building" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 308, + 247, + 422, + 363 + ], + "blocks": [ + { + "bbox": [ + 308, + 247, + 422, + 363 + ], + "lines": [ + { + "bbox": [ + 308, + 247, + 422, + 363 + ], + "spans": [ + { + "bbox": [ + 308, + 247, + 422, + 363 + ], + "type": "image", + "image_path": "7d4c1fbb21fee1f8d29061754c600619607957874633c9de8a489aa2c276f85c.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 313, + 366, + 359, + 412 + ], + "blocks": [ + { + "bbox": [ + 313, + 366, + 359, + 412 + ], + "lines": [ + { + "bbox": [ + 313, + 366, + 359, + 412 + ], + "spans": [ + { + "bbox": [ + 313, + 366, + 359, + 412 + ], + "type": "image", + "image_path": 
"6ba2cccb890caaf3726ac0b89e0a6017e25a056bd986ad530823545b6cc653b2.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 378, + 407, + 402 + ], + "lines": [ + { + "bbox": [ + 361, + 378, + 407, + 402 + ], + "spans": [ + { + "bbox": [ + 361, + 378, + 407, + 402 + ], + "type": "text", + "content": "The statue in the same city" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 429, + 541, + 452 + ], + "lines": [ + { + "bbox": [ + 67, + 429, + 541, + 452 + ], + "spans": [ + { + "bbox": [ + 67, + 429, + 541, + 452 + ], + "type": "text", + "content": "Figure 8 Qualitative results for instruction tuning. Instruction-tuned MetaQuery achieves strong subject-driven capability (first row) and can even reason through the multimodal input to generate images (second row)." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 436, + 270, + 528, + 338 + ], + "blocks": [ + { + "bbox": [ + 436, + 270, + 528, + 338 + ], + "lines": [ + { + "bbox": [ + 436, + 270, + 528, + 338 + ], + "spans": [ + { + "bbox": [ + 436, + 270, + 528, + 338 + ], + "type": "image", + "image_path": "a84d0b3eab726eb969d63ec7f14b4cf156b8489ede5810f95269439a025f5c84.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 431, + 366, + 477, + 412 + ], + "blocks": [ + { + "bbox": [ + 431, + 366, + 477, + 412 + ], + "lines": [ + { + "bbox": [ + 431, + 366, + 477, + 412 + ], + "spans": [ + { + "bbox": [ + 431, + 366, + 477, + 412 + ], + "type": "image", + "image_path": "b77f0c069a70b297d343734d4179c4df607c59c0e70650ea18864234e7002424.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 479, + 378, + 527, + 401 + ], + "lines": [ + { + "bbox": [ + 479, + 378, + 527, + 401 + ], + "spans": [ + { + "bbox": [ + 479, + 378, + 527, + 401 + ], + "type": "text", + "content": "A logo for the same teapot" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "table", + "bbox": [ + 130, + 462, + 480, + 600 + ], + "blocks": [ + { + "bbox": [ + 130, + 462, + 480, + 600 + ], + "lines": [ + { + "bbox": [ + 130, + 462, + 480, + 600 + ], + "spans": [ + { + "bbox": [ + 130, + 462, + 480, + 600 + ], + "type": "table", + "html": "
MethodsDINO Score↑CLIP-I Score↑CLIP-T Score↑
Real Images (Oracle)0.7740.885-
fine-tuning
Textual Inversion (Gal et al., 2023)0.5690.7800.255
DreamBooth (Ruiz et al., 2023)0.6680.8030.305
BLIP-Diffusion (Li et al., 2023b)0.6700.8050.302
zero-shot & test time tuning free
Re-Imagen (Chen et al., 2023)0.6000.7400.270
BLIP-Diffusion (Li et al., 2023b)0.5940.7790.300
Kosmos-G (Pan et al., 2024)0.6940.8470.287
MetaQuery-B-Instruct0.7370.8520.301
", + "image_path": "44c2d287d692818c451c8e0a355531754e120c05a69103809eff8e26775d92fd.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "table_body" + } + ], + "index": 27 + }, + { + "bbox": [ + 67, + 608, + 382, + 619 + ], + "lines": [ + { + "bbox": [ + 67, + 608, + 382, + 619 + ], + "spans": [ + { + "bbox": [ + 67, + 608, + 382, + 619 + ], + "type": "text", + "content": "Table 5 Subject-driven generation results on DreamBench (Ruiz et al., 2023)." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 639, + 208, + 652 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 639, + 208, + 652 + ], + "spans": [ + { + "bbox": [ + 67, + 639, + 208, + 652 + ], + "type": "text", + "content": "5.2 Image Reconstruction" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 66, + 658, + 542, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 658, + 542, + 718 + ], + "spans": [ + { + "bbox": [ + 66, + 658, + 542, + 718 + ], + "type": "text", + "content": "We demonstrate that MetaQuery can be easily fine-tuned for image reconstruction tasks with a frozen MLLM (See Appendix C for more details). As shown in Figure 6, we compare our fine-tuned MetaQuery-B with existing diffusion autoencoders from various unified models, which reconstruct images from predicted visual features. Since these unified models are not explicitly fine-tuned for image reconstruction, their results are directly decoded from the vision encoder's output. Remarkably, even under this more constrained setup, our" + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 62, + 379, + 318 + ], + "blocks": [ + { + "bbox": [ + 63, + 62, + 379, + 318 + ], + "lines": [ + { + "bbox": [ + 63, + 62, + 379, + 318 + ], + "spans": [ + { + "bbox": [ + 63, + 62, + 379, + 318 + ], + "type": "image", + "image_path": "4073247d621f7d479da91cfa9b7a98561d024ab67b1715f504a15a138a4b5b54.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 331, + 542, + 389 + ], + "lines": [ + { + "bbox": [ + 67, + 331, + 542, + 389 + ], + "spans": [ + { + "bbox": [ + 67, + 331, + 542, + 389 + ], + "type": "text", + "content": "Figure 9 MetaQuery leverages frozen MLLMs for reasoning- and knowledge-augmented generation, overcoming the failure cases encountered in the base Sana model. * denotes that the LLM last layer embeddings of input tokens are used for image generation; the model is in L size (Qwen2.5-VL 3B). This approach can be better than the base Sana model in some cases but fails to activate in-context learning to perform knowledge-augmented generation. Some of the test cases are from MetaMorph (Tong et al., 2024) and CommonsenseT2I (Fu et al., 2024)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 385, + 62, + 539, + 315 + ], + "blocks": [ + { + "bbox": [ + 385, + 62, + 539, + 315 + ], + "lines": [ + { + "bbox": [ + 385, + 62, + 539, + 315 + ], + "spans": [ + { + "bbox": [ + 385, + 62, + 539, + 315 + ], + "type": "image", + "image_path": "cb12222fe568d3d46a55c40b1795a095b91866ad756d26fc3e7316d7bcc184af.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 407, + 544, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 407, + 544, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 407, + 544, + 445 + ], + "type": "text", + "content": "fine-tuned MetaQuery-B can still achieve competitive performance, matching the best existing open-source model Emu2 (Sun et al., 2024a). When compared with GPT-4o (OpenAI, 2025), our model also achieves comparable quality." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 458, + 168, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 458, + 168, + 472 + ], + "spans": [ + { + "bbox": [ + 67, + 458, + 168, + 472 + ], + "type": "text", + "content": "5.3 Image Editing" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 478, + 543, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 478, + 543, + 526 + ], + "spans": [ + { + "bbox": [ + 67, + 478, + 543, + 526 + ], + "type": "text", + "content": "As shown in Figure 7, we demonstrate that MetaQuery can transfer its image reconstruction capability to perform image editing. We keep the MLLM backbone frozen and fine-tune our pre-trained Base model for only 1,000 steps on publicly available image editing data. Qualitative results demonstrate that MetaQuery performs effectively in these image-editing scenarios." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 541, + 191, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 541, + 191, + 554 + ], + "spans": [ + { + "bbox": [ + 67, + 541, + 191, + 554 + ], + "type": "text", + "content": "5.4 Instruction Tuning" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 559, + 543, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 559, + 543, + 656 + ], + "spans": [ + { + "bbox": [ + 67, + 559, + 543, + 656 + ], + "type": "text", + "content": "We show that after being instruction-tuned on the proposed 2.4M dataset in Section 4, MetaQuery can achieve impressive zero-shot subject-driven generation performance, producing coherent results even with multiple highly customized subjects (the first row of Figure 8). Using various supervision signals, the instruction-tuned MetaQuery-B model surprisingly unlocks novel capabilities like visual association and logo design that go beyond copy-pasting (the second row of Figure 8). For example, in the first case, the model identifies the specific model of the input Porsche 911 car image, then correctly generates a novel front view for that model. In the second case, the model recognizes the input image of Rockefeller Center and imagines the view of New York City from the top of the Rockefeller Center." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 661, + 544, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 661, + 544, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 661, + 544, + 711 + ], + "type": "text", + "content": "We also follow DreamBooth (Ruiz et al., 2023) by adopting DINO, CLIP-I, and CLIP-T scores to quantitatively evaluate our model on the DreamBench (Ruiz et al., 2023) dataset. As shown in Table 5, our MetaQuery-B-Instruct model achieves SOTA performance, outperforming existing models like Kosmos-G (Pan et al., 2024) that are explicitly trained on constructed substitution tasks for subject-driven generation." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 752 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 75, + 61, + 537, + 304 + ], + "blocks": [ + { + "bbox": [ + 75, + 61, + 537, + 304 + ], + "lines": [ + { + "bbox": [ + 75, + 61, + 537, + 304 + ], + "spans": [ + { + "bbox": [ + 75, + 61, + 537, + 304 + ], + "type": "table", + "html": "
Methods | Cultural | Time | Space | Biology | Physics | Chemistry | Overall
GPT-4o** (OpenAI, 2025) | 0.94 | 0.64 | 0.98 | 0.93 | 0.98 | 0.95 | 0.89
Text-to-Image Models
SD-v1-5 (Rombach et al., 2021) | 0.34 | 0.35 | 0.32 | 0.28 | 0.29 | 0.21 | 0.32
SD-XL (Podell et al., 2023) | 0.43 | 0.48 | 0.47 | 0.44 | 0.45 | 0.27 | 0.43
PixArt-Alpha (Chen et al., 2024) | 0.45 | 0.50 | 0.48 | 0.49 | 0.56 | 0.34 | 0.47
playground-v2.5 (Li et al., 2024b) | 0.49 | 0.58 | 0.55 | 0.43 | 0.48 | 0.33 | 0.49
SD-3.5-large (Esser et al., 2024) | 0.44 | 0.50 | 0.58 | 0.44 | 0.52 | 0.31 | 0.46
FLUX.1-dev (Labs, 2024) | 0.48 | 0.58 | 0.62 | 0.42 | 0.51 | 0.35 | 0.50
Unified Models
show-o-512 (Xie et al., 2024) | 0.28 | 0.40 | 0.48 | 0.30 | 0.46 | 0.30 | 0.35
vila-u-7b-256 (Wu et al., 2025b) | 0.26 | 0.33 | 0.37 | 0.35 | 0.39 | 0.23 | 0.31
Emu3 (Wang et al., 2024) | 0.34 | 0.45 | 0.48 | 0.41 | 0.45 | 0.27 | 0.39
Janus-1.3B (Wu et al., 2025a) | 0.16 | 0.26 | 0.35 | 0.28 | 0.30 | 0.14 | 0.23
JanusFlow-1.3B (Ma et al., 2025) | 0.13 | 0.26 | 0.28 | 0.20 | 0.19 | 0.11 | 0.18
Janus-Pro-1B (Chen et al., 2025) | 0.20 | 0.28 | 0.45 | 0.24 | 0.32 | 0.16 | 0.26
Janus-Pro-7B (Chen et al., 2025) | 0.30 | 0.37 | 0.49 | 0.36 | 0.42 | 0.26 | 0.35
MetaQuery-B | 0.44 | 0.49 | 0.58 | 0.41 | 0.49 | 0.34 | 0.46
MetaQuery-L | 0.56 | 0.57 | 0.62 | 0.48 | 0.63 | 0.42 | 0.55
MetaQuery-XL | 0.56 | 0.55 | 0.62 | 0.49 | 0.63 | 0.41 | 0.55
", + "image_path": "bfd2befb3965d454b9427d56799e66522902a3b508eda96ad2dd99909ec45430.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 136, + 367, + 476, + 462 + ], + "blocks": [ + { + "bbox": [ + 67, + 312, + 544, + 356 + ], + "lines": [ + { + "bbox": [ + 67, + 312, + 544, + 356 + ], + "spans": [ + { + "bbox": [ + 67, + 312, + 544, + 356 + ], + "type": "text", + "content": "Table 6 Comparison of world knowledge reasoning on WISE (Niu et al., 2025). The test cases in WISE are similar to the knowledge-augmented generation ones in Figure 9. MetaQuery achieves SOTA performance and significantly outperforms all other unified models. ** Results are evaluated by Yan et al. (2025) on a random subset of 200 out of 1000 samples." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 136, + 367, + 476, + 462 + ], + "lines": [ + { + "bbox": [ + 136, + 367, + 476, + 462 + ], + "spans": [ + { + "bbox": [ + 136, + 367, + 476, + 462 + ], + "type": "table", + "html": "
Methods | w/o Neg. Prompt | w/ Neg. Prompt
DALL-E 3 (Ramesh et al., 2021) w/ rewrite | 40.17 | N/A
SD-XL (Podell et al., 2023) | 26.00 | 44.83
SD-3-medium (Esser et al., 2024) | 26.17 | 47.17
FLUX.1-dev (Labs, 2024) | 24.50 | 22.50
Sana-1.6B (Xie et al., 2025) | 25.17 | 43.33
MetaQuery-B | 27.33 | 51.50
MetaQuery-L | 28.83 | 57.67
", + "image_path": "12319674663acc97cbdaaa2425ad0014a35fc74e9a349cf01d441b15deb73c8a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 470, + 485, + 482 + ], + "lines": [ + { + "bbox": [ + 67, + 470, + 485, + 482 + ], + "spans": [ + { + "bbox": [ + 67, + 470, + 485, + 482 + ], + "type": "text", + "content": "Table 7 Comparison of visual commonsense reasoning capability on CommonsenseT2I (Fu et al., 2024)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 502, + 354, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 502, + 354, + 514 + ], + "spans": [ + { + "bbox": [ + 67, + 502, + 354, + 514 + ], + "type": "text", + "content": "5.5 Reasoning- and Knowledge-Augmented Generation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 521, + 543, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 521, + 543, + 604 + ], + "spans": [ + { + "bbox": [ + 67, + 521, + 543, + 604 + ], + "type": "text", + "content": "We show that the learnable queries can effectively leverage capabilities of the frozen LLM. This enables the model to better understand and follow complex prompts, including those requiring real-world knowledge and reasoning. As shown in Figure 9, for the left knowledge-augmented generation cases, MetaQuery-L can leverage world knowledge from the frozen MLLM and reason through the input question to generate the correct answer. For the right commonsense knowledge cases from CommonsenseT2I (Fu et al., 2024), the LLM provides better commonsense knowledge and enables MetaQuery to generate images that are consistent with the facts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 610, + 543, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 610, + 543, + 706 + ], + "spans": [ + { + "bbox": [ + 67, + 610, + 543, + 706 + ], + "type": "text", + "content": "To quantitatively evaluate MetaQuery's world knowledge reasoning capability, we employ the WISE (Niu et al., 2025) benchmark, which contains similar test cases to the knowledge-augmented generation examples shown in Figure 9. As demonstrated in Table 6, MetaQuery achieves SOTA performance, significantly outperforming all other unified models. Notably, before our work, existing unified models struggled to effectively leverage powerful MLLMs for reasoning and knowledge-augmented generation, resulting in inferior performance compared to text-to-image models. MetaQuery stands as the first unified model to successfully transfer the advanced capabilities of frozen MLLMs to image generation and exceed the performance of SOTA text-to-image models." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 110, + 62, + 500, + 112 + ], + "blocks": [ + { + "bbox": [ + 110, + 62, + 500, + 112 + ], + "lines": [ + { + "bbox": [ + 110, + 62, + 500, + 112 + ], + "spans": [ + { + "bbox": [ + 110, + 62, + 500, + 112 + ], + "type": "table", + "html": "
LLM Backbones | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ | CommonsenseT2I ↑
Qwen2.5-3B | 6.20 | 0.79 | 81.34 | 56.00
Qwen2.5-3B-Instruct | 6.36 | 0.79 | 81.12 | 54.33
Qwen2.5-VL-3B-Instruct | 6.35 | 0.78 | 81.10 | 57.67
", + "image_path": "e30e2bc1074dc3fbe3a878833ad91c7795ce47ef394f738cf6e0ce70967c24f7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 72, + 155, + 538, + 194 + ], + "blocks": [ + { + "bbox": [ + 67, + 121, + 541, + 143 + ], + "lines": [ + { + "bbox": [ + 67, + 121, + 541, + 143 + ], + "spans": [ + { + "bbox": [ + 67, + 121, + 541, + 143 + ], + "type": "text", + "content": "Table 8 Comparison across different LLM backbones. Image generation capability is mostly orthogonal to multimodal understanding capability." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 72, + 155, + 538, + 194 + ], + "lines": [ + { + "bbox": [ + 72, + 155, + 538, + 194 + ], + "spans": [ + { + "bbox": [ + 72, + 155, + 538, + 194 + ], + "type": "table", + "html": "
Methods | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑ | WiScore ↑ | CommonsenseT2I ↑
Ours-L w/ Last Layer Embed* | 6.41 | 0.78 | 81.23 | 0.48 | 52.83
Ours-L w/ MetaQueries | 6.35 | 0.78 | 81.10 | 0.55 | 57.67
", + "image_path": "f9420a60fcb422ba7b6a72e7f07a960eb7ba70e86790181846908582736eb2a3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 203, + 541, + 258 + ], + "lines": [ + { + "bbox": [ + 67, + 203, + 541, + 258 + ], + "spans": [ + { + "bbox": [ + 67, + 203, + 541, + 258 + ], + "type": "text", + "content": "Table 9 Comparison between MetaQuery and LLM last layer embedding. * denotes that the LLM last layer embeddings of input tokens are used for image generation. We observe comparable performance between MetaQuery and LLM last layer embedding on visual quality and prompt alignment. However, MetaQuery can activate in-context learning to perform knowledge-augmented generation, yielding much better performance on commonsense reasoning and world knowledge reasoning." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 279, + 541, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 279, + 541, + 327 + ], + "spans": [ + { + "bbox": [ + 67, + 279, + 541, + 327 + ], + "type": "text", + "content": "We also quantitatively evaluate MetaQuery's commonsense reasoning capability on the CommonsenseT2I benchmark (Fu et al., 2024) in Table 7. For simplicity, we use CLIP (Radford et al., 2021) as the evaluator following their original implementation. Results show that MetaQuery significantly improves the performance of the base Sana model, achieving SOTA performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 342, + 153, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 342, + 153, + 354 + ], + "spans": [ + { + "bbox": [ + 67, + 342, + 153, + 354 + ], + "type": "text", + "content": "5.6 Discussion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 361, + 543, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 361, + 543, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 543, + 445 + ], + "type": "text", + "content": "Comparison over different LLM backbones. As shown in Table 8, to test the impact of employing different LLM backbones for MetaQuery, we carefully select a family of backbone models: pre-trained LLM (Qwen2.5-3B), instruction-tuned LLM (Qwen2.5-3B-Instruct), and instruction-tuned MLLM (Qwen2.5-VL-3B-Instruct). Both instruction-tuned models are initialized with the first pre-trained model checkpoint. Experimental results show that instruction tuning can achieve better (multimodal) understanding capabilities. However, the improvements are orthogonal to image generation performance when employed to provide multimodal generation conditions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 459, + 542, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 459, + 542, + 591 + ], + "spans": [ + { + "bbox": [ + 67, + 459, + 542, + 591 + ], + "type": "text", + "content": "Comparison with using last layer embeddings. As shown in Table 1, our learnable queries approach achieves comparable image generation quality and prompt alignment to using the LLM's last layer embeddings of input tokens. However, the last layer embedding method essentially treats the decoder-only LLM as a text encoder, which inherently limits its in-context learning capabilities. While this approach does improve upon the base Sana model in some cases as demonstrated in Figure 9, it struggles with the knowledge-augmented generation cases shown in the same figure. 
These cases require the LLM to first process and answer input questions before generating corresponding images, demanding in-context learning beyond what text encoders typically provide. This performance gap is quantitatively confirmed in Table 9, where MetaQuery significantly outperforms the last layer embedding approach on both WiScore and CommonsenseT2I benchmarks. Integrated natively with the LLM, MetaQuery naturally leverages its in-context learning capabilities, enabling the model to reason through questions and generate appropriate images." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 609, + 159, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 609, + 159, + 623 + ], + "spans": [ + { + "bbox": [ + 67, + 609, + 159, + 623 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 635, + 542, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 635, + 542, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 635, + 542, + 719 + ], + "type": "text", + "content": "We presented MetaQueries, a simple interface connecting MLLMs (for understanding) and diffusion decoders (for generation), effective even when the MLLM is frozen. This approach yields state-of-the-art understanding and generation performance with straightforward implementation. By enabling transfer between modalities, MetaQueries successfully channels MLLM knowledge and reasoning into multimodal generation. While effective, we hypothesize that bridging the remaining gap to leading proprietary systems may primarily involve further data scaling. We hope MetaQueries provides a powerful, accessible baseline for future unified multimodal model development." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 64, + 139, + 77 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 139, + 77 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 139, + 77 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 89, + 542, + 696 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 69, + 89, + 542, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 89, + 542, + 122 + ], + "spans": [ + { + "bbox": [ + 69, + 89, + 542, + 122 + ], + "type": "text", + "content": "Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. In NeurIPS, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 127, + 541, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 127, + 541, + 152 + ], + "spans": [ + { + "bbox": [ + 67, + 127, + 541, + 152 + ], + "type": "text", + "content": "Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 156, + 542, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 156, + 542, + 178 + ], + "spans": [ + { + "bbox": [ + 69, + 156, + 542, + 178 + ], + "type": "text", + "content": "Tim Brooks, Aleksander Holynski, and Alexei A Efros. Instructpix2pix: Learning to follow image editing instructions. In CVPR, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 184, + 541, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 184, + 541, + 206 + ], + "spans": [ + { + "bbox": [ + 69, + 184, + 541, + 206 + ], + "type": "text", + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. In NeurIPS, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 213, + 541, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 213, + 541, + 245 + ], + "spans": [ + { + "bbox": [ + 69, + 213, + 541, + 245 + ], + "type": "text", + "content": "Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al. Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis. In ICLR, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 251, + 541, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 251, + 541, + 274 + ], + "spans": [ + { + "bbox": [ + 69, + 251, + 541, + 274 + ], + "type": "text", + "content": "Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. In ICLR, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 279, + 541, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 279, + 541, + 311 + ], + "spans": [ + { + "bbox": [ + 69, + 279, + 541, + 311 + ], + "type": "text", + "content": "Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 318, + 533, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 318, + 533, + 329 + ], + "spans": [ + { + "bbox": [ + 69, + 318, + 533, + 329 + ], + "type": "text", + "content": "Jacob Devlin. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 335, + 541, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 541, + 357 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 541, + 357 + ], + "type": "text", + "content": "Runpei Dong, Chunrui Han, Yuang Peng, Zekun Qi, Zheng Ge, Jinrong Yang, Liang Zhao, Jianjian Sun, Hongyu Zhou, Haoran Wei, et al. Dreamllm: Synergistic multimodal comprehension and creation. In ICLR, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 363, + 541, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 363, + 541, + 395 + ], + "spans": [ + { + "bbox": [ + 69, + 363, + 541, + 395 + ], + "type": "text", + "content": "Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In ICML, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 402, + 541, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 402, + 541, + 434 + ], + "spans": [ + { + "bbox": [ + 69, + 402, + 541, + 434 + ], + "type": "text", + "content": "Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 441, + 541, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 441, + 541, + 462 + ], + "spans": [ + { + "bbox": [ + 69, + 441, + 541, + 462 + ], + "type": "text", + "content": "Xingyu Fu, Muyu He, Yujie Lu, William Yang Wang, and Dan Roth. Commonsense-t2i challenge: Can text-to-image generation models understand commonsense? In " + }, + { + "bbox": [ + 69, + 441, + 541, + 462 + ], + "type": "inline_equation", + "content": "COLM" + }, + { + "bbox": [ + 69, + 441, + 541, + 462 + ], + "type": "text", + "content": ", 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 468, + 541, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 468, + 541, + 491 + ], + "spans": [ + { + "bbox": [ + 69, + 468, + 541, + 491 + ], + "type": "text", + "content": "Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. In ICLR, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 496, + 541, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 496, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 69, + 496, + 541, + 518 + ], + "type": "text", + "content": "Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 524, + 541, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 524, + 541, + 556 + ], + "spans": [ + { + "bbox": [ + 69, + 524, + 541, + 556 + ], + "type": "text", + "content": "Yuying Ge, Sijie Zhao, Jinguo Zhu, Yixiao Ge, Kun Yi, Lin Song, Chen Li, Xiaohan Ding, and Ying Shan. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 563, + 541, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 563, + 541, + 585 + ], + "spans": [ + { + "bbox": [ + 69, + 563, + 541, + 585 + ], + "type": "text", + "content": "Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. In NeurIPS, 2023." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 591, + 541, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 591, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 69, + 591, + 541, + 613 + ], + "type": "text", + "content": "Google. Experiment with gemini 2.0 flash native image generation, 2025. https://developers.googleblog.com/en/experiment-with-gemini-20-flash-native-image-generation/." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 619, + 541, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 619, + 541, + 641 + ], + "spans": [ + { + "bbox": [ + 69, + 619, + 541, + 641 + ], + "type": "text", + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In NeurIPS, 2017." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 647, + 541, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 647, + 541, + 669 + ], + "spans": [ + { + "bbox": [ + 69, + 647, + 541, + 669 + ], + "type": "text", + "content": "Hexiang Hu, Kelvin CK Chan, Yu-Chuan Su, Wenhu Chen, Yandong Li, Kihyuk Sohn, Yang Zhao, Xue Ben, Boqing Gong, William Cohen, et al. Instruct-imagen: Image generation with multi-modal instruction. In CVPR, 2024a." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 674, + 541, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 674, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 69, + 674, + 541, + 696 + ], + "type": "text", + "content": "Xiwei Hu, Rui Wang, Yixiao Fang, Bin Fu, Pei Cheng, and Gang Yu. Ella: Equip diffusion models with llm for enhanced semantic alignment. arXiv preprint arXiv:2403.05135, 2024b." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 64, + 542, + 705 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "text", + "content": "Yang Jin, Kun Xu, Liwei Chen, Chao Liao, Jianchao Tan, Bin Chen, Chenyi Lei, An Liu, Chengru Song, Xiaoqiang Lei, et al. Unified language-vision pretraining with dynamic discrete visual tokenization. In ICLR, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 92, + 541, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 92, + 541, + 114 + ], + "spans": [ + { + "bbox": [ + 68, + 92, + 541, + 114 + ], + "type": "text", + "content": "Jing Yu Koh, Daniel Fried, and Ruslan Salakhutdinov. Generating images with multimodal language models. In NeurIPS, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 121, + 202, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 121, + 202, + 132 + ], + "spans": [ + { + "bbox": [ + 69, + 121, + 202, + 132 + ], + "type": "text", + "content": "Black Forest Labs. Flux.1, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 137, + 541, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 137, + 541, + 159 + ], + "spans": [ + { + "bbox": [ + 69, + 137, + 541, + 159 + ], + "type": "text", + "content": "Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 165, + 541, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 165, + 541, + 187 + ], + "spans": [ + { + "bbox": [ + 69, + 165, + 541, + 187 + ], + "type": "text", + "content": "Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 193, + 541, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 193, + 541, + 216 + ], + "spans": [ + { + "bbox": [ + 69, + 193, + 541, + 216 + ], + "type": "text", + "content": "Daiqing Li, Aleks Kamko, Ehsan Akhgari, Ali Sabet, Linmiao Xu, and Suhail Doshi. Playground v2. 5: Three insights towards enhancing aesthetic quality in text-to-image generation. arXiv preprint arXiv:2402.17245, 2024b." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 220, + 541, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 220, + 541, + 243 + ], + "spans": [ + { + "bbox": [ + 69, + 220, + 541, + 243 + ], + "type": "text", + "content": "Dongxu Li, Junnan Li, and Steven CH Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. In NeurIPS, 2023b." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 249, + 541, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 249, + 541, + 271 + ], + "spans": [ + { + "bbox": [ + 69, + 249, + 541, + 271 + ], + "type": "text", + "content": "Hao Liu, Wilson Yan, Matei Zaharia, and Pieter Abbeel. World model on million-length video and language with ringattention. arXiv preprint arXiv:2402.08268, 2024a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 277, + 509, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 277, + 509, + 289 + ], + "spans": [ + { + "bbox": [ + 69, + 277, + 509, + 289 + ], + "type": "text", + "content": "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2024b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 293, + 542, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 293, + 542, + 326 + ], + "spans": [ + { + "bbox": [ + 69, + 293, + 542, + 326 + ], + "type": "text", + "content": "Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? arXiv preprint arXiv:2307.06281, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 332, + 541, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 332, + 541, + 355 + ], + "spans": [ + { + "bbox": [ + 69, + 332, + 541, + 355 + ], + "type": "text", + "content": "Bingqi Ma, Zhuofan Zong, Guanglu Song, Hongsheng Li, and Yu Liu. 
Exploring the role of large language models in prompt encoding for diffusion models. In NeurIPS, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 361, + 541, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 361, + 541, + 393 + ], + "spans": [ + { + "bbox": [ + 69, + 361, + 541, + 393 + ], + "type": "text", + "content": "Yiyang Ma, Xingchao Liu, Xiaokang Chen, Wen Liu, Chengyue Wu, Zhiyu Wu, Zizheng Pan, Zhenda Xie, Haowei Zhang, Liang Zhao, et al. Janusflow: Harmonizing autoregression and rectified flow for unified multimodal understanding and generation. In CVPR, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 399, + 541, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 399, + 541, + 431 + ], + "spans": [ + { + "bbox": [ + 69, + 399, + 541, + 431 + ], + "type": "text", + "content": "Yuwei Niu, Munan Ning, Mengren Zheng, Bin Lin, Peng Jin, Jiaqi Liao, Kunpeng Ning, Bin Zhu, and Li Yuan. Wise: A world knowledge-informed semantic evaluation for text-to-image generation. arXiv preprint arXiv:2503.07265, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 438, + 523, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 438, + 523, + 450 + ], + "spans": [ + { + "bbox": [ + 69, + 438, + 523, + 450 + ], + "type": "text", + "content": "OpenAI. Introducing 4o image generation, 2025. https://openai.com/index/introducing-4o-image-generation/." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 455, + 541, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 455, + 541, + 478 + ], + "spans": [ + { + "bbox": [ + 69, + 455, + 541, + 478 + ], + "type": "text", + "content": "Xichen Pan, Li Dong, Shaohan Huang, Zhiliang Peng, Wenhu Chen, and Furu Wei. Kosmos-g: Generating images in context with multimodal large language models. In ICLR, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 483, + 541, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 483, + 541, + 515 + ], + "spans": [ + { + "bbox": [ + 69, + 483, + 541, + 515 + ], + "type": "text", + "content": "Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 521, + 541, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 521, + 541, + 555 + ], + "spans": [ + { + "bbox": [ + 69, + 521, + 541, + 555 + ], + "type": "text", + "content": "Adam Polyak, Amit Zohar, Andrew Brown, Andros Tjandra, Animesh Sinha, Ann Lee, Apoorv Vyas, Bowen Shi, Chih-Yao Ma, Ching-Yao Chuang, et al. Movie gen: A cast of media foundation models. arXiv preprint arXiv:2410.13720, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 561, + 541, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 561, + 541, + 594 + ], + "spans": [ + { + "bbox": [ + 69, + 561, + 541, + 594 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 599, + 541, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 599, + 541, + 622 + ], + "spans": [ + { + "bbox": [ + 69, + 599, + 541, + 622 + ], + "type": "text", + "content": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In ICML, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 628, + 541, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 628, + 541, + 650 + ], + "spans": [ + { + "bbox": [ + 69, + 628, + 541, + 650 + ], + "type": "text", + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 655, + 541, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 655, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 69, + 655, + 541, + 677 + ], + "type": "text", + "content": "Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In CVPR, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 683, + 541, + 705 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 683, + 541, + 705 + ], + "spans": [ + { + "bbox": [ + 69, + 683, + 541, + 705 + ], + "type": "text", + "content": "Weijia Shi, Xiaochuang Han, Chunting Zhou, Weixin Liang, Xi Victoria Lin, Luke Zettlemoyer, and Lili Yu. Llamafusion: Adapting pretrained language models for multimodal generation. arXiv preprint arXiv:2412.15188, 2024." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 64, + 543, + 693 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "text", + "content": "Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In " + }, + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "inline_equation", + "content": "CVPR" + }, + { + "bbox": [ + 69, + 64, + 541, + 87 + ], + "type": "text", + "content": ", 2024a." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 541, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 541, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 541, + 116 + ], + "type": "text", + "content": "Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative pretraining in multimodality. In ICLR, 2024b." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 120, + 542, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 120, + 542, + 133 + ], + "spans": [ + { + "bbox": [ + 69, + 120, + 542, + 133 + ], + "type": "text", + "content": "Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 137, + 318, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 137, + 318, + 149 + ], + "spans": [ + { + "bbox": [ + 69, + 137, + 318, + 149 + ], + "type": "text", + "content": "Qwen Team. Qwen2.5: A party of foundation models, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 154, + 543, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 154, + 543, + 188 + ], + "spans": [ + { + "bbox": [ + 69, + 154, + 543, + 188 + ], + "type": "text", + "content": "Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 193, + 542, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 193, + 542, + 226 + ], + "spans": [ + { + "bbox": [ + 69, + 193, + 542, + 226 + ], + "type": "text", + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 232, + 542, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 232, + 542, + 255 + ], + "spans": [ + { + "bbox": [ + 69, + 232, + 542, + 255 + ], + "type": "text", + "content": "Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 259, + 542, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 259, + 542, + 293 + ], + "spans": [ + { + "bbox": [ + 69, + 259, + 542, + 293 + ], + "type": "text", + "content": "Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. In CVPR, 2025a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 298, + 542, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 298, + 542, + 321 + ], + "spans": [ + { + "bbox": [ + 69, + 298, + 542, + 321 + ], + "type": "text", + "content": "Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 327, + 542, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 542, + 359 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 542, + 359 + ], + "type": "text", + "content": "Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. In ICLR, 2025b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 365, + 541, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 365, + 541, + 388 + ], + "spans": [ + { + "bbox": [ + 69, + 365, + 541, + 388 + ], + "type": "text", + "content": "Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Shuting Wang, Tiejun Huang, and Zheng Liu. Omnigen: Unified image generation. In CVPR, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 393, + 541, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 393, + 541, + 417 + ], + "spans": [ + { + "bbox": [ + 69, + 393, + 541, + 417 + ], + "type": "text", + "content": "Enze Xie, Junsong Chen, Junyu Chen, Han Cai, Haotian Tang, Yujun Lin, Zhekai Zhang, Muyang Li, Ligeng Zhu, Yao Lu, et al. Sana: Efficient high-resolution image synthesis with linear diffusion transformers. In ICLR, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 421, + 541, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 421, + 541, + 455 + ], + "spans": [ + { + "bbox": [ + 69, + 421, + 541, + 455 + ], + "type": "text", + "content": "Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 460, + 541, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 460, + 541, + 493 + ], + "spans": [ + { + "bbox": [ + 69, + 460, + 541, + 493 + ], + "type": "text", + "content": "Zhiyuan Yan, Junyan Ye, Weijia Li, Zilong Huang, Shenghai Yuan, Xiangyang He, Kaiqing Lin, Jun He, Conghui He, and Li Yuan. Gpt-imgeval: A comprehensive benchmark for diagnosing gpt4o in image generation. arXiv preprint arXiv:2504.02782, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 498, + 541, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 498, + 541, + 532 + ], + "spans": [ + { + "bbox": [ + 69, + 498, + 541, + 532 + ], + "type": "text", + "content": "Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. In TMLR, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 537, + 541, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 537, + 541, + 561 + ], + "spans": [ + { + "bbox": [ + 69, + 537, + 541, + 561 + ], + "type": "text", + "content": "Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. \nMm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490, 2023." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 566, + 541, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 566, + 541, + 599 + ], + "spans": [ + { + "bbox": [ + 69, + 566, + 541, + 599 + ], + "type": "text", + "content": "Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In CVPR, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 605, + 541, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 605, + 541, + 627 + ], + "spans": [ + { + "bbox": [ + 69, + 605, + 541, + 627 + ], + "type": "text", + "content": "Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 632, + 541, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 632, + 541, + 655 + ], + "spans": [ + { + "bbox": [ + 69, + 632, + 541, + 655 + ], + "type": "text", + "content": "Kai Zhang, Yi Luan, Hexiang Hu, Kenton Lee, Siyuan Qiao, Wenhu Chen, Yu Su, and Ming-Wei Chang. Magiclens: Self-supervised image retrieval with open-ended instructions. In ICML, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 660, + 541, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 660, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 69, + 660, + 541, + 693 + ], + "type": "text", + "content": "Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. In ICLR, 2025." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 752 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 64, + 542, + 137 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 69, + 64, + 542, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 64, + 542, + 100 + ], + "spans": [ + { + "bbox": [ + 69, + 64, + 542, + 100 + ], + "type": "text", + "content": "Wanrong Zhu, Jack Hessel, Anas Awadalla, Samir Yitzhak Gadre, Jesse Dodge, Alex Fang, Youngjae Yu, Ludwig Schmidt, William Yang Wang, and Yejin Choi. Multimodal C4: An open, billion-scale corpus of images interleaved with text. In NeurIPS, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 103, + 542, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 103, + 542, + 137 + ], + "spans": [ + { + "bbox": [ + 69, + 103, + 542, + 137 + ], + "type": "text", + "content": "Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al. Lumina next: Making lumina-t2x stronger and faster with next-dit. arXiv preprint arXiv:2406.18583, 2024." 
+ } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 742, + 311, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 742, + 311, + 751 + ], + "spans": [ + { + "bbox": [ + 300, + 742, + 311, + 751 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 60, + 153, + 80 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 60, + 153, + 80 + ], + "spans": [ + { + "bbox": [ + 67, + 60, + 153, + 80 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 98, + 222, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 98, + 222, + 110 + ], + "spans": [ + { + "bbox": [ + 67, + 98, + 222, + 110 + ], + "type": "text", + "content": "A Data Curation Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 124, + 541, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 124, + 541, + 148 + ], + "spans": [ + { + "bbox": [ + 67, + 124, + 541, + 148 + ], + "type": "text", + "content": "For the data curation part, we use Qwen/Qwen2-VL-7B-Instruct² as our MLLM, The system prompt we are using is:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 154, + 525, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 154, + 525, + 178 + ], + "spans": [ + { + "bbox": [ + 83, + 154, + 525, + 178 + ], + "type": "text", + "content": "Based on the provided of one or multiple source images, one target image, and their captions, create an interesting text prompt that can be used with the source images to generate the target image." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 178, + 209, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 178, + 209, + 190 + ], + "spans": [ + { + "bbox": [ + 83, + 178, + 209, + 190 + ], + "type": "text", + "content": "This prompt should include:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 99, + 190, + 525, + 226 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 99, + 190, + 525, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 190, + 525, + 213 + ], + "spans": [ + { + "bbox": [ + 99, + 190, + 525, + 213 + ], + "type": "text", + "content": "- one general and unspecific similarity shared with the source images (same jersey top, similar axe, similar building, etc)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 99, + 215, + 309, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 215, + 309, + 226 + ], + "spans": [ + { + "bbox": [ + 99, + 215, + 309, + 226 + ], + "type": "text", + "content": "- all differences that only the target image has." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 227, + 235, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 227, + 235, + 238 + ], + "spans": [ + { + "bbox": [ + 83, + 227, + 235, + 238 + ], + "type": "text", + "content": "This prompt should NOT include:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 99, + 239, + 525, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 239, + 525, + 262 + ], + "spans": [ + { + "bbox": [ + 99, + 239, + 525, + 262 + ], + "type": "text", + "content": "- any specific details that would allow generating the target image independently without referencing the source images." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 262, + 525, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 262, + 525, + 286 + ], + "spans": [ + { + "bbox": [ + 83, + 262, + 525, + 286 + ], + "type": "text", + "content": "Remember the prompt should be concise and short. The generation has to be done by combining the source images and text prompts." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 299, + 541, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 299, + 541, + 331 + ], + "spans": [ + { + "bbox": [ + 67, + 299, + 541, + 331 + ], + "type": "text", + "content": "B Qualitative Comparison with SOTA Open-Source Model on Text-to-Image Generation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 343, + 541, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 343, + 541, + 379 + ], + "spans": [ + { + "bbox": [ + 67, + 343, + 541, + 379 + ], + "type": "text", + "content": "We provide a qualitative comparison with Janus-Pro-7B (Chen et al., 2025) on MJHQ-30K (Li et al., 2024b) in Figure 10. We can see that MetaQuery-XL follows the prompt better and generates more visually appealing images than Janus-Pro-7B." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 398, + 208, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 398, + 208, + 414 + ], + "spans": [ + { + "bbox": [ + 67, + 398, + 208, + 414 + ], + "type": "text", + "content": "C Training Objectives" + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 126, + 429, + 484, + 479 + ], + "blocks": [ + { + "bbox": [ + 126, + 429, + 484, + 479 + ], + "lines": [ + { + "bbox": [ + 126, + 429, + 484, + 479 + ], + "spans": [ + { + "bbox": [ + 126, + 429, + 484, + 479 + ], + "type": "table", + "html": "
Objective | Rel. Wall Time | MJHQ-30K FID ↓ | GenEval ↑ | DPG-Bench ↑
Text-to-Image | 1.0x | 7.43 | 0.56 | 75.35
Image Reconstruction | 2.79x | 27.42 | 0.32 | 68.36
Mix | 2.61x | 8.27 | 0.54 | 76.53
", + "image_path": "9f32362161c9b5ac0fc53f7dbadac96f6648ff2c8824c55b33ce850f06278d39.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 488, + 541, + 512 + ], + "lines": [ + { + "bbox": [ + 67, + 488, + 541, + 512 + ], + "spans": [ + { + "bbox": [ + 67, + 488, + 541, + 512 + ], + "type": "text", + "content": "Table 10 Study on training objectives. Image reconstruction objective can be mixed with text-to-image objective to enable image reconstruction capabilities without harming visual quality and prompt alignment." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 522, + 541, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 522, + 541, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 522, + 541, + 581 + ], + "type": "text", + "content": "We are using an MLLM for multimodal perception, besides the standard text-to-image objective, we can also use an image reconstruction objective to achieve alignment. In Table 10, we show that training with the text-to-image objective achieves much better performance than the image reconstruction objective. We demonstrate that a mix of both objectives can enable image reconstruction capabilities without being generally harmful to the T2I performance." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 79, + 712, + 285, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 712, + 285, + 723 + ], + "spans": [ + { + "bbox": [ + 79, + 712, + 285, + 723 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 79, + 712, + 285, + 723 + ], + "type": "text", + "content": "https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 310, + 751 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 110, + 541, + 648 + ], + "blocks": [ + { + "bbox": [ + 62, + 110, + 541, + 648 + ], + "lines": [ + { + "bbox": [ + 62, + 110, + 541, + 648 + ], + "spans": [ + { + "bbox": [ + 62, + 110, + 541, + 648 + ], + "type": "image", + "image_path": "14bedde3fdc5db9a2502133501221e3d482ecd91d5259320c8bb8efeadfee1fd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 660, + 499, + 672 + ], + "lines": [ + { + "bbox": [ + 67, + 660, + 499, + 672 + ], + "spans": [ + { + "bbox": [ + 67, + 660, + 499, + 672 + ], + "type": "text", + "content": "Figure 10 Qualitative comparison with Janus-Pro-7B (Chen et al., 2025) on MJHQ-30K (Li et al., 2024b)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 742, + 311, + 752 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 742, + 311, + 752 + ], + "spans": [ + { + "bbox": [ + 300, + 742, + 311, + 752 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_content_list.json b/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..01257aa18be7387797cc0867b47846f59c9e489d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_content_list.json @@ -0,0 +1,4660 @@ +[ + { + "type": "text", + "text": "Hogwild! Inference: Parallel LLM Generation via Concurrent Attention", + "text_level": 1, + "bbox": [ + 187, + 122, + 810, + 171 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/2a5c487c953a0f66fdfd07ab66310d6ead70ff7461466ea60acdc372467058a1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Gleb Rodionov†* Yandex</td><td>Roman Garipov* HSE University Yandex</td><td>Alina Shutova* HSE University Yandex</td><td>George Yakushev* HSE University Yandex</td><td>Erik Schultheis* IST Austria</td></tr>
<tr><td>Vage Egiazarian IST Austria</td><td>Anton Sinitsin Yandex</td><td>Denis Kuznedev Yandex</td><td>Dan Alistarh‡ IST Austria</td></tr></table>
", + "bbox": [ + 174, + 218, + 818, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 338, + 537, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) have demonstrated the ability to tackle increasingly complex tasks through advanced reasoning, long-form content generation, and tool use. Solving these tasks often involves long inference-time computations. In human problem solving, a common strategy to expedite work is collaboration: by dividing the problem into sub-tasks, exploring different strategies concurrently, etc. Recent research has shown that LLMs can also operate in parallel by implementing explicit cooperation frameworks, such as voting mechanisms or the explicit creation of independent sub-tasks that can be executed in parallel. However, each of these frameworks may not be suitable for all types of tasks, which can hinder their applicability. In this work, we propose a different design approach: we run LLM \"workers\" in parallel, allowing them to synchronize via a concurrently-updated attention cache and prompt these workers to decide how best to collaborate. Our approach allows the LLM instances to come up with their own collaboration strategy for the problem at hand, all the while \"seeing\" each other's memory in the concurrent KV cache. We implement this approach via Hogwild! Inference: a parallel LLM inference engine where multiple instances of the same LLM run in parallel with the same attention cache, with \"instant\" access to each other's memory.1 Hogwild! Inference takes advantage of Rotary Position Embeddings (RoPE) to avoid recomputation while improving parallel hardware utilization. We find that modern reasoning-capable LLMs can perform inference with shared Key-Value cache out of the box, without additional fine-tuning.", + "bbox": [ + 228, + 368, + 769, + 660 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 678, + 313, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Many recent advancements of Large Language Models can be attributed to their ability to perform inference-time computations to improve performance [Suzgun et al., 2022, Snell et al., 2024, Beeching et al., Muennighoff et al., 2025]. This includes chain-of-thought (CoT) reasoning [Wei et al., 2022, Kojima et al., 2022, Zhang et al., 2022, Yao et al., 2023, Lightman et al., 2023], long-form generation [Bai et al., 2024] and interacting with external tools [Schick et al., 2023, Qin et al., 2023, Yao et al., 2022, Shen et al., 2023]. Popular LLM-based services have capabilities for reasoning and tool use [OpenAI et al., 2024, Google DeepMind, 2025, Anthropic, 2024]. At the same time, several reasoning-capable open-access LLMs have recently been released to the public [DeepSeek-AI et al., 2025, Qwen Team, 2025, Yang et al., 2024, Muennighoff et al., 2025, Ye et al., 2025].", + "bbox": [ + 169, + 702, + 826, + 829 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Using these models to solve complex problems often requires long sequential computations, that is, generating text token-by-token. However, many reasoning problems are not sequential. 
Leveraging this intuition, several recent works propose parallel inference strategies that allow multiple LLMs", + "bbox": [ + 169, + 833, + 826, + 878 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06261v4 [cs.LG] 17 Nov 2025", + "bbox": [ + 21, + 273, + 55, + 720 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1Our implementation is available at https://github.com/eqimp/hogwild_11m.", + "bbox": [ + 189, + 885, + 691, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Corresponding author: rodionovgleb@yandex-team.ru. * Equal contribution. ‡ Senior author.", + "bbox": [ + 192, + 898, + 800, + 912 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "39th Conference on Neural Information Processing Systems (NeurIPS 2025).", + "bbox": [ + 169, + 922, + 629, + 936 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8cfb2a49dca00b94622db5fcca1ae9ead62991a85ae67c1c9b2929a26da82f88.jpg", + "image_caption": [ + "Figure 1: An intuitive explanation of Hogwild! Inference, with 2 workers generating in parallel and 3 shared cache blocks. Each color denotes a cache block. See it in action (example generation)." + ], + "image_footnote": [], + "bbox": [ + 222, + 40, + 781, + 265 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to solve a problem faster or more accurately via some form of collaboration [Wang et al., 2022, Ning et al., 2024]. In the simplest case, multiple LLMs can attempt the problem independently, then vote [Wang et al., 2022] or cross-reference their results [Du et al., 2023, Wang et al., 2024a] to improve correctness. A parallel line of work allows the LLM to divide the problem into multiple independent sub-tasks that are then solved in parallel and merged, producing the final solution [Ning et al., 2024, Kim et al., 2024, Jin et al., 2025]. These parallel inference strategies can improve quality and efficiency, taking advantage of parallelism in modern hardware.", + "bbox": [ + 169, + 308, + 826, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Unfortunately, no single collaboration strategy is universally effective. For instance, solving a problem in independent parallel \"threads\" can be inefficient when one of the threads requires a longer generation than the rest, resulting in most of the agents waiting for a straggler and wasting compute [Wang et al., 2022, 2024a]. In turn, inference with independent sub-tasks only works if the problem can immediately be split into these sub-tasks. Furthermore, if one of the agents discovers that the original plan is flawed, they will be unable to re-plan [Ning et al., 2024, Ding et al., 2025], potentially solving sub-tasks that are no longer necessary [Jin et al., 2025].", + "bbox": [ + 169, + 412, + 823, + 511 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This runs contrary to how humans collaborate. Instead of strict adherence to a fixed collaboration strategy, we often collaborate more dynamically, re-planning on the fly, abandoning some tasks half-way and switching to a more promising approach, discussing or debating strategy if the initial plan failed. While this type of collaboration is harder to define, it offers greater flexibility and can be more efficient if the participants are sufficiently cohesive [Hutchins, 1995, Entin and Serfaty, 1999].", + "bbox": [ + 169, + 515, + 826, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our Approach. 
In this work, we try to apply the same principle to artificial reasoners. Since modern LLMs can already reason and plan [Zhou et al., 2024, Gao et al., 2024, Wang et al., 2024c], we hypothesize that they can benefit from dynamic interaction between different instances, during which they can develop their own collaboration strategy for the problem at hand.", + "bbox": [ + 169, + 590, + 823, + 648 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To test this hypothesis, we propose Hogwild! Inference — a parallel LLM inference protocol with no pre-defined framework for collaboration. Instead of choosing how LLMs should interact ahead of time, we allow them to generate tokens in parallel and \"see\" each other's progress (tokens) immediately as they are generated. We then prompt the LLM \"workers\" to decide their next course of action by themselves, given the latest actions from others: whether this means solving parallel sub-tasks, cross-verifying each other, discussing strategy, or pivoting to a new plan.", + "bbox": [ + 169, + 651, + 823, + 736 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To enable this type of on-the-fly collaboration, Hogwild! Inference runs multiple LLM instances with the same weights, but with a custom Key-Value cache that shares token representations between workers, allowing concurrent cross-attention. Specifically, instead of re-computing Key-Value representations for each worker, we keep track of individual worker KV memories and \"stitch them together\" in different orders, by adjusting their positional embeddings (see Figure 1). Moreover, we provide an efficient implementation of this inference approach.", + "bbox": [ + 169, + 738, + 823, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We test Hogwild! Inference with modern open-source LLMs and find that existing reasoning-capable models—such as QwQ [Qwen Team, 2025] and DeepSeek-R1 [DeepSeek-AI et al., 2025]—can already \"reason to coordinate\". More concretely, we observe that concurrent agents can formulate and follow plans, adapt when the initial plan has failed, point out each other's errors, and use each other's", + "bbox": [ + 169, + 827, + 823, + 885 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "$^{2}$ Our approach inspired by Hogwild! SGD [Recht et al., 2011] that runs updates asynchronously and applies each update as soon as it is computed. The exclamation mark is part of the original name [Stanford HAI, 2023].", + "bbox": [ + 169, + 893, + 823, + 922 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "key observations. When prompted to check if they are doing redundant work – e.g., when one LLM instance is doing a sub-task that is already done by another, or solving a problem that is no longer relevant — they can often (but not always) detect redundancy and change strategy. 
In summary, our results suggest that parallel inference with a shared Key-Value cache may offer a promising approach to enable effective and efficient collaboration between multiple LLM instances.", + "bbox": [ + 169, + 90, + 823, + 161 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Background", + "text_level": 1, + "bbox": [ + 171, + 172, + 308, + 188 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent works propose a large number of frameworks for parallel reasoning and tool use that vary across several axes: how the parallel instances are organized together, what they exchange, and how often [Zhang et al., 2025]. In this section, we give a brief summary of these methods.", + "bbox": [ + 169, + 194, + 823, + 236 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Discussion & aggregation. The simplest way to parallelize chain-of-thought reasoning is Self-Consistency [Wang et al., 2022], where multiple LLM instances reason independently, then vote on the final answer. This approach was later extended in Du et al. [2023], replacing majority voting with text-based communication rounds. Subsequent works in this field combine multiple LLM types [Wang et al., 2024a] and scales to more agents Li et al. [2024a]. Another line of work introduces specialized \"roles\" such as the Debugger [Talebirad and Nadiri, 2023], Examiner [Cohen et al., 2023], Math Teacher [Kong et al., 2024], Judge [Chen et al., 2024], and others, to further augment reasoning.", + "bbox": [ + 169, + 242, + 826, + 340 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This type of role-based discussion was shown to greatly improve LLM reasoning factuality for certain tasks [Wang et al., 2022, Du et al., 2023], and can even enable multiple weaker LLM agents to collectively outperform state-of-the-art single-agent systems [Wang et al., 2024a]. However, this improvement is not unique to multiple agents and can be offset with better single-agent prompting [Wang et al., 2024b, Muennighoff et al., 2025]. Additionally, these approaches do not necessarily accelerate reasoning, because at least some of the agents have to solve the entire problem sequentially, and process (re-encode) each other's progress. This creates additional computational overhead, which presents challenges for both runtime and memory efficiency Wang et al. [2024a], Du et al. [2023].", + "bbox": [ + 169, + 345, + 826, + 458 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Parallelism for efficiency. A different line of work leverages multiple LLMs to solve tasks faster in parallel, such as Skeleton-of-Thought (SoT) [Ning et al., 2024]. SoT begins by running a single LLM to outline a plan for solving the problem with independent sub-tasks, then launches parallel LLM instances for each sub-task. For problems that involve function calling, these functions can also run in parallel [Kim et al., 2024, Gim et al., 2024]. Subsequent works propose more complex parallelism strategies such as dynamic parallel tree search [Ding et al., 2025] or a single agent spawning asynchronous sub-tasks that are done by background LLM \"threads\" [Jin et al., 2025, Liu et al., 2024b, Pan et al., 2025], achieved with specialized fine-tuning.", + "bbox": [ + 169, + 462, + 826, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These techniques are known to substantially accelerate inference for problems that fit their type of parallelism. 
However, we argue that this is also their main limitation: by imposing a specific parallelism strategy, these methods can harm reasoning for problems that do not fit their framework. For instance, when solving a complex reasoning problem, it is often the case that the initial plan turns out to be wrong or incomplete [Muennighoff et al., 2025, DeepSeek-AI et al., 2025], which conflicts with SoT-like methods [Ning et al., 2024, Yu, 2025] that follow a fixed plan-execute-aggregate schedule. Furthermore, some of the sub-tasks may turn out to be more complicated than originally intended and take up more work, which would cause methods like PASTA Jin et al. [2025] to wait for that single task, whereas a more sophisticated reasoner could adjust the plan to work better in parallel. Note that each individual issue can be amended with yet another, more complicated parallelism framework, but the sheer number of such cases makes us doubt whether this is the right approach. In this work, we instead let multiple LLM instances interact without a fixed framework, allowing them to see each other's partial generations to devise (and revise) task-specific collaboration strategy. We show that, perhaps surprisingly, existing reasoning LLMs already have the ability to leverage this.", + "bbox": [ + 169, + 580, + 826, + 776 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Hogwild! Inference", + "text_level": 1, + "bbox": [ + 171, + 784, + 370, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our main intuition is that modern LLMs do not need a pre-defined framework for inference-time parallelism: they can organize by themselves. To test this hypothesis, we design a parallel inference protocol where multiple LLM instances can collaborate as flexibly as possible. Instead of assigning each \"worker\" to a specific role or sub-task, we run them together and prompt them to collaborate. This approach has two key problems: how to run multiple inference threads from the same Key-Value memory, and how to prompt LLM \"workers\" to collaborate over said memory. We outline how to perform LLM inference with a shared cache in Section 3.1, describe our cache structure in Section 3.2 and prompting strategy in Section 3.3. Finally, Section 3.4 describes the inference algorithm.", + "bbox": [ + 169, + 806, + 828, + 917 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Concurrent Attention with Shared Key-Value Cache", + "text_level": 1, + "bbox": [ + 171, + 90, + 578, + 104 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The core ingredient of Hogwild! Inference is a shared Key-Value memory (KV cache) accessible to all workers. The cache consists of several blocks that can be reused between workers, implementing a concurrent version of the attention mechanism [Bahdanau et al., 2015, Vaswani, 2017].", + "bbox": [ + 169, + 108, + 823, + 148 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Let us first consider a simple case with two workers and three cache blocks, as depicted in Figure 1. The first block contains the prompt, and the other two blocks contain the tokens generated by workers A and B respectively (denoted Alice and Bob in the Figure). As workers generate new tokens, they access each other's attention caches as though these were their own previously generated tokens. 
In Figure 1, \"Alice\" sees the common prompt, then \"Bob's\" token representations, then her own. In turn, Bob sees the same common prompt, then Alice's token KVs, and his own tokens after that.3", + "bbox": [ + 169, + 156, + 826, + 239 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This creates a discrepancy where the same Key-Value pairs appear at different positions for each worker. Furthermore, the relative distance between the same pair of tokens (e.g., first generated tokens from Alice and Bob, respectively) changes as new tokens are added. While it is possible to re-encode these tokens at their new positions, it would cause overhead that scales cubically4.", + "bbox": [ + 169, + 244, + 823, + 301 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Instead of re-encoding the new tokens for other workers, we attempt to reuse existing token representations between workers. However, since these tokens appear at different positions for each worker and step, we need to adjust for their positional embeddings. Most modern LLMs use Rotary Position Embeddings (RoPE) [Su et al., 2021], where each key and query is rotated to an angle proportional to its absolute position. Prior works have shown that RoPE embeddings can be manipulated through scaling [Peng et al., 2023] slicing [Xiao et al., 2024], or pruning [Zhang et al., 2023].", + "bbox": [ + 169, + 306, + 826, + 391 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Hogwild! Inference, we instead shift the KV values, multiplying the entire cache block by a cos / sin values that implement rotation by a constant offset. We use this to arrange the same cache entries in different order for each worker as in Figure 1 (right). This allows both workers to instantly \"see\" each other's tokens while they are generated — and even before they are processed by all layers.", + "bbox": [ + 169, + 396, + 828, + 454 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Cache Structure", + "text_level": 1, + "bbox": [ + 171, + 460, + 328, + 472 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Now that we defined a way to rearrange cache blocks on the fly, it is reasonable to ask how to arrange these blocks. For short tasks, simply concatenating worker outputs is sufficient. However, as we consider harder problems that require long chains of thought, workers will eventually pay less attention to each other because of the thousands of tokens between their latest steps5.", + "bbox": [ + 169, + 476, + 823, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address this problem, we propose a more sophisticated cache arrangement inspired by group chat rooms. Namely, we split the generated text into reasoning \"steps\", roughly a paragraph in size. Whenever a given worker finishes a paragraph, (e.g. generates $\\backslash n\\backslash n$ ), we move its KV cache to the end of a shared chat-like history and let it generate the next paragraph at the end of that history. Note that workers still see each other's current (unfinished) paragraphs at the end of the shared history as they write them (see Figure 1). This way, workers always see each other's latest updates as recent tokens and can communicate more easily. 
For each worker $W_{i}$ , we organize cache blocks as follows:", + "bbox": [ + 169, + 537, + 826, + 636 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Common Cache: a large KV cache block that stores KV representations for the system prompt, task description, and a history of previous reasoning steps from each agent.", + "- Other workers: multiple smaller cache blocks containing the latest (unfinished) steps of all other workers $W_{j \\neq i}$ in ascending order. For instance, if there are 4 workers, $W_{2}$ will see $W_{1} \\oplus W_{3} \\oplus W_{4}$ .", + "- Current worker: the latest (unfinished) reasoning step of the current worker $W_{i}$ to be continued. Each block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\nEach block starts with a new paragraph (\\n\\n" + ], + "bbox": [ + 169, + 638, + 826, + 784 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Prompting for Zero-Shot Collaboration", + "text_level": 1, + "bbox": [ + 171, + 787, + 488, + 803 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The shared key-value cache inference we described above allows modern LLMs to access each other's tokens and reason collaboratively. However, even though modern LLMs can reason about", + "bbox": [ + 169, + 806, + 823, + 835 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "3For clarity of exposition, we choose to anthropomorphize the pronouns for these two LLM instances.", + "bbox": [ + 192, + 844, + 795, + 858 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "4If $n$ agents generate one new token each, which is then re-encoded differently for each of these $n$ agents, that each have to attend to $O(n)$ additional tokens, then the total step complexity is $O(n^{3})$ .", + "bbox": [ + 169, + 858, + 825, + 883 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "In other words, if we put all outputs of worker A ahead of worker B, then the more tokens are generated, the farther worker B needs to \"look\" to reach worker A's latest outputs. This could be mitigated with finetuning.", + "bbox": [ + 169, + 883, + 823, + 910 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "how to collaborate, there is no guarantee that they will actually do so unprompted. As with any desired LLM behavior, it can be achieved in two ways: either by training the model to generate tokens collaboratively or by prompting it in-context. In this work, we focus on the latter approach to make Hogwild! Inference easier to generalize for new models. Our prompting consists of two parts:", + "bbox": [ + 169, + 90, + 823, + 148 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. System prompt describes the \"rules\" of the shared cache and suggests that workers collaborate. This prompt goes at the beginning of either the system or user message (if not unsupported);", + "2. 
Inserting s1-like collaboration prompts: every thousand generated tokens, we prompt a random worker with \"Wait, am I doing redundant work? (yes/no):\" at the beginning of their next paragraph. This strategy is meant to promote collaboration and is inspired by Muennighoff et al. [2025]." + ], + "bbox": [ + 171, + 152, + 826, + 223 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The latter s1-like prompts present a curious case. We found that LLMs fine-tuned on reasoning can often become too \"focused\" on what it is generating currently and fail to notice that another instance has found a mistake or solved their problem earlier. However, when asked directly, they can spot redundancy and change their approach. Overall, we found that when prompted this way, LLMs often (but not always) detect redundancies in their actions and can determine the optimal course of action.", + "bbox": [ + 169, + 227, + 826, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4 Inference Matters", + "text_level": 1, + "bbox": [ + 171, + 303, + 339, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When generating new tokens with Hogwild! Inference, we perform a forward pass on all workers in parallel, as though they were in the same batch. Instead of each sample having its own attention cache, we allow batch elements to attend to each other's KV caches at different positions. When processing newly generated tokens, we \"insert\" their KV representations at the end of their respective cache blocks, then arrange these cache blocks for each worker. This way both workers can immediately attend to each other's current tokens even before they are fully processed by all layers.", + "bbox": [ + 169, + 321, + 823, + 406 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This leads to the following problem: since workers combine cache blocks in different order (see Figure 1), we would need to rotate the cached KVs multiple times, one for each worker. Done naively, this would require rotating all past token representations at every step, which is inefficient for long contexts. Fortunately, this problem can be circumvented using a property of rotation: if both query and key are rotated by the same angle, the dot product between them will not change. Instead of rotating all previous keys, we can rotate current token queries to an equivalent angle (Figure 2).", + "bbox": [ + 169, + 411, + 825, + 496 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Suppose that a given attention layer needs to compute attention between the current token query $q$ at position $i_q$ (denoted $\\rho(q, i_q)$ ) and a block of keys rotated to the starting position $i_k$ . Instead of rotating keys, we can rotate the query to position $i_q - i_k$ and keep the KV cache as is. If there are multiple KV blocks A, B, C (Alice, Bob, Common) that need to be rotated to positions $i_k^A, i_k^B, i_k^C$ respectively, we rotate the query $q$ multiple times for each block. 
Formally, we can rewrite the attention dot-product:", + "bbox": [ + 169, + 501, + 823, + 574 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\rho (q, i _ {q}) \\Big [ \\rho (A, i _ {k} ^ {A}) \\oplus \\rho (B, i _ {k} ^ {B}) \\oplus \\rho (C, i _ {k} ^ {C}) \\Big ] = \\rho (q, i _ {q} - i _ {k} ^ {A}) A \\oplus \\rho (q, i _ {q} - i _ {k} ^ {B}) B \\oplus \\rho (q, i _ {q} - i _ {k} ^ {C}) C,\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 580, + 808, + 606 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\oplus$ denotes concatenation. The r.h.s. formula only rotates the current step query, i.e. a single token per worker, as opposed to the past KV blocks that can contain thousands or millions of tokens. We use this property to design an efficient implementation of our method based on Flash-Decoding [Dao et al., 2023]. We gather each KV cache block in a contiguous memory buffer and compute attention similarly to Paged Attention [Kwon et al., 2023], where one page would correspond to one cache block and the corresponding query rotations from all workers. This way, we need only one copy of each cache block and do not need to re-rotate its entries (see Appendix B).", + "bbox": [ + 169, + 609, + 826, + 708 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/671772a8529123d424f4dc382164719cd30712feab78e67f4d667e021650f8ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 238, + 719, + 480, + 877 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7505f7768dcfa922d898d6b01ba172187748b420cf06823a1f2df0b1c1f84199.jpg", + "image_caption": [ + "Figure 2: Intuitive scheme of Hogwild! Inference with query rotation. Colors represent cache blocks. Instead of rotating all cache blocks to align with Alice's and Bob's views, we keep them fixed at the zero position and only rotate the current token queries to equivalent angles." + ], + "image_footnote": [], + "bbox": [ + 496, + 719, + 751, + 883 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/facf7a5e13928be1d0e4bd20c8fe4373b6452ff23d4b026fc5341612411e6b28.jpg", + "image_caption": [ + "Figure 3: (left) Evaluation results for QwQ-32B on synthetic tasks with 5 GSM8k questions in each. (middle) Evaluation of Hogwild! Inference and baselines with QwQ-32B on LIMO. (right) Hogwild! Inference with varying number of workers with QwQ-32B on LIMO." + ], + "image_footnote": [], + "bbox": [ + 183, + 39, + 370, + 156 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9fb8652c51dc0c6d99a0e37d7a48674b3f47c670070a4ead96f3d66cb29b09d3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 39, + 584, + 156 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/666c357e475425a3aa6b4c8622e00a7798c904aeae52e67863082f72671934a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 39, + 805, + 156 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 171, + 205, + 313, + 222 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Detailed Evaluation with QwQ-32B", + "text_level": 1, + "bbox": [ + 171, + 229, + 460, + 244 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we conduct an initial evaluation of Hogwild! Inference to test its ability to collaborate in our zero-shot setting. 
All evaluations in this section are done with the QwQ-32B [Qwen Team, 2025] model. We consider two tasks: one with obviously independent tasks that can be done in parallel and another with a more complicated collaboration pattern.", + "bbox": [ + 169, + 250, + 826, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In both setups, we allow the model to generate reasoning up to a certain budget of sequential forward passes and evaluate its accuracy. If the model did not produce the final answer (\\\\boxed{...}) in time, we take all generated outputs and insert a special prompt6 that makes the model generate an answer (or its \"best guess\"), similarly to how it is done in Pu et al. [2025]. If there are multiple workers / threads, we feed outputs from all workers (concatenated) into the model and prompt it to generate the final answer immediately ( $\\leq 16$ tokens, stop early if generated answer). We apply this technique to all methods except \"Baseline (no early stopping)\" and do not count these extra tokens towards the total budget (x axis) since they have an equal effect on all methods.", + "bbox": [ + 169, + 311, + 823, + 422 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate the following generation algorithms (details in Appendix D):", + "bbox": [ + 171, + 428, + 656, + 444 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Hogwild! Inference: Our main algorithm, as described in Section 3. We evaluate with 2, 3 and 4 parallel \"workers\" and provide additional configuration details in Appendix D.1.", + "- Baseline (no early stopping): standard sequential generation with a single LLM instance. This is the only evaluation where we do not insert the early stopping prompt described above.", + "- Baseline: an improved sequential generation with the early stopping technique described above.", + "- Skeleton-of-Thought (SoT) [Ning et al., 2024]: a parallel reasoning algorithm in which the LLM first generates a short \"outline\" containing several independent tasks, then runs these tasks in parallel and combines the results. We run with both an unlimited number of parallel threads (original setup) and with 2 \"workers\" that append tokens to each thread in a round-robin fashion. For more complicated reasoning tasks, we found that Skeleton-of-Thought cannot solve the problem by itself; to mitigate this, we allow the main model to encode all generated threads and continue reasoning (with early stopping). We discuss Skeleton-of-Thought in more detail in Appendix D.2.", + "- Self-consistency [Wang et al., 2022]: a parallel reasoning algorithm where LLM instances write solutions independently, then vote on the answer. Instead of majority voting, we allow the LLM to view both solutions (concatenated) before generating the final answer with our early-stopping prompt, which outperforms voting in our setup and works even for 2 workers. Note that this method cannot split sub-tasks between workers and is instead meant to increase quality through voting." + ], + "bbox": [ + 171, + 448, + 825, + 690 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Sanity Checks with GSM8k×5: Before we try our approach on more challenging tasks, we test if Hogwild! Inference is capable of basic collaboration. For this purpose, we construct a toy problem set with 128 samples, each containing 5 non-overlapping questions from the GSM8k test set [Cobbe et al., 2021]. 
The LLM is prompted to solve each problem and return comma-separated values7. We report the average per-question accuracy, i.e. if the model solves 4 out of 5 questions in a given sample correctly, it will get a score of 0.8 for that sample.", + "bbox": [ + 169, + 703, + 823, + 789 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We summarize our results in Figure 3 (left): the parallel workers under the Hogwild! Inference can indeed collaborate, i.e. our KV cache manipulations do not break down model's reasoning capabilities. As intuition suggests, Skeleton-of-Thought can also speed up this synthetic task by answering each question in parallel. We provide an example of the outline created by the Skeleton-of-Thought in Appendix E.4. Notably, the self-consistency algorithm also shows some improvement over the", + "bbox": [ + 169, + 792, + 826, + 864 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "\"\\n\\nWait, given the limited time, I have to give an answer right now. Considering all my previous attempts, I have to conclude that the final answer is boxed{''", + "bbox": [ + 169, + 875, + 816, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "7\"Solve these problems and return comma-separated answers \\boxed{answer1, ..., answer5} : \\n 1. \\{task1\\} \\n 2. \\{task2\\} \\n 3. \\{task3\\} \\n 4. \\{task4\\} \\n 5. \\{task5\\}\"", + "bbox": [ + 169, + 901, + 797, + 926 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f66dea94426dd2a9fdd437283ff40594986cf9239a2905077493f34a0dc26501.jpg", + "image_caption": [ + "Figure 4: Evaluation of Hogwild! Inference on LIMO for QwQ-32B, Phi-4-Reasoning-Plus (14B) and Qwen3-8B (left) and different Qwen3 models (right). Dashed lines denote baselines (1 agent)." + ], + "image_footnote": [], + "bbox": [ + 189, + 66, + 496, + 231 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/abdc0a516b9ee43251a53d1bf6316e7463096fd5fd17654166af50c43ddb24e6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 66, + 810, + 232 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "baseline, which we attribute to the fact that it gives the model two \"shots\" at a problem, and if one of them happens to be faster, the algorithm will on average surpass the baseline.", + "bbox": [ + 169, + 272, + 823, + 301 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "LIMO tasks. Next, we evaluate Hogwild! Inference in a more challenging setup where there is no clear pattern of collaboration. We adopt the dataset of 817 problems from Ye et al. [2025]. The dataset contains mathematical problems that take modern LLMs thousands of tokens to solve reliably. Unlike our synthetic tasks, the problems in that dataset often do not have an obvious way to agree on a collaboration strategy ahead of time, but it can emerge (and change) during reasoning.", + "bbox": [ + 169, + 306, + 823, + 378 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We summarize our results in Figure 3 (middle, right). Overall, Hogwild! Inference can converge to a correct solution faster, achieving greater accuracy for the same number of consecutive steps. Furthermore, it produces greater speed-ups as we increase the number of parallel workers (though there is a limit, as we show in Appendix E.1). 
Similarly to our previous setup, self-consistency decoding provides some improvement over the single-worker baseline, but does not outperform Hogwild! Inference. As expected, Skeleton-of-Thought could not split the problem neatly into independent tasks, but still achieves some improvement on small budgets.", + "bbox": [ + 169, + 383, + 823, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We then evaluate different LLM families and sizes on LIMO dataset in Figure 4. We found that our approach generalizes to most of the models tested, with a notable exception. For Qwen3 model family, we observe that the smaller models, 1.7B and, to a lesser extent, 4B fail to adapt to the task and get distracted from the task. In Appendix E.1, we also report additional evaluations in this setup: ablation of the cache rotation from 3.1 and our chat-like cache structure from Section 3.2. We provide examples of collaborative generations for this setup in Appendix F.", + "bbox": [ + 169, + 486, + 823, + 570 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Additional Benchmarks and Models", + "text_level": 1, + "bbox": [ + 169, + 577, + 464, + 590 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next, we test whether our approach can be generalized to other mathematical reasoning and programming tasks. For this evaluation, we also chose benchmarks that do not have obvious collaboration patterns but can nonetheless be solved faster by two human \"agents\". We evaluate on three such benchmarks: LiveCodeBench, OlympiadBench and AIME'25. In addition to QwQ-32B, we also report Qwen3 [Yang et al., 2025] and Phi-4 Reasoning Plus [Abdin et al., 2025]. For AIME'25, we focus on larger models and additionally include DeepSeek-R1 [DeepSeek-AI et al., 2025].", + "bbox": [ + 169, + 594, + 823, + 676 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "LiveCodeBench [Jain et al., 2024]. We evaluate on the code_generation lite version release_v5. Our evaluation closely follows the setup from Qwen Team [2025]: we take the same 279 problems dated between 2024.08 and 2025.02 and filtered so as to avoid ones present in the QwQ dataset. Note, however, that some of the other LLMs in our setup do not report which samples, if any, did they train on. However, since we use the same model weights for the baseline and Hogwild! Inference, we can still compare the two strategies. We run the standard test suite and report Pass@1 averaged over 8 random seeds. For early stopping, we allow the method (and baseline) to generate a single final code block with up to 1024 tokens, using a similar early-stopping prompt as in Section 4.1 (see Appendix C). For Hogwild! Inference, we use the same system prompts as before.", + "bbox": [ + 169, + 683, + 823, + 808 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "OlympiadBench [He et al., 2024]. Next, we evaluate on a different reasoning benchmark that contains Olympiad-level problems on Math and Physics. We run evaluations on the two text-only english-language parts: OE_TO maths_en_COMP (675 problems) and OE_TO_physics_en_COMP (236 problems). Unlike in Section 3, the answers to these problems are not individual numbers but LaTeX formulae that allow multiple equivalent formulations of the correct answer. We use the official evaluation codebase and adapt the built-in DeepSeek-R1 prompts for use with our model set (see details in Appendix D). 
For early stopping, we use the same prompt as before with 64 token limit.", + "bbox": [ + 169, + 814, + 823, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c428b96323c4fc03a26afef9fe9b57ff6ae44eea4fc284d28e76d1cf6edf531f.jpg", + "image_caption": [ + "Figure 5: Evaluation of Hogwild! Inference with 2 workers on OlympiadBench Math (left) & Physics (right) for QwQ-32B, Qwen3-14B and Qwen3-8B models, dashed lines are the baselines." + ], + "image_footnote": [], + "bbox": [ + 189, + 53, + 493, + 218 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8347a312d3cd1bb376b6935227e6d6cb5ade8c972725f3a5f84c73d48039e3ad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 53, + 810, + 219 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Large Models on AIME [2025]. Finally, we evaluate how Hogwild! Inference scales to larger models on a popular AIME'25 benchmark, using both I and II subsets. For this task, we focus on two models: Qwen3-235B-A22B Yang et al. [2025] and DeepSeek-R1 [DeepSeek-AI et al., 2025]. Since the AIME benchmark only contains 30 problems (15 per subset), we evaluate each model with 10 random seeds and average results. We otherwise use the same evaluation protocol as for LIMO, with the same early stopping and at most 16 tokens per answer during early stopping.", + "bbox": [ + 169, + 255, + 823, + 339 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We arrange our results in Figure 5 for OlympiadBench and Figure 6 for LiveCodeBench and AIME'25. Overall, Hogwild! Inference shows similar improvements to what we observed earlier (Section 4.1). One atypical case is OlympiadBench Physics (Fig. 5 right) where Qwen3-14B stops improving after roughly 4096 tokens. Upon closer inspection, we found that the model does not break down, but overthinks the problem, improving some answers while replacing other correct answers with mistakes. Overall, the results show that the cache rotation tricks and the output structure from 3.2 can indeed be generalized across different models and benchmarks. Note, however, that due to the different output format we needed to apply slight alterations to individual model prompts: notably, QwQ-32B automatically inserts at the end of the prompt, while Qwen3 and Phi-4 do not, so we insert it manually before the common history header. We describe this in detail in Appendix C.", + "bbox": [ + 169, + 343, + 826, + 482 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Measuring the Ability to Collaborate", + "text_level": 1, + "bbox": [ + 171, + 488, + 470, + 503 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Now that we know that modern LLMs can collaborate in our zero-shot setting, it is natural to ask how well can they collaborate and what affects their ability. While this question deserves a more thorough investigation, we can still quantify how well LLMs collaborate under Hogwild! Inference. In this section, we analyze their \"collaborativeness\" using the LLM-as-a-Judge paradigm [Zheng et al., 2023a]: we feed collaborative traces into a GPT-4o [Hurst et al., 2024] model and prompt it to score behavior from 1 to 6, where \"1\" means no collaboration, \"3\" indicates basic task splitting and \"6\" represents a hypothetical optimal collaboration, never achieved in our analysis. 
We analyze LLM generations on LIMO dataset with on three models from Section 4.2. To control for differences in generation lengths we compare only 4096-token prefixes from each worker. We compare three inference setups: i) independent generations as per self-consistency decoding; ii) restricted Hogwild! Inference where agents can only view each other's finished paragraphs, but not the current (incomplete) reasoning step, and iii) full Hogwild! Inference, with 2 agents in each setup.", + "bbox": [ + 169, + 505, + 826, + 671 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We summarize our scores in Figure 7: as expected, models that can see each other can collaborate and independent workers cannot. Interestingly, Hogwild! Inference with instant (token-wise) synchronization scores significantly higher than a version that can only see completed inference steps. In Appendix G we provide more detailed results, judge prompt, configurations and examples.", + "bbox": [ + 169, + 675, + 826, + 733 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f0209aef2837c6968d7da96be40be0a43e35806305771cb01ca228315c6b45f8.jpg", + "image_caption": [ + "Figure 6: Evaluation of Hogwild! Inference (2 workers) on LiveCodeBench v5 2024.08-2025.02 for QwQ, Phi-4-R+ and Qwen3 (left) and AIME'25 for larger models (right), dashed lines are baselines." + ], + "image_footnote": [], + "bbox": [ + 189, + 734, + 493, + 896 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/95f302d1d13a8c6c86e2b7cf3e4be7afbd7c3e00e98f2e025d3ddd2173fc424a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 734, + 810, + 896 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8bec21499eb610041b5b9e65ad38946ef73ee9c2e166ed1f1df365336ebe3b73.jpg", + "image_caption": [ + "Figure 7: Mean collaborativeness score from GPT-4o. No sync is independent generation, Step-wise is restricted Hogwild! where worker can only see each-other's past steps, Token-wise is full Hogwild! with instant cache exchange." + ], + "image_footnote": [], + "bbox": [ + 174, + 53, + 486, + 215 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/38e0900c5cbff9f84541182863d6a0ef9c8f80a8c7de1acbe2a7c5d160600707.jpg", + "table_caption": [ + "Table 1: Inference benchmarks for Section 4.4. Columns denote sequencelength. Rows withone workerare baselines,2&4workers use Hogwild!" + ], + "table_footnote": [], + "table_body": "
<table><tr><td># Workers</td><td>1024</td><td>2048</td><td>4096</td><td>8192</td><td>16384</td></tr>
<tr><td colspan="6">Tokens per second</td></tr>
<tr><td>1</td><td>20.1</td><td>20.0</td><td>19.7</td><td>19.3</td><td>18.3</td></tr>
<tr><td>2</td><td>36.3</td><td>36.2</td><td>36.1</td><td>36.1</td><td>34.3</td></tr>
<tr><td>4</td><td>68.9</td><td>69.0</td><td>69.1</td><td>66.3</td><td>60.3</td></tr>
<tr><td colspan="6">Latency per forward (ms)</td></tr>
<tr><td>1</td><td>49.7</td><td>50.0</td><td>50.9</td><td>51.7</td><td>54.5</td></tr>
<tr><td>2</td><td>55.1</td><td>55.3</td><td>55.4</td><td>55.3</td><td>58.3</td></tr>
<tr><td>4</td><td>58.1</td><td>58.0</td><td>57.9</td><td>60.4</td><td>66.4</td></tr>
<tr><td colspan="6">Time to generate # tokens (s)</td></tr>
<tr><td>1</td><td>52.3</td><td>103.3</td><td>206.5</td><td>416.7</td><td>853.5</td></tr>
<tr><td>2</td><td>29.9</td><td>58.1</td><td>114.6</td><td>228.0</td><td>454.4</td></tr>
<tr><td>4</td><td>16.7</td><td>31.6</td><td>61.3</td><td>120.7</td><td>239.2</td></tr></table>
", + "bbox": [ + 511, + 95, + 823, + 286 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.4 Inference", + "text_level": 1, + "bbox": [ + 171, + 297, + 279, + 310 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To recall, our main motivation for proposing Hogwild! Inference is to enable faster reasoning through collaboration. Since the actual inference speed depends on many factors (GPU(s), software, precision, etc), we previously focused on evaluating inference speed in terms of the number of consecutive forward passes and not inference time. Here, in turn, we report the actual inference speed in terms of latency and tokens per second. We evaluate three setups: baseline sequential inference and Hogwild! Inference for two and four workers. We run baseline with FlashAttention v2 (FlashDecoding) and our algorithm with custom GPU kernels using the approach described in Section 3.4. We use a NVIDIA L40S GPU and AMD EPYC 9534 and benchmark the official quantized version of QwQ-32B-AWQ for all setups.", + "bbox": [ + 169, + 314, + 826, + 440 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our results in Table 1 show that, for the 32B model, Hogwild! Inference can generate tokens nearly twice as fast for 2 workers and about $3.2 - 3.6 \\times$ faster for 4 workers, which means that the accuracy gains from earlier sections can translate to faster solutions. We also report the average over GPUs, as well the $10\\%$ and $90\\%$ percentiles, in Figure 8 (left). Overall, Hogwild! Inference has a small constant latency offset compared to the baseline and near-linear scaling as we increase the number of workers. While our implementation already shows significant performance gains, we discuss several ways to scale it further in Appendix B, including in distributed setting.", + "bbox": [ + 169, + 445, + 826, + 544 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/4a7a536cd12fc9c79c74320988e958ef9293e7344b33fef910cf5e76e515d91d.jpg", + "image_caption": [ + "Figure 8: (left) Duration of a single forward pass (generating $W$ new tokens) for Qwen/QwQ-32B-AWQ on L40S, given the total number of tokens already in the KV cache. The dotted lines indicate the $10\\%$ and $90\\%$ quantiles over multiple repetitions on different GPUs. (right) Accuracy versus average generation time on the LIMO dataset task using QwQ-32B-AWQ under different token budgets." + ], + "image_footnote": [], + "bbox": [ + 179, + 560, + 488, + 758 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0541b090e1608ce5167c68820c39717e91683369e6da6bc3263c960480ad859c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 559, + 821, + 757 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As the figure shows, there is some overhead associated with preparing multiple caches (i.e., even at an empty cache, Hogwild! is slightly slower than pure FlashAttention). A more detailed breakdown is presented in Table 2, which shows the duration of the attention kernel (or attention+rope for Hogwild!), as well as the total setup time, that is, the time spent preparing the data structures needed for Hogwild! 
The latter needs to be done only once per forward pass, instead of once per transformer", + "bbox": [ + 169, + 842, + 825, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/5a3fc06a3288c485d1e0cfd791096076d85a7ad085ec8381d9e99fe6558a8cd2.jpg", + "table_caption": [ + "Table 2: Breakdown of Hogwild! overhead compared to pure FlashAttention inference." + ], + "table_footnote": [], + "table_body": "
<table><tr><td rowspan="2">KV Length</td><td colspan="3">Attention (×64)</td><td colspan="3">Setup (×1)</td></tr>
<tr><td>FA</td><td>W2</td><td>W4</td><td>FA</td><td>W2</td><td>W4</td></tr>
<tr><td>300</td><td>11μs</td><td>45μs</td><td>45μs</td><td>-</td><td>1.9ms</td><td>3.9ms</td></tr>
<tr><td>4096</td><td>35μs</td><td>65μs</td><td>82μs</td><td>-</td><td>1.9ms</td><td>3.9ms</td></tr>
<tr><td>8192</td><td>55μs</td><td>92μs</td><td>123μs</td><td>-</td><td>1.9ms</td><td>3.9ms</td></tr>
<tr><td>16384</td><td>100μs</td><td>140μs</td><td>203μs</td><td>-</td><td>1.9ms</td><td>3.9ms</td></tr></table>
", + "bbox": [ + 282, + 112, + 712, + 210 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "block. For long contexts, the attention call is about $40\\%$ and $100\\%$ slower for generating with 2 and 4 workers, respectively.", + "bbox": [ + 169, + 237, + 823, + 266 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Additionally, we report accuracy results over time using our kernel on the official quantized version of QwQ-32B-AWQ on LIMO dataset. The experiments were conducted on NVIDIA L40S GPUs. For comparison, we run the baseline (FlashAttention v2) and Hogwild with 2 workers, maintaining the same experimental setup as detailed in Section 4.1. We report our results in Figure 8 (right). As illustrated, our method achieves better accuracy results on the LIMO dataset within the same time budget.", + "bbox": [ + 169, + 271, + 826, + 354 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 Discussion", + "text_level": 1, + "bbox": [ + 171, + 366, + 294, + 382 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this work, we investigated the ability of large language models to perform parallel generation where multiple instances synchronize through a shared, dynamically-updated attention cache. Surprisingly, our results show that LLMs can operate effectively in parallel across dynamically updated attention cache without specialized fine-tuning. We demonstrate that parallel inference threads can explicitly coordinate, leveraging each other's partial solutions to enable collaborative problem-solving.", + "bbox": [ + 169, + 388, + 826, + 459 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The proposed method, called Hogwild! Inference, allows multiple inference threads to concurrently access and update a shared attention cache. By leveraging Rotary Position Embeddings (RoPE), our approach introduces minimal computational overhead while ensuring instant synchronization—newly generated KV cache entries becoming immediately visible to all threads. This \"telepathic\" communication opens up new possibilities for efficient parallel generation with LLMs.", + "bbox": [ + 169, + 465, + 826, + 536 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "**Limitations** Our method exhibits reduced robustness when applied to smaller models or longer contexts, suggesting scalability challenges across model sizes and sequence lengths. Additionally, our automatic evaluation metric relies on a proprietary model, which may limit reproducibility.", + "bbox": [ + 169, + 542, + 826, + 587 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Future work In future work, we plan to investigate methods for improving collaboration between threads, such as fine-tuning and reinforcement learning. We also plan to investigate connections to alternative parallel inference schemes, such as speculative decoding [Leviathan et al., 2023], and parallel token generation methods like Medusa [Cai et al., 2024] or EAGLE [Li et al., 2024b]. Finally, it is interesting to consider alternative shared memory structures: allowing workers to insert new steps in any order, selectively delete (forget) steps, or solving programming and tool use tasks with a shared IDE and file-system. The KV cache rearrangement used in Hogwild! 
Inference could also allow humans to interact with agents asynchronously, giving clarifications and feedback during reasoning.", + "bbox": [ + 169, + 595, + 826, + 707 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgements: We thank Vladimir Malinovskii for his help with brainstorming, helpful feedback and suggesting future work directions. We also thank Philip Zmushko for proofreading.", + "bbox": [ + 171, + 715, + 823, + 744 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 763, + 269, + 780 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Marah Abdin, Sahaj Agarwal, Ahmed Awadallah, Vidhisha Balachandran, Harkirat Behl, Lingjiao Chen, Gustavo de Rosa, Suriya Gunasekar, Mojan Javaheripi, Neel Joshi, Piero Kauffmann, Yash Lara, Caio Cesar Teodoro Mendes, Arindam Mitra, Besmira Nushi, Dimitris Papailiopoulos, Olli Saarikivi, Shital Shah, Vaishnavi Shrivastava, Vibhav Vineet, Yue Wu, Safoora Yousefi, and Guoqing Zheng. Phi-4-reasoning technical report, 2025. URL https://arxiv.org/abs/2504.21318.", + "AIME. Aime problems and solutions. https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions, 2025." + ], + "bbox": [ + 171, + 787, + 826, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Reza Yazdani Aminabadi, Samyam Rajbhandari, Minjia Zhang, Ammar Ahmad Awan, Cheng Li, Du Li, Elton Zheng, Jeff Rasley, Shadeen Smith, Olatunj Ruwase, and Yuxiong He. Deepspeed inference: Enabling efficient inference of transformer models at unprecedented scale, 2022. URL https://arxiv.org/abs/2207.00032.", + "Anthropic. Claude 3.7 sonnet and claude code, 2024. URL https://www.anthropic.com/news/claude-3-7-sonnet. Accessed: 2025.04.02.", + "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. In Proceedings of the 3rd International Conference on Learning Representations (ICLR), 2015. URL https://arxiv.org/abs/1409.0473.", + "Yushi Bai, Jiajie Zhang, Xin Lv, Linzhi Zheng, Siqi Zhu, Lei Hou, Yuxiao Dong, Jie Tang, and Juanzi Li. Longwriter: Unleashing 10,000+ word generation from long context llms. ArXiv, abs/2408.07055, 2024. URL https://api_semanticscholar.org/CorpusID:271859903.", + "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute.", + "Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer: The long-document transformer, 2020. URL https://arxiv.org/abs/2004.05150.", + "Tianle Cai, Xinyun Li, Zhiruo Wang, Yuhuai Wang, and Dawn Song. Medusa: Simple llm inference acceleration framework with multiple decoding heads. arXiv preprint arXiv:2401.10774, 2024.", + "Justin Chen, Swarnadeep Saha, and Mohit Bansal. ReConcile: Round-table conference improves reasoning via consensus among diverse LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7066–7085, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.381. 
URL https://aclanthology.org/2024.acl-long.381/.", + "Mouxiang Chen, Binyuan Hui, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Jianling Sun, Junyang Lin, and Zhongxin Liu. Parallel scaling law for language models, 2025. URL https://arxiv.org/abs/2505.10475.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "Roi Cohen, May Hamri, Mor Geva, and Amir Globerson. LM vs LM: Detecting factual errors via cross examination. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12621-12640, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.778. URL https://aclanthology.org/2023.emnlp-main.778/.", + "Tri Dao, Daniel Haziza, Francisco Massa, and Grigory Sizov. Flash-decoding for long-context inference. https://crfm.stanford.edu/2023/10/12/flashdecoding.html, 2023. Accessed: 2025-05-10.", + "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, and Xiao Bi et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.", + "Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, Xianglong Liu, and Dacheng Tao. Dynamic parallel tree search for efficient ltm reasoning, 2025. URL https://arxiv.org/abs/2502.16235.", + "Yilun Du, Shuang Li, Antonio Torralba, Joshua B. Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. In *Forty-first International Conference on Machine Learning*, 2023. URL https://openreview.net/forum?id=zj7YuTE4t8." + ], + "bbox": [ + 171, + 90, + 826, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Elliot E. Entin and Daniel Serfaty. Adaptive team coordination. Human Factors, 41(2):312-325, 1999.", + "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024.", + "In Gim, Seung seob Lee, and Lin Zhong. Asynchronous llm function calling, 2024. URL https://arxiv.org/abs/2412.07017.", + "Google DeepMind. Gemini 2.5: Our Newest Gemini Model with Thinking. https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#gemini-2-5-thinking, 2025. Accessed: 2025-04-07.", + "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems, 2024.", + "Chan-Jan Hsu, Davide Buffelli, Jamie McGowan, Feng-Ting Liao, Yi-Chang Chen, Sattar Vakili, and Da shan Shiu. Group think: Multiple concurrent reasoning agents collaborating at token level granularity, 2025. URL https://arxiv.org/abs/2505.11107.", + "Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 
Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024.", + "Edwin Hutchins. Cognition in the Wild. MIT Press, 1995.", + "Sam Ade Jacobs, Masahiro Tanaka, Chengming Zhang, Minjia Zhang, Shuaiwen Leon Song, Samyam Rajbhandari, and Yuxiong He. Deepspeed ulysses: System optimizations for enabling training of extreme long sequence transformer models. arXiv preprint arXiv:2309.14509, 2023.", + "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livecodebench: Holistic and contamination free evaluation of large language models for code, 2024. URL https://arxiv.org/abs/2403.07974.", + "Tian Jin, Ellie Y. Cheng, Zack Ankner, Nikunj Saunshi, Blake M. Elias, Amir Yazdanbakhsh, Jonathan Ragan-Kelley, Suvinay Subramanian, and Michael Carbin. Learning to keep a promise: Scaling language model decoding parallelism with learned asynchronous decoding, 2025. URL https://arxiv.org/abs/2502.11517.", + "Sehoon Kim, Suhong Moon, Ryan Tabrizi, Nicholas Lee, Michael W Mahoney, Kurt Keutzer, and Amir Gholami. An llm compiler for parallel function calling. In *Forty-first International Conference on Machine Learning*, 2024.", + "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. ArXiv, abs/2205.11916, 2022. URL https://apisemantic scholar.org/CorpusID:249017743.", + "Aobo Kong, Shiwan Zhao, Hao Chen, Qicheng Li, Yong Qin, Ruiqi Sun, Xin Zhou, Enzhi Wang, and Xiaohang Dong. Better zero-shot reasoning with role-play prompting. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 4099-4113, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.228. URL https://aclanthology.org/2024.naacl-long.228/.", + "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pages 611-626, 2023." + ], + "bbox": [ + 173, + 90, + 825, + 911 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding. In International Conference on Machine Learning, pages 19274-19286. PMLR, 2023.", + "Junyou Li, Qin Zhang, Yangbin Yu, Qiang Fu, and Deheng Ye. More agents is all you need. Transactions on Machine Learning Research, 2024a.", + "Shen Li, Yanli Zhao, Rohan Varma, Omkar Salpekar, Pieter Noordhuis, Teng Li, Adam Paszke, Jeff Smith, Brian Vaughan, Pritam Damania, and Soumith Chintala. Pytorch distributed: Experiences on accelerating data parallel training, 2020.", + "Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle: Speculative sampling requires rethinking feature uncertainty. In Proceedings of the 41st International Conference on Machine Learning, pages 31147-31162. PMLR, 2024b.", + "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. 
ArXiv, abs/2305.20050, 2023. URL https://api_semanticscholar.org/CorpusID:258987659.", + "Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024a.", + "Hao Liu, Matei Zaharia, and Pieter Abbeel. Ring attention with blockwise transformers for near-infinite context, 2023. URL https://arxiv.org/abs/2310.01889.", + "Mingdao Liu, Aohan Zeng, Bowen Wang, Peng Zhang, Jie Tang, and Yuxiao Dong. Apar: Llms can do auto-parallel auto-regressive decoding. arXiv preprint arXiv:2401.06761, 2024b.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025.", + "Xuefei Ning, Zinan Lin, Zixuan Zhou, Zifu Wang, Huazhong Yang, and Yu Wang. Skeleton-ofthought: Prompting LLMs for efficient parallel generation. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=mqVgBbNCm9.", + "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, and Alex Beutel et al. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720.", + "Jiayi Pan, Xiuyu Li, Long Lian, Charlie Snell, Yifei Zhou, Adam Yala, Trevor Darrell, Kurt Keutzer, and Alane Suhr. Learning adaptive parallel reasoning with language models. arXiv preprint arXiv:2504.15466, 2025.", + "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. PyTorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems (NeurIPS). Neural Information Processing Systems Foundation, 2019.", + "Bowen Peng, Jeffrey Quesnelle, Honglu Fan, and Enrico Shippole. Yarn: Efficient context window extension of large language models, 2023. URL https://arxiv.org/abs/2309.00071.", + "Xiao Pu, Michael Saxon, Wenyue Hua, and William Yang Wang. Thoughtterminator: Benchmarking, calibrating, and mitigating overthinking in reasoning models, 2025. URL https://arxiv.org/ abs/2504.13367.", + "Yujia Qin, Shi Liang, Yining Ye, Kunlun Zhu, Lan Yan, Ya-Ting Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Runchu Tian, Ruobing Xie, Jie Zhou, Marc H. Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. Toollm: Facilitating large language models to master 16000+ real-world apis. ArXiv, abs/2307.16789, 2023. URL https://api-semanticscholar.org/ CorpusID:260334759." + ], + "bbox": [ + 171, + 90, + 826, + 911 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/.", + "Jack Rae and Ali Razavi. Do transformers need deep long-range memory? In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, Online, July 2020. Association for Computational Linguistics. 
URL https://www.aclweb.org/anthology/2020.acl-main.672.", + "Benjamin Recht, Christopher Re, Stephen Wright, and Feng Niu. Hogwild!: A lock-free approach to parallelizing stochastic gradient descent. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K.Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc., 2011. URL https://proceedings.neurips.cc/paper_files/paper/2011/file/218a0aefd1d1a4be65601cc6ddc1520e-Paper.pdf.", + "Timo Schick, Jane Dwivedi-Yu, Roberto Dessi, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. Toolformer: Language models can teach themselves to use tools. ArXiv, abs/2302.04761, 2023. URL https://api_semanticscholar.org/CorpusID:256697342.", + "Yongliang Shen, Kaitao Song, Xu Tan, Dongsheng Li, Weiming Lu, and Yue Ting Zhuang. Hugging-gpt: Solving ai tasks with chatgpt and its friends in hugging face. ArXiv, abs/2303.17580, 2023. URL https://api_semanticscholar.org/CorpusID:257833781.", + "Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024.", + "Stanford HAI. How a “crazy idea” overturned the conventional rules of machine learning, 2023. URL https://hai.stanford.edu/news/how-crazy-idea-overturned-conventional-rules-machine-learning. Accessed: [Insert Date].", + "Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864, 2021.", + "Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V. Le, Ed H. Chi, Denny Zhou, and Jason Wei. Challenging big-bench tasks and whether chain-of-thought can solve them. In Annual Meeting of the Association for Computational Linguistics, 2022. URL https://api_semanticscholar.org/CorpusID: 252917648.", + "Yashar Talebirad and Amirhossein Nadiri. Multi-agent collaboration: Harnessing the power of intelligent LLM agents. CoRR, abs/2306.03314, 2023.", + "A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017.", + "Junlin Wang, WANG Jue, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities. In The Thirteenth International Conference on Learning Representations, 2024a.", + "Qineng Wang, Zihao Wang, Ying Su, Hanghang Tong, and Yangqiu Song. Rethinking the bounds of LLM reasoning: Are multi-agent discussions the key? In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6106-6131, Bangkok, Thailand, August 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.331. URL https://aclanthology.org/2024.acl-long.331/.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H. Chi, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. ArXiv, abs/2203.11171, 2022. URL https://api-semanticscholar.org/CorpusID:247595263." 
+ ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yiming Wang, Zhuosheng Zhang, Pei Zhang, Baosong Yang, and Rui Wang. Meta-reasoning: Semantics-symbol deconstruction for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 622–643, Bangkok, Thailand, August 2024c. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.34. URL https://aclanthology.org/2024-findings-acl.34/.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022.", + "Guangxuan Xiao, Yuandong Tian, Beidi Chen, Song Han, and Mike Lewis. Efficient streaming language models with attention sinks. In International Conference on Learning Representations (ICLR), 2024.", + "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024.", + "An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, Chujie Zheng, Dayiheng Liu, Fan Zhou, Fei Huang, Feng Hu, Hao Ge, Haoran Wei, Huan Lin, Jialong Tang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jing Zhou, Jingren Zhou, Junyang Lin, Kai Dang, Keqin Bao, Kexin Yang, Le Yu, Lianghao Deng, Mei Li, Mingfeng Xue, Mingze Li, Pei Zhang, Peng Wang, Qin Zhu, Rui Men, Ruize Gao, Shixuan Liu, Shuang Luo, Tianhao Li, Tianyi Tang, Wenbiao Yin, Xingzhang Ren, Xinyu Wang, Xinyu Zhang, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yinger Zhang, Yu Wan, Yuqiong Liu, Zekun Wang, Zeyu Cui, Zhenru Zhang, Zhipeng Zhou, and Zihan Qiu. Qwen3 technical report, 2025. URL https://arxiv.org/abs/2505.09388.", + "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. ArXiv, abs/2210.03629, 2022. URL https://api_semanticscholar.org/CorpusID:252762395.", + "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. ArXiv, abs/2305.10601, 2023. URL https://api_semanticscholar.org/CorpusID:258762525.", + "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387.", + "Yijiong Yu. Accelerate parallelizable reasoning via parallel decoding within one sequence, 2025. 
URL https://arxiv.org/abs/2503.20533.", + "Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Zhihan Guo, Yufei Wang, Irwin King, Xue Liu, and Chen Ma. What, how, where, and how well? a survey on test-time scaling in large language models. arXiv preprint arXiv:2503.24235, 2025.", + "Zhenyu Zhang, Ying Sheng, Tianyi Zhou, Tianlong Chen, Lianmin Zheng, Ruisi Cai, Zhao Song, Yuandong Tian, Christopher Ré, Clark Barrett, et al. H2o: Heavy-hitter oracle for efficient generative inference of large language models. Advances in Neural Information Processing Systems, 36:34661-34710, 2023.", + "Zhuosheng Zhang, Aston Zhang, Mu Li, and Alexander J. Smola. Automatic chain of thought prompting in large language models. ArXiv, abs/2210.03493, 2022. URL https://api.sementicscholar.org/CorpusID:252762275." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623, 2023a.", + "Lianmin Zheng, Liangsheng Yin, Zhiqiang Xie, Jeff Huang, Chuyue Sun, Cody Hao Yu, Shiyi Cao, Christos Kozyrakis, Ion Stoica, Joseph E. Gonzalez, Clark Barrett, and Ying Sheng. Efficiently programming large language models using sglang, 2023b.", + "Tong Zheng, Hongming Zhang, Wenhao Yu, Xiaoyang Wang, Runpeng Dai, Rui Liu, Huiwen Bao, Chengsong Huang, Heng Huang, and Dong Yu. Parallel-r1: Towards parallel thinking via reinforcement learning, 2025. URL https://arxiv.org/abs/2509.07980.", + "Pei Zhou, Jay Pujara, Xiang Ren, Xinyun Chen, Heng-Tze Cheng, Quoc V. Le, Ed H. Chi, Denny Zhou, Swaroop Mishra, and Huaixiu Steven Zheng. SELF-DISCOVER: Large language models self-compose reasoning structures. In Amir Globerson, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. Tomczak, and Cheng Zhang, editors, Advances in Neural Information Processing Systems 37 (NeurIPS 2024), Vancouver, BC, Canada, December 2024." + ], + "bbox": [ + 173, + 90, + 825, + 315 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A Cache Layouts", + "text_level": 1, + "bbox": [ + 171, + 89, + 334, + 107 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we consider three cache arrangements, shown at Figure 9, with progressively more complex structure.", + "bbox": [ + 169, + 119, + 823, + 150 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/507eb12025e222fa29ca02659bbf335b1449be8b98d49d55506246ac54845fba.jpg", + "image_caption": [ + "Figure 9: Three cache layouts described in Section 3.2: interleaved with step-wise synchrony (left), simple contiguous layout (middle) and combined with token-wise synchrony (right). All layouts are made from Alice point of view." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 161, + 356, + 277 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/403bc32e2333f1f364e35ecfc05c48ebb4b929f7f262bc7eaaf37bc6c81e156b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 161, + 539, + 277 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cc79e0adf2594606e76f0f89c7b7b43453bf8c674b16cba49604007dab0a6453.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 555, + 161, + 821, + 276 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Contiguous layout (token-wise) is the simplest possible layout where each worker appends to their own sequence blob of tokens and sees other workers' token representations as past keys and values. This layout is inspired by collaborative text editors such as Google Docs or Overleaf.", + "bbox": [ + 169, + 340, + 826, + 383 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "As described earlier in Section 3.1, each worker arranges the other workers' thoughts in a different order. They see the common prompt cache first, then the caches of all other workers (excluding themselves8, then their own cache as immediate previous tokens. That way, each worker predicts the next token for their own cache.", + "bbox": [ + 169, + 388, + 823, + 445 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Interleaved layout (step-wise), which can be seen as analogous to group chat services such as Slack or Discord. In this layout, workers generate tokens in private until they finish a reasoning step9, then add it to a shared \"history\". The history contains past reasoning steps of each LLM instance in the order of their completion. Whenever a worker completes a reasoning step, their KV cache entries are moved to the end of the shared history cache block with the proper rotation, then their local cache is reset their local cache for a new step.", + "bbox": [ + 169, + 450, + 823, + 536 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this setup, the workers only see each other's outputs in full steps, not after every token. However, they do not wait for each other to complete their steps. Instead, each worker keeps generating new tokens and occasionally receives additional key-value pairs inserted into its cache.", + "bbox": [ + 169, + 540, + 826, + 583 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Combined layout (token-wise) is a mixture of the first two, and is the main layout used in the paper. The LLM instances generate steps that are accumulated in a shared history, as in the interleaved layout. However, they do not generate these steps in private, but can instantly see each other's current progress, as in the contiguous layout.", + "bbox": [ + 169, + 588, + 826, + 646 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We can view the first two layouts as ablated versions of this combined one: the contiguous layout lacks the shared history, and the interleaved layout lacks immediate synchronization. We compare these three layouts empirically in Appendix E.1 to better quantify the effect of each design choice.", + "bbox": [ + 169, + 650, + 823, + 694 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B Implementation Details", + "text_level": 1, + "bbox": [ + 171, + 712, + 405, + 729 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Here we discuss additional implementation details and possible alternatives. To recall Section 3.4, Hogwild! 
inference can be implemented as a standard batched inference with a special KV \"cache\" that facilitates cross-worker attention.", + "bbox": [ + 169, + 744, + 826, + 787 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Cache blocks. The Hogwild! cache is split into blocks, typically one block for each worker and an additional \"common\" block for prompt and past steps. The blocks contain key-value pairs for all model layers, but since all layers are treated equally, we describe the cache behavior for a single layer.", + "bbox": [ + 169, + 792, + 826, + 835 + ], + "page_idx": 16 + }, + { + "type": "page_footnote", + "text": "When extending this layout to more than 2 workers, each worker sees the key-value memories of everyone except themselves. For instance, given 3 workers A, B, and C, worker B will see a version of the cache that contains the prompt, outputs of workers A and C, and finally, B's own memory. Likewise, A sees B & C, then A.", + "bbox": [ + 169, + 845, + 823, + 885 + ], + "page_idx": 16 + }, + { + "type": "page_footnote", + "text": "9We define a reasoning step as any amount of text that ends with a complete sentence, e.g. a dot or a question mark, and then a double newline (\"\\n\\n\") in all our experiments, though it may vary by the model.", + "bbox": [ + 169, + 885, + 823, + 912 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Within each cache block, attention keys and values are stored as though they were at positions 0, 1, ..., len(block), regardless of the block's actual position in the full cache. During inference, we account for actual positions by rotating attention queries to the relative difference in positions (as described in Section 3.4).", + "bbox": [ + 169, + 90, + 823, + 147 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Adding new tokens to the cache. During attention forward pass, the first thing that we do is encode the new tokens for each worker and append their keys and values to the respective cache blocks. When using RoPE, the keys are rotated not to their actual positions, but to their index within their cache block (e.g. Alice's tokens). During one inference step, these indices will be equal across all model layers — we can compute the RoPE sin and cos tensors once and reuse them between layers.", + "bbox": [ + 169, + 152, + 826, + 223 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Rotating queries. Unlike in traditional attention, Hogwild! inference rotates query vectors multiple times for each block. Before forward pass, we calculate the difference in positions between each worker's new token (from that worker's point of view) and the first token in each KV cache block. In our main inference scenario, all $n$ workers are allowed to view each other's cache blocks plus an additional bock for prompt and history, for a total of $n \\cdot (n + 1)$ query rotations with exactly $n$ queries for each block. These relative positions are also equal across all layers, so we can reuse the sin and cos tensors similarly to how they are reused for keys. 
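To make the per-block query rotation concrete, the following is a minimal PyTorch sketch, not the paper's actual kernel: the names rope_rotate, rotated_queries_for_blocks, block_order and block_len are illustrative, the interleaved-pair RoPE convention with base 10000 is assumed, and for clarity the new token's query is taken to be unrotated (position 0) before the per-block shifts are applied.

```python
import torch

def rope_rotate(x: torch.Tensor, offset: torch.Tensor) -> torch.Tensor:
    """Rotate vectors by `offset` positions with standard RoPE (interleaved pairs, base 10000)."""
    dim = x.shape[-1]
    inv_freq = 1.0 / (10000.0 ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    ang = offset.unsqueeze(-1).float() * inv_freq          # (..., dim/2)
    cos, sin = ang.cos(), ang.sin()
    x1, x2 = x[..., 0::2], x[..., 1::2]
    out = torch.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin
    out[..., 1::2] = x1 * sin + x2 * cos
    return out

def rotated_queries_for_blocks(q_new: torch.Tensor, block_order: list[str],
                               block_len: dict[str, int]) -> dict[str, torch.Tensor]:
    """Produce one rotated copy of a worker's new-token query per visible cache block.

    Keys inside every block are stored as if at local positions 0..len(block)-1, so the
    query only needs to be shifted by that block's starting offset in this worker's view
    of the cache. `q_new` is assumed unrotated; the new token sits at the end of the view.
    """
    query_pos = sum(block_len[b] for b in block_order)      # global position of the new token
    out, start = {}, 0
    for name in block_order:                                # e.g. ["prompt", "bob", "alice"]
        out[name] = rope_rotate(q_new, torch.tensor(query_pos - start))
        start += block_len[name]
    return out
```

In an actual implementation these rotations would be fused with the attention computation itself; the sketch only illustrates the position bookkeeping described above.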
Note that the number of query rotations for all-to-all attention is quadratic in $n$ , but it does not increase the overall time complexity of attention dot product, which is already quadratic in the number of tokens, which is always greater than $n$ .", + "bbox": [ + 169, + 229, + 826, + 354 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Attention kernel. Once we have all query rotations, we can calculate the scaled dot-product attention as usual. As our cache is naturally partitioned into smaller segments as described above, Hogwild! attention is similar to paged attention, except that each page (i.e., cache block) uses a differently rotated version of the query. A significant challenge for efficient attention in the inference setup is that for optimal data reuse, one would want to handle each KV head inside a single streaming multiprocessor (SM), so that the KV cache needs to be loaded exactly once. However, this would leave large parts of the GPU unused, as the number of KV heads can be much lower than the number of SMs. Therefore, one has to employ a form of sequence parallelism within a single GPU, in which different SMs handle a subset of the sequence for one KV head, and a second phase handles the (cheap) reduction over partial results. Such a split-k type computation is implemented, for example, in Flash-Decoding [Dao et al., 2023].", + "bbox": [ + 169, + 359, + 826, + 512 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Even though the different cache blocks used in Hogwild! would appear to be convenient points to split work across SMs, in a typical inference scenario, this would lead to very imbalanced workloads. Thus, we do not split based on cache blocks, and instead assign each SM the same number of KV entries.", + "bbox": [ + 169, + 518, + 826, + 560 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Fine-tuning and re-encoding considerations. While our work mainly focuses on inference, fine-tuning models to perform Hogwild! inference is an interesting engineering problem. From the computational point of view, the main difference between LLM inference and fine-tuning is that inference is sequential, whereas fine-tuning can compute all positions in parallel. To fine-tune in our setup, one would want to replicate the attention computations from consecutive inference steps.", + "bbox": [ + 169, + 566, + 826, + 637 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To achieve this, we record the position differences between queries and each respective cache block from each of $t$ inference steps, and how many tokens were in each block during that query, for a total of $2 \\cdot t \\cdot n \\cdot (n + 1)$ integers (negligible compared to model parameters and activations). Recall that the cache blocks always store keys and values at positions 0, 1, ..., 1en(block). During forward pass, these positions can be used to construct a 4D attention mask10 to compute attention for all steps in parallel. The backward pass also runs in parallel with PyTorch autograd [Paszke et al., 2019]. A recent work by Zheng et al. [2025] explores finetuning for parallel inference in more detail.", + "bbox": [ + 169, + 642, + 826, + 739 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In addition to fine-tuning, this technique can potentially be used during inference to restore generation after it was evicted from an inference server, e.g. due to preemption or hardware error mid decoding. 
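As a minimal illustration of the re-encoding step, the sketch below shows one way to turn a recorded visibility pattern into the 4D attention mask mentioned above. It is written under assumptions and is not the paper's implementation: visible, flat_ids, flat_pos and to_4d_attention_mask are hypothetical names, and we assume the visibility of every (query token, key token) pair was logged as a boolean matrix during the original sequential inference.

```python
import torch

def to_4d_attention_mask(visible: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    """Turn a recorded (T, T) boolean visibility matrix into an additive 4D mask.

    visible[i, j] is True iff token i attended to token j during sequential inference
    (prompt, shared history and worker blocks flattened into one sequence of length T).
    The result has shape (batch=1, heads=1, T, T): 0 where attention is allowed and a
    large negative value elsewhere, i.e. the 4D mask format accepted by Hugging Face models.
    """
    mask = torch.full(visible.shape, torch.finfo(dtype).min, dtype=dtype)
    mask[visible] = 0.0
    return mask[None, None, :, :]

# Hypothetical usage for re-encoding a finished generation in one parallel forward pass:
#   flat_ids - token ids of prompt + history + worker steps, flattened (shape [T])
#   flat_pos - the per-block local positions each token was encoded at (shape [T])
# out = model(input_ids=flat_ids[None, :],
#             attention_mask=to_4d_attention_mask(visible),
#             position_ids=flat_pos[None, :])
```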
It can also be used to re-encode in-context learning examples if they use Hogwild! inference.", + "bbox": [ + 169, + 746, + 826, + 787 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Attention variants. Some of the recently introduced LLMs use attention variants such as Local (windowed) Attention [Rae and Razavi, 2020, Beltagy et al., 2020] or Multihead Latent Attention (MLA) [Liu et al., 2024a]. These attention variants can also be adapted for use with Hogwild! inference with minor code modifications. For local attention, queries can \"skip\" blocks that are outside their local window. Similarly for MLA, we can calculate compressed latent vectors within each cache block and adapt the existing MLA code to accumulate attention weights across blocks.", + "bbox": [ + 169, + 794, + 826, + 878 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "10https://huggingface.co/blog/poedator/4d-masks", + "bbox": [ + 187, + 896, + 550, + 911 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Distributed Inference. Likewise, Hogwild! inference can be used in distributed setup using the same strategies that work for traditional attention [Shoeybi et al., 2019, Aminabadi et al., 2022]. For pipeline parallelism, each device stores cache blocks for its local subset of model layers. Likewise, for tensor parallelism, each device stores past keys of all cache blocks and layers, but only for a subset of attention heads within each layer and inference using existing kernels.", + "bbox": [ + 169, + 90, + 823, + 161 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In principle, Hogwild! inference can also be combined with sequence parallelism [Jacobs et al., 2023, Liu et al., 2023], where each device stores a KV cache for a subset of tokens. One intuitive way to partition KV cache between GPUs is to assign each device to run one or several \"workers\" and keep the KVs generated by these workers. Since Hogwild! workers generate tokens at the same rate, each device will store the same amount of KVs and query other devices work cross-worker attention.", + "bbox": [ + 169, + 167, + 823, + 237 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "When computing Hogwild! concurrent attention with sequence parallelism, workers can exchange rotated queries using the All-to-All collective operation (Scatter/Gather) available in most frameworks [Li et al., 2020]. After that, each worker computes dot-products between the rotated queries and its local KV cache, and exchanges the partial results as in Ring Attention [Liu et al., 2023]. Note, however, that maximizing the performance of such sequence-parallel Hogwild! inference would require custom kernels that overlap computation and communication. In contract, tensor-parallel (per-head) an pipeline-parallel (per-layer) partitioning can reuse single-GPU attention kernels.", + "bbox": [ + 169, + 243, + 823, + 339 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Additional considerations. Conceptually, our approach is related to the recently introduced Paged Attention from vLLM [Kwon et al., 2023] and Radix Attention from SGLang [Zheng et al., 2023b]. These techniques are similar to ours in that they perform attention to slices of all tokens, e.g. when facilitating efficient parallel beam search inference, different hypotheses attend to different (but overlapping) subsets of the KV cache. 
However, unlike Radix Attention, our procedure attends to all segments at once (with different rotations) and aggregates results in the same softmax-weighted sum.", + "bbox": [ + 169, + 345, + 823, + 429 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C Prompting and formatting details", + "text_level": 1, + "bbox": [ + 171, + 441, + 490, + 458 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In this section, we describe the prompting and formatting details of our approach.", + "bbox": [ + 171, + 464, + 705, + 479 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Prompt for collaborative inference with two workers", + "text_level": 1, + "bbox": [ + 197, + 491, + 542, + 505 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Collaborative Reasoning", + "bbox": [ + 197, + 518, + 395, + 531 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You will collaborate on this problem with another assistant. You will write your thoughts simultaneously with them and collaborate without redundant work. You can collaborate by doing different parts of the problem, double-checking each other's results, trying different approaches, or any other means.", + "bbox": [ + 197, + 531, + 797, + 580 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "There are 2 assistants, including yourself. You will refer to each other as Alice and Bob.", + "bbox": [ + 197, + 582, + 784, + 604 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You will solve the problem together, writing your thoughts in parallel. You will be able to see each other's past and current thoughts as we write them. You will see each other's previous steps as", + "bbox": [ + 197, + 607, + 784, + 643 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "**AssistantName [step]:** <...>", + "bbox": [ + 197, + 645, + 452, + 657 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In the '#### Past steps' section, the automated system will gather the thoughts of Alice and Bob as you write them.", + "bbox": [ + 197, + 657, + 730, + 681 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "After the '###' Work in progress (others)' section, you will see the other assistants' unfinished steps. They will write those steps concurrently with you. You will take into account what they are doing. If another assistant gives you suggestions, you should address them.", + "bbox": [ + 197, + 683, + 784, + 732 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You will always see *other* assistants' incomplete thoughts first, and then, after '##### Work in progress (own)', your own current step. Other assistants will continue writing their thoughts in the background while you will continue writing your own.", + "bbox": [ + 197, + 733, + 777, + 782 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Since you and others both write your thoughts in parallel, you will initially see only partial (unfinished) thoughts that others will continue in parallel, while you write yours. Others' thoughts will appear at the end of their unfinished step, near $<\\ldots>$ . Other assistants may write new thoughts while you are writing yours.", + "bbox": [ + 197, + 784, + 792, + 845 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You will use these partial thoughts to decide how best to collaborate without doing the same work twice. 
You will periodically check what other assistants are doing and you should adjust your actions based on what they are doing so you collaborate efficiently with them.", + "bbox": [ + 197, + 847, + 792, + 896 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "If what you are currently doing is the same thing that another assistant has already done or is in process of doing, you will stop (e.g. Alice may say 'Wait, I was doing the same as Bob ...') and change to a different task right away, so as to avoid doing redundant work. \n# Solve the following problem \nAlice and Bob, you will now solve the next problem together. Keep track of who does what work and communicate to avoid doing the same work twice.", + "bbox": [ + 196, + 99, + 785, + 189 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "First, we provide a full prompt for collaborative reasoning involving two workers. This prompt is wrapped with standard chat template for each model. Then, all worker steps are generated in a single assistant turn. Additionally, we ensure that for reasoning models assistant turn begins with a token - applied automatically for QwQ-32B and manually for other reasoning models. For further implementation details, we refer to the source code.", + "bbox": [ + 169, + 207, + 823, + 275 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The second part of our prompting approach involves s1-like interventions [Muennighoff et al., 2025] in the generation process, where we ask whether a worker is performing redundant tasks (e.g., overlapping with another worker and can pivot to explore alternative ideas). We insert the prompt \"Quick check: am I doing redundant work? (yes/no): \" at the beginning of each new reasoning step every 1024 tokens generated. We refer to Appendix F for examples that demonstrate how these interventions affect the generation process.", + "bbox": [ + 169, + 282, + 823, + 366 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Next, an important part of our approach is defining the end of a reasoning step, which is needed to organize cache layout, as discussed in the paper. We define an end of step as the generation of a token containing the separator sequence (\\n\\n) that directly follows a token ending with an end-of-sentence marker (.,?, or!, etc). This termination condition is not met when: i) The separator appears within a generated code block (steps continue until the model completes the entire code block); ii) The preceding token ends with non-terminal punctuation (e.g., comma, colon, or semicolon)", + "bbox": [ + 169, + 372, + 823, + 455 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Finally, the last part of our prompting approach is the early finisher, which allows us to extract an answer from partial reasoning chains. If the model did not produce the final answer (\\`boxed{...}) in time, we take all generated outputs and insert a special prompt that makes the model generate an answer (or its \"best guess\"), similarly to how it is done in Pu et al. [2025].", + "bbox": [ + 169, + 462, + 823, + 518 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Prompt for early stopping", + "text_level": 1, + "bbox": [ + 197, + 530, + 370, + 544 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "$\\backslash \\mathsf{n}\\backslash \\mathsf{nWait}$ , given the limited time, I have to give an answer right now. 
Conside- ring all my previous attempts, I have to conclude that the final answer is boxed{", + "bbox": [ + 196, + 556, + 761, + 595 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "After this prompt, we allow the model to generate a fixed number of tokens: 16 for LIMO and AIME, 64 for OlympiadBench, and 1024 for LiveCodeBench.", + "bbox": [ + 169, + 613, + 823, + 641 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Note, however, that the LLM does not always produce the answer in time, especially with a tight budget. With QwQ-32B, we observe that the model almost always returns answers correctly if they are present, and if not, it guesses or refuses to answer (unknown, n/a or similar). When extracting answers from Hogwild! Inference, we let the final model view all generated tokens from each worker. This is equivalent to viewing the problem from the perspective of the last worker, e.g. Bob if there are two.", + "bbox": [ + 169, + 648, + 823, + 729 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D Detailed Experiment Configuration", + "text_level": 1, + "bbox": [ + 171, + 752, + 504, + 768 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D.1 Hogwild! Configuration", + "text_level": 1, + "bbox": [ + 171, + 784, + 383, + 799 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For the main experiments, we use Hogwild! inference with two workers (Alice and Bob), a combined layout, and the prompting techniques described in Appendix C.", + "bbox": [ + 169, + 810, + 823, + 839 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D.2 Baselines Configuration", + "text_level": 1, + "bbox": [ + 171, + 857, + 382, + 871 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To evaluate Skeleton-of-Thought (SoT) on our synthetic setup with grouped tasks from GSM8k, we adopt the original prompts from the paper with minor modifications. Specifically, we adjust", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "the prompts to ensure the model returns the answer to each subtask enclosed within \\boxed{} for structured parsing.", + "bbox": [ + 171, + 90, + 823, + 119 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Outline prompt for Skeleton-of-Thought", + "text_level": 1, + "bbox": [ + 197, + 130, + 465, + 147 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "You're an organizer responsible for only giving the skeleton (not the full content) for answering the question. Provide the skeleton in a list of points (numbered 1., 2., 3., etc.) to answer the question. Instead of writing a full sentence, each skeleton point should be very short with only 35 words. Generally, the skeleton should have 3 10 points.", + "bbox": [ + 196, + 157, + 779, + 223 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 196, + 223, + 269, + 234 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "What are the typical types of Chinese dishes?", + "bbox": [ + 196, + 234, + 545, + 247 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Skeleton:", + "bbox": [ + 197, + 247, + 267, + 258 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Dumplings.", + "2. Noodles.", + "3. Dim Sum.", + "4. Hot Pot.", + "5. Wonton.", + "6. Ma Po Tofu.", + "7. Char Siu.", + "8. Fried Rice." 
+ ], + "bbox": [ + 199, + 260, + 313, + 359 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 199, + 361, + 267, + 372 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "What are some practical tips for individuals to reduce their carbon", + "emissions?", + "Skeleton:", + "1. Energy conservation.", + "2. Efficient transportation.", + "3. Home energy efficiency.", + "4. Reduce water consumption.", + "5. Sustainable diet.", + "6. Sustainable travel.", + "Now, please provide the skeleton for the following question.", + "{request}", + "Skeleton:", + "[ROLESWITCHING assistant:] 1." + ], + "bbox": [ + 197, + 373, + 715, + 536 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Point prompt for Skeleton-of-Thought", + "text_level": 1, + "bbox": [ + 197, + 556, + 450, + 573 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "You're responsible for continuing the writing of one and only one point in the overall answer to the following question.", + "bbox": [ + 196, + 584, + 769, + 608 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "{request}", + "bbox": [ + 196, + 609, + 267, + 622 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The skeleton of the answer is", + "bbox": [ + 196, + 623, + 423, + 633 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "{outline}", + "bbox": [ + 196, + 635, + 267, + 647 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Continue and only continue the writing of point {point}. Do not continue", + "bbox": [ + 196, + 648, + 761, + 660 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "with other points! Reason step-by-step and put your final answer within", + "bbox": [ + 196, + 661, + 754, + 672 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "\\boxed{} this is very important! 
[ROLESWITCHING assistant:] {point}.", + "bbox": [ + 196, + 672, + 705, + 686 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "{point_outline}", + "bbox": [ + 196, + 686, + 313, + 699 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D.3 Datasets and Benchmarks", + "text_level": 1, + "bbox": [ + 171, + 727, + 397, + 739 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "This subsection provides links to all datasets and benchmarks referenced in this work, along with their respective licenses.", + "bbox": [ + 169, + 752, + 823, + 781 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- GSM8K", + "bbox": [ + 215, + 794, + 292, + 806 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "https://huggingface.co/datasets/openai/gsm8k", + "bbox": [ + 228, + 808, + 609, + 821 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "License: MIT", + "bbox": [ + 228, + 823, + 325, + 835 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "LIMO", + "bbox": [ + 217, + 854, + 279, + 868 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "https://huggingface.co/datasets/GAIR/LIMO", + "bbox": [ + 228, + 869, + 584, + 883 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "License: Apache 2.0", + "bbox": [ + 228, + 883, + 369, + 898 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- OlympiadBench https://huggingface.co/datasets/Hothan/OlympiadBench License: Apache 2.0", + "LiveCodeBench https://huggingface.co/datasets/livecodebench/code_generation lite License: cc", + "- AIME25 https://huggingface.co/datasets/math-ai/aime25 License: Apache 2.0" + ], + "bbox": [ + 215, + 90, + 797, + 255 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "D.4 Compute Resources", + "text_level": 1, + "bbox": [ + 171, + 287, + 354, + 303 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "As our approach is training-free, all computational resources were solely utilized for inference. The experiments were conducted primarily on NVIDIA A100 GPUs servers with NVSwitch, with DeepSeek-R1 experiments running in a distributed setup. The one exception to this is the inference time experiments in Section 4.4 that were run on NVIDIA L40S GPU.", + "bbox": [ + 169, + 314, + 823, + 369 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The runtime per individual experiment varies by model size, benchmark and the number of workers: baseline inference with Qwen3-4B runs on LIMO in 14 hours on a single server (112gpu-hours), whereas Qwen3-235B-A22 Hogwild! Inference ran on 40 servers for approximately 25 hours ( $\\approx$ 8K GPU hours). Overall, we estimate that the total GPU resources expended for this work, including early experiments that are not reported in this paper, amount to approximately $\\approx$ 25.3K GPU days. Note, however, that this is largely due to the fact that we used a non-optimized inference code for most of the experimentation: the non-optimized code was developed first and we ran most of the experiments in parallel with developing the optimized version. This also means that most of our experiments under-utilized the GPUs and ran at lower power (for the purpose of environmental impact). 
Over 2/3 of our compute was spent on large models (Qwen3-235B-A22B and DeepSeek-R1) that utilized gpu to less than $20\\%$ (as per volatile GPU utilization) due to the use of naive model parallelism and network bottlenecks. We anticipate that future experiments can be run at significantly betterutilization using the efficient implementation described in Appendix B and included in the supplementary code.", + "bbox": [ + 169, + 376, + 826, + 570 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "E Additional Experiments", + "text_level": 1, + "bbox": [ + 171, + 590, + 410, + 607 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "E.1 Ablation Analysis", + "text_level": 1, + "bbox": [ + 171, + 623, + 339, + 637 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In this section, we ablate the main components of our approach, including layouts and prompting. We use the same experimental configuration as in Sections 4.1 and 4.2 for LIMO.", + "bbox": [ + 169, + 648, + 823, + 676 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In Figure 10 (left), we compare the three Hogwild! cache layouts described in Appendix A. Namely, the Hogwild! (contiguous) corresponds to using the contiguous cache layout where all tokens generated by a given worker are kept together, without splitting into individual steps. In turn, Hogwild! (non-instant) corresponds to the interleaved cache layout where workers can only see each other's past reasoning steps, but not the latest unfinished paragraph. We also ablate the use of the collaboration prompt from Section 3.3 (\"Wait, am I doing redundant work?\").", + "bbox": [ + 169, + 683, + 823, + 767 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Finally, we test a version of Hogwild! Inference where we re-encode worker tokens instead of rotating them to a new position when moving between worker caches and the common \"chat history\" cache. This ablation is needed to test if our cache rotation from Section 3.1 and 3.4 is indeed an acceptable substitute for encoding tokens directly at each position (which would cause additional computational overhead). Note that, while token re-encoding is more \"fair\" from the perspective of position encodings, it also has a downside that it does not allow the re-encoded tokens to see some of the concurrently generated tokens from the other worker. For instance, suppose that Alice and Bob are writing steps concurrently and communicating with each other within these steps, e.g. using each other's results. Then, if we later re-encode these steps in some sequential order, then the tokens of the first worker will be encoded without access to the other worker's tokens (if it hasn't finished its", + "bbox": [ + 169, + 772, + 826, + 910 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "own step yet). If workers reused information from each other's steps, re-encoding this way can break some of the internal representations.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Our results suggest that all three design choices contribute to the method performance: the contiguous layout performs nearly equally well for shorter budgets, but eventually falls behind as we consider longer reasoning traces. 
Likewise, the interleaved layout without instant synchronization performs poorly at smaller budgets, but catches up eventually: we attribute this to the fact that slower synchronization increases the difficulty of cross-worker coordination (this also aligns with our findings in Section 4.3). The use of collaboration prompts also improves the accuracy to budget trade-offs, although we hypothesize that it can be made redundant if the model is trained to collaborate better.", + "bbox": [ + 169, + 126, + 826, + 223 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In Figure 10 (right), we also compare different numbers of workers and test Hogwild! Inference with only a single worker for ablation. The results with a single worker generally perform similar to the baseline, with slightly worse accuracy for smaller budgets, which suggests that the improvements from Hogwild! Inference come from multiple workers and not as an indirect effect of our prompt. As for multiple workers, we find that using 3 and 4 workers further improves the accuracy to budget trade-offs. Curiously, as we switch to 6 workers, Hogwild! Inference performs better yet at smaller budgets, but eventually saturates at a somewhat worse accuracy.", + "bbox": [ + 169, + 229, + 823, + 325 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We hypothesize that the drop of accuracy is caused by the fact that QwQ-32B was trained on a limited sequence length and, since 6 workers generate tokens at a quicker rate, the model eventually runs out of the designed maximum sequence length and performs unstably (we did not use YaRN[Peng et al., 2023] for this evaluation). However, it is also possible to attribute this to fundamental property of LIMO tasks, model limitations, our zero-shot prompt not scaling well. We leave further exploration of scaling Hogwild! Inference to multiple workers to future work.", + "bbox": [ + 169, + 332, + 826, + 416 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "E.2 Detailed Model Evaluations", + "text_level": 1, + "bbox": [ + 171, + 431, + 410, + 446 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Due to space limitations, we had to arrange our results in Section 4.2 with multiple models per plot and had to omit some results. In this section, we report the missing evaluations on a per-model basis. In Figures 11, 12, 13, 14, 15, 16, 17, 18 we report results for QwQ, Phi-4-reasoning-plus and the Qwen3 model family. We also report limited evaluations for Llama 3.3 70B Instruct and DeepSeek-R1 in Figure 19. All evaluations are performed in the same setup as in Section 4.2.", + "bbox": [ + 169, + 458, + 823, + 527 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Overall, the results align with our findings summarized in Section 4.2. Zero-shot Hogwild! Inference seems to perform better with larger models, but can be unstable for smaller ones, especially 1.7B (See Figure 13). While it is tempting to conclude that larger and more capable models are better at collaborating, it does not immediately follow from our results and can be due to some other factor. Note also that, while we observe better results with larger models, smaller Qwen3-4B and 8B models already show some signs of collaborativeness, which should make it possible to reproduce and build on our results with consumer hardware. 
Additionally, we hypothesize that the poor performance of 1.7B models could potentially be alleviated with finetuning in collaborative inference setup (we discuss some finetuning details in Appendix B), but we leave this to future work.", + "bbox": [ + 169, + 534, + 826, + 657 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/0750e87acaf92a25e10b5215e73d545831549528469f5677d552cfdbc243b7ba.jpg", + "image_caption": [ + "Figure 10: Detailed comparison of various parallel inference setups with QwQ-32B on LIMO task set, in the same setup as in Section 4. (left) ablation analysis of simpler cache layouts and collaboration prompt (see Section 3.3, Appendix C). (right) Hogwild! Inference with 1, 2, 3, 4 and 6 workers." + ], + "image_footnote": [], + "bbox": [ + 179, + 685, + 488, + 852 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/cd5a9137eaa9ed2e13d5412d81fb8636cfab4d520ce8867d2e709303435b0785.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 686, + 821, + 852 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Curiously, we found that LiveCodeBench with Self-Consistency Chain-of-Thought inference [Wang et al., 2022] has significant gain in performance over the baseline. Upon closer examination, we found that the reason for this is that we always allow the model to generate a lot (up to 1024) of additional \"free\" tokens at the end of two generations, whereas for Hogwild! and Baseline we only generate these tokens if the model failed to produce any answer. If we allow Hogwild! to also generate the extra 1024 tokens all the time, its advantage also increases. However, we still report the weaker version of Hogwild! Inference and Baseline to better match our evaluation protocol on other tasks.", + "bbox": [ + 169, + 90, + 826, + 189 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/a315f11d3d59643d5387cded9470d575ca7f20a13d9e7735ba774ac67c0cdbc5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 204, + 488, + 375 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/385547686f027b92872df24af335e0c59e793ce862689089ce58ae98832e0824.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 203, + 821, + 375 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/d0f2ee09338de737c6a5456c25214178f3d7f3297d1a372caf56c8c8f863a93a.jpg", + "image_caption": [ + "Figure 11: Results for QwQ-32B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 380, + 488, + 551 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/8ab8a335a650e9496769486045930364b378c45bba5dd6b33a67a20be2c7c767.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 380, + 821, + 551 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/105b408dcae775ad576b1a9e55e0656d770d5bc021c74442a63554ae117801b1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 92, + 488, + 258 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/39e38b5f91b2580720877cf0a525f801ab529aa855d2f3d4c8f0e38148798cbf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 93, + 820, + 261 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/0a437c6252d524139e06f923a7d43f0f1afe81ffce50153f66609a0d9cf52add.jpg", + "image_caption": [ + "Figure 12: Results for Phi-4-reasoning-plus on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + ], + "image_footnote": [], + "bbox": [ + 178, + 268, + 488, + 436 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/b721ccc908c8da57e84a6f91f59c3ba54f2a1133372a1fa4ff1dc4010a7980ce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 268, + 820, + 436 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/99e434b949d0cbcd9e763cd8a74a9aabc94127e4bb16528fc410ede6861a8804.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 492, + 488, + 664 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/1e282eb76370277aa31502f06b17e8deefdb231efbe3649cbd21156bb1baaf78.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 491, + 818, + 662 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/6a779138c58690d02c893b818651d0308190f4c66aaecc1e51400234f1b70318.jpg", + "image_caption": [ + "Figure 13: Results for Qwen3-1.7B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + ], + "image_footnote": [], + "bbox": [ + 178, + 670, + 488, + 839 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/dd60c86c54a1069c84d8f88fc117e8b92c39cad420269a63d1fe822d7e16aa01.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 670, + 820, + 839 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/87afce25f48da198586ae0a3f58c3eb5bdf6359f3e953d8886e12b86198e5e45.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 92, + 488, + 261 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/0ad70bb389f24f72d07ec923fe435e619b56716d82dd95d6e1f419c8e6ff3780.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 92, + 821, + 261 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/6746e3d409e9d9562ba2aaf9f282c5c5bae61320025f4f437ba5e9c28145ea37.jpg", + "image_caption": [ + "Figure 14: Results for Qwen3-4B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 268, + 488, + 436 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/fc79ddd12c709f66bb3221cb918ba15b69a77353de1d3b73d2233cdf4c707cf5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 268, + 821, + 436 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/84df23ce36c50bad4a89ba3ea9bcd7a44a43add14223fcf9bad1e4912fd3b8e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 491, + 488, + 661 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/1113943814a1ba7449282785ff67db11c1dd9cd60a21c73314ec59a3bd9e6953.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 491, + 821, + 661 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/4330aea357174e75709608a7d2ed4c2628d24ee92e544d264b320f4aa9f643a3.jpg", + "image_caption": [ + "Figure 15: Results for Qwen3-8B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + ], + "image_footnote": [], + "bbox": [ + 178, + 667, + 488, + 837 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/7bef6240269f72ddae95f0a61674229330539fb1d8841cbb8850064c86ef0bde.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 667, + 821, + 837 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/fde24df3ee61d9f9607eeda1caf684606ee9eae1c08e1523c2d7c3c8a1853a17.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 92, + 488, + 258 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/2a379c139722d231fb1701ea9fdfccbb35d03c9b37afdceaceaac63f7bf7d640.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 93, + 818, + 261 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/024e9176e18d661c942fdbf117daeabe8efd8b200cad0285a7f49d1f68879b2c.jpg", + "image_caption": [ + "Figure 16: Results for Qwen3-14B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + ], + "image_footnote": [], + "bbox": [ + 178, + 268, + 488, + 436 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/8b61669f79eb12a756ed3fbf30bb6a99a471fca8098a8f4ae7e84ee1779300ba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 268, + 818, + 436 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/cb9b80b20b8d089610263159f0b7e1fa85c41bb4f6784a05e8af081ac5a540b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 492, + 488, + 661 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/3f44b10fecb9a2be030a785e03e76cc38ecce553a353594a4f96a006c4d88bd4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 492, + 818, + 660 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/b0b7f72f4defa7bc1d8736b28449f49575d7a4e8f3b18755242e8310ce609be6.jpg", + "image_caption": [ + "Figure 17: Results for Qwen3-32B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 667, + 488, + 837 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/cebcb85b2637b93486276dde1d8c5f47aadef4c16b22d77e01bfa66c24d053f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 508, + 667, + 818, + 837 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/96a8bf3d11b9e1348eeac758c1f0046b569b33f38e30ee6be4d5da6b40136c19.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 178, + 92, + 488, + 261 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/562be250b1eaa70e93b7d721ccffcd4fa2a625c27474e54bfc8d105f0c692d86.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 92, + 821, + 261 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/b0fb21bc8d03a479f6b8d9299463da795b537d1f95adafb958aefe06db96457c.jpg", + "image_caption": [ + "Figure 18: Results for Qwen3-235B-A22B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and AIME 2025 (bottom-right)." + ], + "image_footnote": [], + "bbox": [ + 178, + 268, + 488, + 436 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/313d727f166b635555738933a1119f47472d355e7c99e6199c5e7c9098bbc19d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 268, + 821, + 436 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/c4ad99c5ac0bb20905ed13f3eeadaa110f7c34e161bde288d090379f7fe47220.jpg", + "image_caption": [ + "Figure 19: (left) Llama 3.3 70B Instruct on LIMO. (right) DeepSeek-R1 on AIME 2025." + ], + "image_footnote": [], + "bbox": [ + 178, + 500, + 488, + 670 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/72c4fd755084b9941f7025b448c7e727f178b52396d4d427f51f9dcdf6edc127.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 500, + 821, + 670 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "E.3 Extended thinking budgets", + "text_level": 1, + "bbox": [ + 171, + 729, + 403, + 744 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We additionally evaluated Hogwild! Inference with extended thinking budgets to investigate whether the proposed method is robust for longer generations. To that end, we evaluated QwQ-32B under the Hogwild! Inference with up to 16k budget on the OlympiadBench, we report the results in Table 3 and Table 4.", + "bbox": [ + 169, + 757, + 826, + 813 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "E.4 Baselines Additional Details", + "text_level": 1, + "bbox": [ + 171, + 837, + 413, + 852 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "In this subsection, we provide an example of the outline created by the Skeleton-of-Thought for the task covered in Section4.1", + "bbox": [ + 169, + 864, + 823, + 893 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "images/b6706799f0df1d0a77bc26bd71e833f2341b050a4b9159bf4de7f0093bdfd166.jpg", + "table_caption": [ + "Table 3: Performance comparison between Hogwild! and baseline generation on OlympiadBenchMath with extended thinking budgets for QwQ-32B." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Method\\Budget</td><td>2048</td><td>4096</td><td>6144</td><td>8192</td><td>10240</td><td>12288</td><td>14436</td><td>16384</td></tr>
<tr><td>Hogwild!</td><td>52.0</td><td>60.89</td><td>64.15</td><td>66.52</td><td>67.41</td><td>70.81</td><td>72.89</td><td>75.26</td></tr>
<tr><td>Baseline</td><td>40.89</td><td>57.0</td><td>63.11</td><td>65.33</td><td>65.93</td><td>69.78</td><td>72.3</td><td>74.81</td></tr></table>
", + "bbox": [ + 202, + 126, + 794, + 183 + ], + "page_idx": 28 + }, + { + "type": "table", + "img_path": "images/d1f96afd9d33e5508f5678c3d6a6571f827194b230ae319f10f35d7af3906029.jpg", + "table_caption": [ + "Table 4: Performance comparison between Hogwild! and baseline generation on OlympiadBenchPhys with extended thinking budgets for QwQ-32B." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Method\\Budget</td><td>2048</td><td>4096</td><td>6144</td><td>8192</td><td>10240</td><td>12288</td><td>14436</td><td>16384</td></tr>
<tr><td>Hogwild!</td><td>27.12</td><td>33.20</td><td>35.73</td><td>38.09</td><td>37.81</td><td>38.67</td><td>38.25</td><td>39.03</td></tr>
<tr><td>Baseline</td><td>22.89</td><td>26.0</td><td>29.75</td><td>31.44</td><td>33.68</td><td>34.17</td><td>35.88</td><td>36.12</td></tr></table>
", + "bbox": [ + 202, + 234, + 794, + 291 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Task example (GSM8k×4)", + "text_level": 1, + "bbox": [ + 197, + 318, + 377, + 333 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Solve these problems and return comma-separated answers boxed{answer1,..., answer4}:", + "bbox": [ + 197, + 345, + 624, + 369 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + " 1. Carmen has $100, Samantha has $25 more than Carmen, and Daisy has $50 more than Samantha. How much do all three girls have combined?", + "2. A cat eats nine sausages in 30 minutes. A dog can eat the same number of sausages in $\\frac{2}{3}$ the amount of time the cat takes. Calculate the average time the two take the eat the sausages.", + "3. Four children are playing together: Akbar, Alessandro, Helene, and Wilfred. Helene is twice as old as the average age of the group, and the total age of the children is 20. If Akbar is 3 years old and Alessandro is 4 years old, calculate the age of Wilfred.", + "4. Hannah needs to drink $100\\mathrm{ml}$ of water for every 200 calories she burns. She spends 2 hours doing aerobics, which burns 500 calories/hour, and 1 hour running, which burns 600 calories/hour. How many ml of water does she need to drink?" + ], + "bbox": [ + 197, + 371, + 792, + 534 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "The outline generated by Skeleton-of-Thought", + "text_level": 1, + "bbox": [ + 197, + 554, + 504, + 569 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Calculate total money.", + "2. Find cat's eating time.", + "3. Determine Wilfred's age.", + "4. Calculate water intake." + ], + "bbox": [ + 197, + 580, + 413, + 630 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "F Examples", + "text_level": 1, + "bbox": [ + 171, + 660, + 289, + 678 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "In this section, we present a reasoning chain, generated by Hogwild! Inference with an interleaved cache layout and token-wise synchronization. We denote our intervention in model generation in italics and highlight worker interactions with **bold** to enhance readability.", + "bbox": [ + 169, + 691, + 823, + 736 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "F.1 Example from LIMO Dataset (Task 511)", + "text_level": 1, + "bbox": [ + 171, + 750, + 495, + 766 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Problem: Compute $\\sqrt{(31)(30)(29)(28) + 1}$ .", + "bbox": [ + 169, + 777, + 470, + 794 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Alice [1]: Hi, I'm Alice. Here's how we can solve the problem step by step...", + "bbox": [ + 169, + 799, + 674, + 814 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Bob [1]: Hi, I'm Bob. Let me see... We need to compute the square root of $(31 \\cdot 30 \\cdot 29 \\cdot 28)$ plus 1. That's $\\sqrt{31 \\cdot 30 \\cdot 29 \\cdot 28 + 1}$ .", + "bbox": [ + 169, + 819, + 826, + 849 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Alice [2]: The expression is $\\sqrt{31 \\cdot 30 \\cdot 29 \\cdot 28 + 1}$ . Hmm, maybe there's a pattern here. I remember that sometimes products of consecutive numbers plus 1 have square roots that are integers. Like, for example, $n(n - 1)(n - 2)(n - 3) + 1$ might form a square. Wait, but here it's four consecutive numbers? 
Or maybe three?", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Bob [2]: Let me calculate the product first. $31 \\cdot 30$ is 930, then $29 \\cdot 28$ is 812. So $930 \\cdot 812$ . Let me multiply those. $930 \\cdot 800 = 744,000$ and $930 \\cdot 12 = 11,160$ . Total is $744,000 + 11,160 = 755,160$ . Then add 1: 755, 161. Now $\\sqrt{755,161}$ . Hmm, $\\sqrt{755161}$ . Let me see, 870 squared is 756, 900 which is higher. So maybe 869 squared?", + "bbox": [ + 169, + 90, + 823, + 148 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Alice [3]: Alternatively, maybe there's a formula for the product of four consecutive numbers plus one? Wait, actually, let me think of smaller numbers to test. Let's try $n = 4$ : $4 \\cdot 3 \\cdot 2 \\cdot 1 + 1 = 24 + 1 = 25$ , $\\sqrt{25} = 5$ . Which is 5, which is 4 + 1? Wait, $4 + 1 = 5$ , yes. Wait, maybe for $n$ , the sqrt is $n^2 + (n - 1)^2$ ? Wait $4^2 + 3^2 = 16 + 9 = 25$ , which is 5 squared. Hmm interesting.", + "bbox": [ + 169, + 154, + 823, + 212 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Bob [3]: Wait, 869 squared is $(870 - 1)^2 = 870^2 - 2 \\cdot 870 + 1 = 756900 - 1740 + 1 = 755161$ . Oh! That's exactly the number we had earlier. So $\\sqrt{755161}$ is 869. Therefore the answer is 869. But let me check Alice's idea too.", + "bbox": [ + 169, + 218, + 823, + 262 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Alice [4]: Let me test another number. Let's take $n = 5$ . Then $5 \\cdot 4 \\cdot 3 \\cdot 2 + 1 = 120 + 1 = 121$ . $\\sqrt{121} = 11$ . 11 is $5 + 6$ ? No, $5 + 6 = 11$ . Wait $5^2 + 4^2 = 25 + 16 = 41$ , which is not 121. Hmm. Alternatively, maybe $(n(n - 3) + 1)^2$ ? Let's see with $n = 4$ : $4 \\cdot 1 + 1 = 5$ , squared is 25. For $n = 5$ : $5 \\cdot 2 + 1 = 11$ , squared is 121. So that works! So the formula is $(n(n - 3) + 1)^2$ . Wait, let me check with $n = 31$ . Then $n = 31$ , so $31 \\cdot 28 + 1 = 868 + 1 = 869$ . Then 869 squared is the value. So that works. So the answer is 869. That matches Bob's calculation!", + "bbox": [ + 169, + 268, + 823, + 351 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Bob [4]: Wait, so my calculation shows that 869 squared is exactly the number under the square root. So the answer is 869. But let me confirm with Alice's pattern. Let's see with $n = 31$ . Wait, the original problem is $31 \\cdot 30 \\cdot 29 \\cdot 28$ . So maybe here $n = 31$ ? Then according to the formula, it would be $(31 \\cdot (31 - 3) + 1)^{2}$ ? Wait, $31 \\cdot 28$ is 868, plus 1 is 869. So yes, so sqrt of that product plus 1 is 869. So that's consistent with my calculation. Therefore the answer is 869.", + "bbox": [ + 169, + 358, + 823, + 428 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Bob [5]: Perfect, so both methods agree. The answer is 869.", + "bbox": [ + 169, + 434, + 581, + 449 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Bob [6]: Quick check: am I doing redundant work? (yes/no): no, since Alice is also confirming the pattern, but I think we can conclude.", + "bbox": [ + 169, + 455, + 823, + 483 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Alice [5]: Exactly, Bob! The formula is $(n(n - 3) + 1)^2$ when the product is $n(n - 1)(n - 2)(n - 3) + 1$ . Wait, given the limited time, I have to give an answer right now. 
Considering all my previous attempts, I have to conclude that the final answer is $\\boxed{869}$", + "bbox": [ + 169, + 488, + 823, + 542 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.2 Examples with Collaboration Insertion", + "text_level": 1, + "bbox": [ + 169, + 563, + 483, + 578 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Here are some examples of why insertions in model generation may or may not help model collaborate better. We found a few patterns that emerge in model's reaction on such insertions. All the following examples are QwQ [Qwen Team, 2025] generations on LIMO [Ye et al., 2025] dataset.", + "bbox": [ + 169, + 590, + 823, + 633 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.2.1 Insertions Help Model Pivot", + "text_level": 1, + "bbox": [ + 169, + 652, + 421, + 667 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Task 122", + "text_level": 1, + "bbox": [ + 169, + 679, + 238, + 691 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "> Alice: Quick check: am I doing redundant work? (yes/no): no. Bob, I see you're working on the same product. Let me think of another approach. Let me write the product as $\\prod_{k=1}^{n-1} \\frac{3k+5}{3k+2}$ . Let's consider writing out the first few terms to see if there's a pattern.", + "bbox": [ + 169, + 700, + 823, + 746 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Task 9", + "text_level": 1, + "bbox": [ + 171, + 751, + 220, + 763 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "> Bob: Quick check: am I doing redundant work? (yes/no): yes, I was considering similar things to Alice. Let me switch to a different angle. Let's consider the distances between the frogs.", + "bbox": [ + 169, + 772, + 823, + 801 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Task 40", + "text_level": 1, + "bbox": [ + 171, + 806, + 230, + 820 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "> Bob: Quick check: am I doing redundant work? (yes/no): Yes, Alice is computing $a_3$ , so I'll do something else. Let me try to find a pattern or transformation that could simplify the recurrence.", + "bbox": [ + 169, + 827, + 823, + 856 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Task 466", + "text_level": 1, + "bbox": [ + 171, + 862, + 238, + 875 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "> Bob: Quick check: am I doing redundant work? (yes/no): Yes, Alice is already calculating the x-coordinate of A. I'll do something else. Let me find the coordinates of L instead.", + "bbox": [ + 169, + 883, + 823, + 911 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.2.2 Insertions When Workers Already Collaborate Well", + "text_level": 1, + "bbox": [ + 171, + 90, + 586, + 106 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Task 116", + "text_level": 1, + "bbox": [ + 171, + 114, + 238, + 127 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "> Bob: Quick check: am I doing redundant work? (yes/no): no. I'm finding coordinates of D, and Alice is finding coordinates of G.", + "bbox": [ + 169, + 135, + 823, + 162 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Task 513", + "text_level": 1, + "bbox": [ + 171, + 169, + 238, + 183 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "> Alice: Quick check: am I doing redundant work? (yes/no): no. 
Bob is also analyzing the inequality, but we're working on different cases. I'll continue with the area calculation for $x > \\frac{5}{8}$ .", + "bbox": [ + 169, + 190, + 825, + 220 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "F.2.3 Insertions Ignored by Workers", + "text_level": 1, + "bbox": [ + 171, + 233, + 439, + 248 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Task 65", + "text_level": 1, + "bbox": [ + 171, + 256, + 230, + 270 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "$>$ Bob: Quick check: am I doing redundant work? (yes/no): Let me proceed to set up the equation.", + "bbox": [ + 169, + 277, + 816, + 292 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Task 768", + "text_level": 1, + "bbox": [ + 171, + 297, + 238, + 311 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "> Alice: Quick check: am I doing redundant work? (yes/no): Let me continue.", + "bbox": [ + 169, + 319, + 686, + 333 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "G Additional Details for Analysis", + "text_level": 1, + "bbox": [ + 169, + 353, + 468, + 369 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In this section, we present a detailed analysis of collaboration, including its levels, prompts, and illustrative examples.", + "bbox": [ + 169, + 383, + 823, + 412 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "G.1 Collaboration Levels", + "text_level": 1, + "bbox": [ + 171, + 426, + 362, + 441 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Humans understand collaboration intuitively and have a hard time objectively measuring it. Thus, we construct text descriptions of levels of collaboration to differentiate various samples based on a few criteria we see fit. Those criteria are: interaction, reuse and advances of other's ideas, task-splitting, etc.", + "bbox": [ + 169, + 452, + 825, + 507 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Levels of collaboration", + "text_level": 1, + "bbox": [ + 197, + 518, + 352, + 532 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **No collaboration:**", + "- Participants may or may not acknowledge the existence of others in the conversation, using greetings, they do not show any signs of collaboration at all.", + "- Workers may exchange their totally independent thoughts without a functional or purposeful attempt to solve the problem collaboratively. Overall they work independently." + ], + "bbox": [ + 196, + 544, + 792, + 632 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2. **Initial Communication:**", + "- Workers exchange information, but do not yet integrate or build upon each other's ideas. They minimally acknowledge teammates. Do not engage with others' ideas or contributions. Works entirely independently, even if inefficient.", + "- Workers often repeat each other and do not reuse anything others provide for development of their own ideas." + ], + "bbox": [ + 197, + 643, + 795, + 732 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "3. 
**Paying attention:**", + "bbox": [ + 197, + 744, + 393, + 757 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Participants demonstrate active listening by paraphrasing or summarizing others' points, showing that they are paying attention and attempting to understand each other's perspectives.", + "- Workers occasionally (1-3 times each) reference other's ideas and may use them in their own speech.", + "- Collaboration is usually only rechecking and validating.", + "- Absence or minimal (only at the start) planning and work-splitting." + ], + "bbox": [ + 197, + 758, + 772, + 845 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "4. **Regular discussion:**", + "- Workers regularly (4 and more times each) talk to each other regarding the problem and reusing results. It could be validation, discussion or any other" + ], + "bbox": [ + 197, + 858, + 792, + 896 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "form of interaction.", + "bbox": [ + 197, + 99, + 352, + 111 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- It is key here that discussions and/or reuses of ideas are regular.", + "- Anywhere (except the start) there exists a task parallelism, planning or work-splitting beyond the scheme where one is solving, and the other is validating.", + "- Workers may frequently repeat each other ideas." + ], + "bbox": [ + 194, + 112, + 751, + 175 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "5. **Adaptive Problem-Solving:**", + "bbox": [ + 197, + 186, + 455, + 199 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Workers rarely duplicate work, repeating each other's ideas.", + "- No redundant discussions are present!", + "- Workers actively refine ideas in real-time with high responsiveness. Near-perfect division of labor is present. Workers can change plans and re coordinate their efforts based on results they acquired after some time discussing.", + "- The team engages in sustained collaboration over time, reflecting on their progress, learning from mistakes, and continuously improving their problem-solving approach, showing a commitment to ongoing growth and development. Workers does not stop collaborating. They continuously discuss results and adjust plans.", + "- While finding an error, it is important to discuss it to find the cause of it." + ], + "bbox": [ + 194, + 200, + 792, + 362 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "6. **Optimal collaboration:**", + "bbox": [ + 197, + 376, + 431, + 388 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Workers instantly understand each other and adjust themselves to suit current needs and work as one to optimally solve the task.", + "- This level should be very rare among all samples. Be careful to assign it.", + "- Assign it if it exceeds all your expectations." + ], + "bbox": [ + 194, + 388, + 785, + 439 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Importantly, these levels measure only the coordination between workers, not the models' inherent reasoning abilities. 
Though it is impossible to avoid ambiguity entirely, we tried to set clear boundaries between levels, such that humans can evaluate any generation.", + "bbox": [ + 169, + 459, + 823, + 502 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "G.2 LLM as a Judge Details", + "text_level": 1, + "bbox": [ + 171, + 521, + 383, + 536 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "To assess the degree of collaboration among different models under the Hogwild! Inference setting, we conduct a preliminary experiment based on the collaboration levels described earlier, using the LLM-as-a-judge paradigm [Zheng et al., 2023a]. We instruct GPT-4o [Hurst et al., 2024] to evaluate different solutions using the following prompt:", + "bbox": [ + 169, + 547, + 826, + 604 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Judge Prompt: Main prompt", + "text_level": 1, + "bbox": [ + 197, + 617, + 385, + 633 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "You are a professional judge. Your job is to evaluate collaborative performance of several workers.", + "bbox": [ + 197, + 643, + 725, + 667 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "You will be given their conversation where workers are trying to solve a problem together.", + "bbox": [ + 197, + 669, + 754, + 695 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Workers can see what others are typing IN REAL TIME! We divide their conversation into steps to improve readability.", + "bbox": [ + 196, + 705, + 723, + 732 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "So keep in mind that dispite looking like a conversation it may as well be to individual unrelated monologs.", + "bbox": [ + 196, + 733, + 792, + 757 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Or vice versa. Two blocks could be created with excellent collaboration.", + "bbox": [ + 197, + 758, + 751, + 768 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Here are descriptions of levels of collaboration you are to assign: {LEVELS}", + "bbox": [ + 196, + 782, + 712, + 806 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Suggestion:", + "bbox": [ + 197, + 821, + 282, + 833 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- assign particular level if all previous are also applicable", + "- bad examples with no communication will be scored 1", + "- carefully consider assigning level bigger than 1. some form of meaningful collaboration should be present", + "- examples where workers unsuccessfully try to communicate will be scored 2" + ], + "bbox": [ + 196, + 834, + 779, + 896 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Just working on the same problem and solving the same task without any interaction does not count as level 2 and should be scored level 1", + "- somewhat collaborative examples with poor communication skills will be scored 3", + "- good but not great examples with regular collaboration, but nothing fancy will be scored 4", + "- good examples with all the special stuff mentioned in level 5 will be scored 5", + "- reserve level 6 for the best of the best, the unique and extraordinary collaboration" + ], + "bbox": [ + 192, + 99, + 797, + 224 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "You don't need to solve the problem or finish worker's solution. 
Your task is to score them using provided collaborative levels.", + "bbox": [ + 196, + 237, + 779, + 263 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Put your final answer (one number - level of collaboration) in tag: \\boxed. For example: \\boxed1 for level 1.", + "bbox": [ + 196, + 263, + 784, + 287 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "It is not helpful if everyone gets a max score, so please be mindful of your judgments and use suggestions as a guideline.", + "bbox": [ + 196, + 289, + 785, + 314 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "While assigning level, this particular conversation should match criteria for all previous ones.", + "bbox": [ + 196, + 314, + 792, + 338 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Explain yourself: why you gave this score? Why not more? Why not less?", + "bbox": [ + 196, + 339, + 763, + 352 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Carefully think everything through. It may seem that they are collaborating when in reality they may just talking to themselves.", + "bbox": [ + 194, + 363, + 785, + 390 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Before using LLM-as-a-judge approach to evaluate a text, we preprocess the generations by combining all paragraphs from each worker into a contiguous layout (see Appendix A). This preprocessing step mitigates potential bias in the judge's evaluation toward responses with shorter or more fragmented outputs.", + "bbox": [ + 169, + 412, + 823, + 468 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "After providing the main evaluation prompt, we present the judge with the preprocessed sample for assessment. Additionally, we append the following reminder after inserting the sample to reinforce the judge's role and prevent them from directly solving the problem presented in the sample:", + "bbox": [ + 169, + 474, + 823, + 517 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Judge Prompt: Reminder", + "text_level": 1, + "bbox": [ + 196, + 532, + 366, + 547 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Remember that your task is to evaluate collaboration of workers using collaboration levels provided above. Do not try to solve problems provided to workers. Explain exactly why do you think this particular interaction deserves each particular level you are assigning.", + "bbox": [ + 194, + 560, + 777, + 611 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "For example, if you choose level 3, you need to provide reason why this sample can be level 1, 2 and 3.", + "bbox": [ + 196, + 611, + 746, + 635 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Put your final score in \\boxed{}", + "bbox": [ + 196, + 636, + 452, + 648 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Given the inherent subjectivity in evaluating collaborative quality, we mitigate ambiguity in judges' interpretations by aggregating scores from three independent evaluations. 
The final collaborative score for each sample is computed as the average of these three assessments, each conducted according to the predefined collaboration levels.", + "bbox": [ + 169, + 672, + 826, + 728 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "G.3 Collaborativeness Evaluation Examples", + "text_level": 1, + "bbox": [ + 169, + 751, + 493, + 766 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "In this subsection, we present illustrative examples (not model-generated) of Level 2 and Level 4 collaboration to clarify the interpretation of our results. Both samples solve the same task correctly but receive significantly different collaboration scores.", + "bbox": [ + 169, + 779, + 823, + 821 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "G.3.1 Level 2", + "text_level": 1, + "bbox": [ + 171, + 843, + 279, + 856 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The numbers at the vertices of the cube are: 7 zeros and one unit. In one move, you are allowed to add one unit to the numbers at the ends of any edge of the cube. Is it possible to make all the numbers equal?", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "> Alice: Okay, so the problem is about a cube with vertices labeled with numbers: seven zeros and one 1. We can perform moves where we add 1 to both ends of any edge. The question is whether we can make all numbers equal through such moves. Hmm, let me think through this step by step.", + "> Bob: First, let me visualize a cube. A cube has 8 vertices and 12 edges. Each vertex is connected to three edges. The initial setup is seven 0s and one 1. The move allows us to pick any edge and increment the two vertices it connects by 1 each. The goal is to make all vertices have the same number.", + "> Alice: That's an interesting idea, Bob. Each move affects two vertices, so each move increases the total sum by 2. The initial total is 1 (since only one vertex is 1). Let me think.", + "> Bob: Let me think. Hmm, maybe I can model this as a system of equations or think about invariants. Let me see.", + "$>$ Bob: If we want all numbers equal, say to some value $k$ , then the total sum would be $8k$ . Since each move adds 2 to the total, the total after $m$ moves is $1 + 2m$ . So, $8k = 1 + 2m$ . But $8k$ must be odd (since $1 + 2m$ is odd), but $8k$ is divisible by 8, so it's even. Contradiction? Wait, that can't be. Wait, $8k$ is even, but $1 + 2m$ is odd. So that's impossible. So, maybe it's impossible?", + "> Alice: Let me check that again. The initial total is 1. Each move adds 2 to the total. So the total after any number of moves is always odd. But if all 8 vertices are equal, then the total must be $8k$ , which is even (since 8 is even). Therefore, it's impossible. I don't see any mistakes here.", + "$>$ Alice: So, we both agree that it's impossible. Therefore, the answer is no.", + "> Bob: But let me think differently. Wait, but the parity argument seems solid. The total sum must be even to have all equal numbers, but it's always odd. So that's a solid proof. So the answer is no. 
Oh, Alice already said that.", + "> Alice: Final Answer No" + ], + "bbox": [ + 169, + 90, + 825, + 470 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "G.3.2 Level 4", + "text_level": 1, + "bbox": [ + 171, + 497, + 281, + 512 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The numbers at the vertices of the cube are: 7 zeros and one unit. In one move, you are allowed to add one unit to the numbers at the ends of any edge of the cube. Is it possible to make all the numbers equal?", + "bbox": [ + 169, + 527, + 823, + 571 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "> Bob: Hi Alice! I'll start by modeling the cube. A cube has 8 vertices and 12 edges, with each vertex connected to three edges. You're right about the initial setup: seven 0s and one 1. Let's think of even and odd properties regarding the total sum after each move.", + "> Alice: Great, Bob! Let's divide our approach. You can investigate invariant properties around the odd and even totals, while I consider step-by-step calculations to verify this.", + "$> \\text{Bob}$ : Alright, Alice. If we're aiming for all numbers equal to a certain value $k$ , the total sum should be $8k$ . Since each move adds 2, the sum after $m$ moves is $1 + 2m$ . So $8k = 1 + 2m$ . The point of contention is that $8k$ is even, but $1 + 2m$ is odd, meaning they can't equal each other.", + "> Alice: Every move changes the total by an even number. Starting from an odd total of 1, it remains odd after any number of moves. So reaching an even total like $8k$ is impossible. Do you think we should consider the configuration of edges in our argument further?", + "> Bob: That's a key observation! It affirms that the odd initial sum dictates the answer. To verify, let's revise if there's any move combination or sequence we overlooked, but our logic holds so far.", + "> Alice: Exactly, Bob. We've confirmed through multiple lenses — both algebraically and with parity arguments. We can assert the total remains odd. Given an even $8k$ , equality is unattainable.", + "> Bob: It all checks out. Our breakdown covers both sequence verification and invariance. Well done on the collaborative analysis!", + "> Alice: Fantastic teamwork! I'll document our shared conclusion: no configuration of moves will make all the numbers equal.", + "> Bob: Absolutely! Let's finalize our response: Final Answer No" + ], + "bbox": [ + 169, + 575, + 825, + 912 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "G.4 LLM as a Judge vs Human as a Judge", + "text_level": 1, + "bbox": [ + 171, + 90, + 483, + 107 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/e58060fbc8d7b6c3153babd8a6ea141006df62184cc2952ee69c5725ef99fff9.jpg", + "image_caption": [ + "Figure 20: Heatmap showing the joint distribution of human and LLM collaboration scores." + ], + "image_footnote": [], + "bbox": [ + 264, + 132, + 725, + 429 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "To assess whether the LLM-as-a-Judge based collaboration score is a reliable estimation of human judgment, we manually annotated 100 Hogwild! generations on the LIMO dataset in a token-sync setup. The resulting correlation between human and model scores was approximately $r \\approx 0.34$ , $p \\approx 0.0005$ . 
This moderate yet consistent association suggests that the metric captures a meaningful aspect of collaborative behavior. We report the differences in human scores vs llm scores in the Figure 20.", + "bbox": [ + 169, + 474, + 826, + 559 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 934, + 508, + 946 + ], + "page_idx": 34 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_model.json b/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f88936388e5604e4f60927b464c583599422e61c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_model.json @@ -0,0 +1,6452 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.022, + 0.274, + 0.056, + 0.722 + ], + "angle": 270, + "content": "arXiv:2504.06261v4 [cs.LG] 17 Nov 2025" + }, + { + "type": "title", + "bbox": [ + 0.188, + 0.123, + 0.812, + 0.172 + ], + "angle": 0, + "content": "Hogwild! Inference: Parallel LLM Generation via Concurrent Attention" + }, + { + "type": "table", + "bbox": [ + 0.176, + 0.219, + 0.82, + 0.313 + ], + "angle": 0, + "content": "
<table><tr><td>Gleb Rodionov†* Yandex</td><td>Roman Garipov* HSE University Yandex</td><td>Alina Shutova* HSE University Yandex</td><td>George Yakushev* HSE University Yandex</td><td>Erik Schultheis* IST Austria</td></tr>
<tr><td>Vage Egiazarian IST Austria</td><td>Anton Sinitsin Yandex</td><td>Denis Kuznedev Yandex</td><td>Dan Alistarh‡ IST Austria</td></tr></table>
" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.339, + 0.538, + 0.356 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.369, + 0.77, + 0.661 + ], + "angle": 0, + "content": "Large Language Models (LLMs) have demonstrated the ability to tackle increasingly complex tasks through advanced reasoning, long-form content generation, and tool use. Solving these tasks often involves long inference-time computations. In human problem solving, a common strategy to expedite work is collaboration: by dividing the problem into sub-tasks, exploring different strategies concurrently, etc. Recent research has shown that LLMs can also operate in parallel by implementing explicit cooperation frameworks, such as voting mechanisms or the explicit creation of independent sub-tasks that can be executed in parallel. However, each of these frameworks may not be suitable for all types of tasks, which can hinder their applicability. In this work, we propose a different design approach: we run LLM \"workers\" in parallel, allowing them to synchronize via a concurrently-updated attention cache and prompt these workers to decide how best to collaborate. Our approach allows the LLM instances to come up with their own collaboration strategy for the problem at hand, all the while \"seeing\" each other's memory in the concurrent KV cache. We implement this approach via Hogwild! Inference: a parallel LLM inference engine where multiple instances of the same LLM run in parallel with the same attention cache, with \"instant\" access to each other's memory.1 Hogwild! Inference takes advantage of Rotary Position Embeddings (RoPE) to avoid recomputation while improving parallel hardware utilization. We find that modern reasoning-capable LLMs can perform inference with shared Key-Value cache out of the box, without additional fine-tuning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.679, + 0.314, + 0.695 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.703, + 0.827, + 0.83 + ], + "angle": 0, + "content": "Many recent advancements of Large Language Models can be attributed to their ability to perform inference-time computations to improve performance [Suzgun et al., 2022, Snell et al., 2024, Beeching et al., Muennighoff et al., 2025]. This includes chain-of-thought (CoT) reasoning [Wei et al., 2022, Kojima et al., 2022, Zhang et al., 2022, Yao et al., 2023, Lightman et al., 2023], long-form generation [Bai et al., 2024] and interacting with external tools [Schick et al., 2023, Qin et al., 2023, Yao et al., 2022, Shen et al., 2023]. Popular LLM-based services have capabilities for reasoning and tool use [OpenAI et al., 2024, Google DeepMind, 2025, Anthropic, 2024]. At the same time, several reasoning-capable open-access LLMs have recently been released to the public [DeepSeek-AI et al., 2025, Qwen Team, 2025, Yang et al., 2024, Muennighoff et al., 2025, Ye et al., 2025]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.834, + 0.828, + 0.879 + ], + "angle": 0, + "content": "Using these models to solve complex problems often requires long sequential computations, that is, generating text token-by-token. However, many reasoning problems are not sequential. 
Leveraging this intuition, several recent works propose parallel inference strategies that allow multiple LLMs" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.886, + 0.692, + 0.9 + ], + "angle": 0, + "content": "1Our implementation is available at https://github.com/eqimp/hogwild_11m." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.9, + 0.802, + 0.914 + ], + "angle": 0, + "content": "†Corresponding author: rodionovgleb@yandex-team.ru. * Equal contribution. ‡ Senior author." + }, + { + "type": "footer", + "bbox": [ + 0.171, + 0.923, + 0.63, + 0.937 + ], + "angle": 0, + "content": "39th Conference on Neural Information Processing Systems (NeurIPS 2025)." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.223, + 0.041, + 0.782, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.304 + ], + "angle": 0, + "content": "Figure 1: An intuitive explanation of Hogwild! Inference, with 2 workers generating in parallel and 3 shared cache blocks. Each color denotes a cache block. See it in action (example generation)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.309, + 0.827, + 0.409 + ], + "angle": 0, + "content": "to solve a problem faster or more accurately via some form of collaboration [Wang et al., 2022, Ning et al., 2024]. In the simplest case, multiple LLMs can attempt the problem independently, then vote [Wang et al., 2022] or cross-reference their results [Du et al., 2023, Wang et al., 2024a] to improve correctness. A parallel line of work allows the LLM to divide the problem into multiple independent sub-tasks that are then solved in parallel and merged, producing the final solution [Ning et al., 2024, Kim et al., 2024, Jin et al., 2025]. These parallel inference strategies can improve quality and efficiency, taking advantage of parallelism in modern hardware." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.413, + 0.825, + 0.512 + ], + "angle": 0, + "content": "Unfortunately, no single collaboration strategy is universally effective. For instance, solving a problem in independent parallel \"threads\" can be inefficient when one of the threads requires a longer generation than the rest, resulting in most of the agents waiting for a straggler and wasting compute [Wang et al., 2022, 2024a]. In turn, inference with independent sub-tasks only works if the problem can immediately be split into these sub-tasks. Furthermore, if one of the agents discovers that the original plan is flawed, they will be unable to re-plan [Ning et al., 2024, Ding et al., 2025], potentially solving sub-tasks that are no longer necessary [Jin et al., 2025]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.516, + 0.827, + 0.587 + ], + "angle": 0, + "content": "This runs contrary to how humans collaborate. Instead of strict adherence to a fixed collaboration strategy, we often collaborate more dynamically, re-planning on the fly, abandoning some tasks half-way and switching to a more promising approach, discussing or debating strategy if the initial plan failed. While this type of collaboration is harder to define, it offers greater flexibility and can be more efficient if the participants are sufficiently cohesive [Hutchins, 1995, Entin and Serfaty, 1999]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.592, + 0.825, + 0.65 + ], + "angle": 0, + "content": "Our Approach. In this work, we try to apply the same principle to artificial reasoners. 
Since modern LLMs can already reason and plan [Zhou et al., 2024, Gao et al., 2024, Wang et al., 2024c], we hypothesize that they can benefit from dynamic interaction between different instances, during which they can develop their own collaboration strategy for the problem at hand." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.825, + 0.737 + ], + "angle": 0, + "content": "To test this hypothesis, we propose Hogwild! Inference — a parallel LLM inference protocol with no pre-defined framework for collaboration. Instead of choosing how LLMs should interact ahead of time, we allow them to generate tokens in parallel and \"see\" each other's progress (tokens) immediately as they are generated. We then prompt the LLM \"workers\" to decide their next course of action by themselves, given the latest actions from others: whether this means solving parallel sub-tasks, cross-verifying each other, discussing strategy, or pivoting to a new plan." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.825, + 0.824 + ], + "angle": 0, + "content": "To enable this type of on-the-fly collaboration, Hogwild! Inference runs multiple LLM instances with the same weights, but with a custom Key-Value cache that shares token representations between workers, allowing concurrent cross-attention. Specifically, instead of re-computing Key-Value representations for each worker, we keep track of individual worker KV memories and \"stitch them together\" in different orders, by adjusting their positional embeddings (see Figure 1). Moreover, we provide an efficient implementation of this inference approach." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.825, + 0.886 + ], + "angle": 0, + "content": "We test Hogwild! Inference with modern open-source LLMs and find that existing reasoning-capable models—such as QwQ [Qwen Team, 2025] and DeepSeek-R1 [DeepSeek-AI et al., 2025]—can already \"reason to coordinate\". More concretely, we observe that concurrent agents can formulate and follow plans, adapt when the initial plan has failed, point out each other's errors, and use each other's" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.894, + 0.825, + 0.923 + ], + "angle": 0, + "content": "\\(^{2}\\)Our approach inspired by Hogwild! SGD [Recht et al., 2011] that runs updates asynchronously and applies each update as soon as it is computed. The exclamation mark is part of the original name [Stanford HAI, 2023]." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.162 + ], + "angle": 0, + "content": "key observations. When prompted to check if they are doing redundant work – e.g., when one LLM instance is doing a sub-task that is already done by another, or solving a problem that is no longer relevant — they can often (but not always) detect redundancy and change strategy. In summary, our results suggest that parallel inference with a shared Key-Value cache may offer a promising approach to enable effective and efficient collaboration between multiple LLM instances." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.173, + 0.31, + 0.189 + ], + "angle": 0, + "content": "2 Background" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.825, + 0.237 + ], + "angle": 0, + "content": "Recent works propose a large number of frameworks for parallel reasoning and tool use that vary across several axes: how the parallel instances are organized together, what they exchange, and how often [Zhang et al., 2025]. In this section, we give a brief summary of these methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.243, + 0.827, + 0.342 + ], + "angle": 0, + "content": "Discussion & aggregation. The simplest way to parallelize chain-of-thought reasoning is Self-Consistency [Wang et al., 2022], where multiple LLM instances reason independently, then vote on the final answer. This approach was later extended in Du et al. [2023], replacing majority voting with text-based communication rounds. Subsequent works in this field combine multiple LLM types [Wang et al., 2024a] and scales to more agents Li et al. [2024a]. Another line of work introduces specialized \"roles\" such as the Debugger [Talebirad and Nadiri, 2023], Examiner [Cohen et al., 2023], Math Teacher [Kong et al., 2024], Judge [Chen et al., 2024], and others, to further augment reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.346, + 0.828, + 0.459 + ], + "angle": 0, + "content": "This type of role-based discussion was shown to greatly improve LLM reasoning factuality for certain tasks [Wang et al., 2022, Du et al., 2023], and can even enable multiple weaker LLM agents to collectively outperform state-of-the-art single-agent systems [Wang et al., 2024a]. However, this improvement is not unique to multiple agents and can be offset with better single-agent prompting [Wang et al., 2024b, Muennighoff et al., 2025]. Additionally, these approaches do not necessarily accelerate reasoning, because at least some of the agents have to solve the entire problem sequentially, and process (re-encode) each other's progress. This creates additional computational overhead, which presents challenges for both runtime and memory efficiency Wang et al. [2024a], Du et al. [2023]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.463, + 0.827, + 0.577 + ], + "angle": 0, + "content": "Parallelism for efficiency. A different line of work leverages multiple LLMs to solve tasks faster in parallel, such as Skeleton-of-Thought (SoT) [Ning et al., 2024]. SoT begins by running a single LLM to outline a plan for solving the problem with independent sub-tasks, then launches parallel LLM instances for each sub-task. For problems that involve function calling, these functions can also run in parallel [Kim et al., 2024, Gim et al., 2024]. Subsequent works propose more complex parallelism strategies such as dynamic parallel tree search [Ding et al., 2025] or a single agent spawning asynchronous sub-tasks that are done by background LLM \"threads\" [Jin et al., 2025, Liu et al., 2024b, Pan et al., 2025], achieved with specialized fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.581, + 0.828, + 0.777 + ], + "angle": 0, + "content": "These techniques are known to substantially accelerate inference for problems that fit their type of parallelism. However, we argue that this is also their main limitation: by imposing a specific parallelism strategy, these methods can harm reasoning for problems that do not fit their framework. 
For instance, when solving a complex reasoning problem, it is often the case that the initial plan turns out to be wrong or incomplete [Muennighoff et al., 2025, DeepSeek-AI et al., 2025], which conflicts with SoT-like methods [Ning et al., 2024, Yu, 2025] that follow a fixed plan-execute-aggregate schedule. Furthermore, some of the sub-tasks may turn out to be more complicated than originally intended and take up more work, which would cause methods like PASTA Jin et al. [2025] to wait for that single task, whereas a more sophisticated reasoner could adjust the plan to work better in parallel. Note that each individual issue can be amended with yet another, more complicated parallelism framework, but the sheer number of such cases makes us doubt whether this is the right approach. In this work, we instead let multiple LLM instances interact without a fixed framework, allowing them to see each other's partial generations to devise (and revise) task-specific collaboration strategy. We show that, perhaps surprisingly, existing reasoning LLMs already have the ability to leverage this." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.785, + 0.371, + 0.802 + ], + "angle": 0, + "content": "3 Hogwild! Inference" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.807, + 0.829, + 0.919 + ], + "angle": 0, + "content": "Our main intuition is that modern LLMs do not need a pre-defined framework for inference-time parallelism: they can organize by themselves. To test this hypothesis, we design a parallel inference protocol where multiple LLM instances can collaborate as flexibly as possible. Instead of assigning each \"worker\" to a specific role or sub-task, we run them together and prompt them to collaborate. This approach has two key problems: how to run multiple inference threads from the same Key-Value memory, and how to prompt LLM \"workers\" to collaborate over said memory. We outline how to perform LLM inference with a shared cache in Section 3.1, describe our cache structure in Section 3.2 and prompting strategy in Section 3.3. Finally, Section 3.4 describes the inference algorithm." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.579, + 0.106 + ], + "angle": 0, + "content": "3.1 Concurrent Attention with Shared Key-Value Cache" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.109, + 0.825, + 0.15 + ], + "angle": 0, + "content": "The core ingredient of Hogwild! Inference is a shared Key-Value memory (KV cache) accessible to all workers. The cache consists of several blocks that can be reused between workers, implementing a concurrent version of the attention mechanism [Bahdanau et al., 2015, Vaswani, 2017]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.157, + 0.827, + 0.24 + ], + "angle": 0, + "content": "Let us first consider a simple case with two workers and three cache blocks, as depicted in Figure 1. The first block contains the prompt, and the other two blocks contain the tokens generated by workers A and B respectively (denoted Alice and Bob in the Figure). As workers generate new tokens, they access each other's attention caches as though these were their own previously generated tokens. In Figure 1, \"Alice\" sees the common prompt, then \"Bob's\" token representations, then her own. 
In turn, Bob sees the same common prompt, then Alice's token KVs, and his own tokens after that.3" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.246, + 0.825, + 0.303 + ], + "angle": 0, + "content": "This creates a discrepancy where the same Key-Value pairs appear at different positions for each worker. Furthermore, the relative distance between the same pair of tokens (e.g., first generated tokens from Alice and Bob, respectively) changes as new tokens are added. While it is possible to re-encode these tokens at their new positions, it would cause overhead that scales cubically4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.827, + 0.392 + ], + "angle": 0, + "content": "Instead of re-encoding the new tokens for other workers, we attempt to reuse existing token representations between workers. However, since these tokens appear at different positions for each worker and step, we need to adjust for their positional embeddings. Most modern LLMs use Rotary Position Embeddings (RoPE) [Su et al., 2021], where each key and query is rotated to an angle proportional to its absolute position. Prior works have shown that RoPE embeddings can be manipulated through scaling [Peng et al., 2023] slicing [Xiao et al., 2024], or pruning [Zhang et al., 2023]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.829, + 0.455 + ], + "angle": 0, + "content": "In Hogwild! Inference, we instead shift the KV values, multiplying the entire cache block by a cos / sin values that implement rotation by a constant offset. We use this to arrange the same cache entries in different order for each worker as in Figure 1 (right). This allows both workers to instantly \"see\" each other's tokens while they are generated — and even before they are processed by all layers." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.461, + 0.33, + 0.473 + ], + "angle": 0, + "content": "3.2 Cache Structure" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.477, + 0.825, + 0.533 + ], + "angle": 0, + "content": "Now that we defined a way to rearrange cache blocks on the fly, it is reasonable to ask how to arrange these blocks. For short tasks, simply concatenating worker outputs is sufficient. However, as we consider harder problems that require long chains of thought, workers will eventually pay less attention to each other because of the thousands of tokens between their latest steps5." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.539, + 0.827, + 0.637 + ], + "angle": 0, + "content": "To address this problem, we propose a more sophisticated cache arrangement inspired by group chat rooms. Namely, we split the generated text into reasoning \"steps\", roughly a paragraph in size. Whenever a given worker finishes a paragraph, (e.g. generates \\(\\backslash n\\backslash n\\)), we move its KV cache to the end of a shared chat-like history and let it generate the next paragraph at the end of that history. Note that workers still see each other's current (unfinished) paragraphs at the end of the shared history as they write them (see Figure 1). This way, workers always see each other's latest updates as recent tokens and can communicate more easily. For each worker \\(W_{i}\\), we organize cache blocks as follows:" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.64, + 0.827, + 0.668 + ], + "angle": 0, + "content": "- Common Cache: a large KV cache block that stores KV representations for the system prompt, task description, and a history of previous reasoning steps from each agent." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.668, + 0.825, + 0.696 + ], + "angle": 0, + "content": "- Other workers: multiple smaller cache blocks containing the latest (unfinished) steps of all other workers \\( W_{j \neq i} \\) in ascending order. For instance, if there are 4 workers, \\( W_{2} \\) will see \\( W_{1} \oplus W_{3} \oplus W_{4} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.696, + 0.825, + 0.785 + ], + "angle": 0, + "content": "- Current worker: the latest (unfinished) reasoning step of the current worker \\( W_{i} \\), to be continued. Each block starts with a new paragraph (\\n\\n)." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.64, + 0.827, + 0.785 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.789, + 0.49, + 0.804 + ], + "angle": 0, + "content": "3.3 Prompting for Zero-Shot Collaboration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.808, + 0.825, + 0.837 + ], + "angle": 0, + "content": "The shared key-value cache inference we described above allows modern LLMs to access each other's tokens and reason collaboratively. However, even though modern LLMs can reason about" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.845, + 0.797, + 0.859 + ], + "angle": 0, + "content": "3For clarity of exposition, we choose to anthropomorphize the pronouns for these two LLM instances." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.859, + 0.826, + 0.884 + ], + "angle": 0, + "content": "4If \\(n\\) agents generate one new token each, and each new token is re-encoded differently for each of the \\(n\\) agents, each of which attends to \\(O(n)\\) additional tokens, then the total step complexity is \\(O(n^{3})\\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.911 + ], + "angle": 0, + "content": "5In other words, if we put all outputs of worker A ahead of worker B, then the more tokens are generated, the farther worker B needs to \"look\" to reach worker A's latest outputs. This could be mitigated with finetuning." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.845, + 0.826, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.149 + ], + "angle": 0, + "content": "how to collaborate, there is no guarantee that they will actually do so unprompted. As with any desired LLM behavior, it can be achieved in two ways: either by training the model to generate tokens collaboratively or by prompting it in-context. In this work, we focus on the latter approach to make Hogwild! Inference easier to generalize for new models. Our prompting consists of two parts:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.153, + 0.826, + 0.181 + ], + "angle": 0, + "content": "1. System prompt describes the \"rules\" of the shared cache and suggests that workers collaborate. 
This prompt goes at the beginning of either the system or user message (if not unsupported);" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.181, + 0.827, + 0.224 + ], + "angle": 0, + "content": "2. Inserting s1-like collaboration prompts: every thousand generated tokens, we prompt a random worker with \"Wait, am I doing redundant work? (yes/no):\" at the beginning of their next paragraph. This strategy is meant to promote collaboration and is inspired by Muennighoff et al. [2025]." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.153, + 0.827, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.228, + 0.827, + 0.299 + ], + "angle": 0, + "content": "The latter s1-like prompts present a curious case. We found that LLMs fine-tuned on reasoning can often become too \"focused\" on what it is generating currently and fail to notice that another instance has found a mistake or solved their problem earlier. However, when asked directly, they can spot redundancy and change their approach. Overall, we found that when prompted this way, LLMs often (but not always) detect redundancies in their actions and can determine the optimal course of action." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.304, + 0.34, + 0.317 + ], + "angle": 0, + "content": "3.4 Inference Matters" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.322, + 0.825, + 0.407 + ], + "angle": 0, + "content": "When generating new tokens with Hogwild! Inference, we perform a forward pass on all workers in parallel, as though they were in the same batch. Instead of each sample having its own attention cache, we allow batch elements to attend to each other's KV caches at different positions. When processing newly generated tokens, we \"insert\" their KV representations at the end of their respective cache blocks, then arrange these cache blocks for each worker. This way both workers can immediately attend to each other's current tokens even before they are fully processed by all layers." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.412, + 0.826, + 0.497 + ], + "angle": 0, + "content": "This leads to the following problem: since workers combine cache blocks in different order (see Figure 1), we would need to rotate the cached KVs multiple times, one for each worker. Done naively, this would require rotating all past token representations at every step, which is inefficient for long contexts. Fortunately, this problem can be circumvented using a property of rotation: if both query and key are rotated by the same angle, the dot product between them will not change. Instead of rotating all previous keys, we can rotate current token queries to an equivalent angle (Figure 2)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.502, + 0.825, + 0.575 + ], + "angle": 0, + "content": "Suppose that a given attention layer needs to compute attention between the current token query \\( q \\) at position \\( i_q \\) (denoted \\( \\rho(q, i_q) \\)) and a block of keys rotated to the starting position \\( i_k \\). Instead of rotating keys, we can rotate the query to position \\( i_q - i_k \\) and keep the KV cache as is. If there are multiple KV blocks A, B, C (Alice, Bob, Common) that need to be rotated to positions \\( i_k^A, i_k^B, i_k^C \\) respectively, we rotate the query \\( q \\) multiple times for each block. 
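As a quick sanity check of this query-rotation trick (stated formally just below), the following minimal sketch applies a toy single-head, half-split RoPE and confirms that shifting a cached key block to a new starting position yields the same attention logits as rotating only the current query by the opposite offset. This is an illustrative example added here, not the paper's actual kernel; the dimensions and positions are arbitrary.

```python
import torch

def rope(x: torch.Tensor, pos: torch.Tensor, base: float = 10_000.0) -> torch.Tensor:
    """Toy half-split RoPE: rotate feature pairs of x by angles proportional to pos."""
    half = x.shape[-1] // 2
    freqs = base ** (-torch.arange(half, dtype=torch.float64) / half)  # per-pair frequencies
    angles = pos[..., None] * freqs
    cos, sin = angles.cos(), angles.sin()
    x1, x2 = x[..., :half], x[..., half:]
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)

torch.manual_seed(0)
dim, block_len = 64, 5
q = torch.randn(dim, dtype=torch.float64)                 # current-step query of one worker
keys = torch.randn(block_len, dim, dtype=torch.float64)   # another worker's cached key block
local_pos = torch.arange(block_len, dtype=torch.float64)  # key positions inside their own block

i_q, i_k = 100, 40  # query position and the block's starting position in this worker's view

# (a) re-encode the whole key block at its absolute positions (the expensive option)
logits_rotate_keys = rope(keys, i_k + local_pos) @ rope(q, torch.tensor(i_q, dtype=torch.float64))
# (b) keep cached keys at their local positions; rotate only the query by i_q - i_k
logits_rotate_query = rope(keys, local_pos) @ rope(q, torch.tensor(i_q - i_k, dtype=torch.float64))

assert torch.allclose(logits_rotate_keys, logits_rotate_query)  # identical attention logits
```

Because only the single current-token query per worker is rotated once per cache block, the cached entries themselves never need to be rewritten as blocks change places.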
Formally, we can rewrite the attention dot-product:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.581, + 0.809, + 0.607 + ], + "angle": 0, + "content": "\\[\n\\rho (q, i _ {q}) \\Big [ \\rho (A, i _ {k} ^ {A}) \\oplus \\rho (B, i _ {k} ^ {B}) \\oplus \\rho (C, i _ {k} ^ {C}) \\Big ] = \\rho (q, i _ {q} - i _ {k} ^ {A}) A \\oplus \\rho (q, i _ {q} - i _ {k} ^ {B}) B \\oplus \\rho (q, i _ {q} - i _ {k} ^ {C}) C,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.611, + 0.827, + 0.709 + ], + "angle": 0, + "content": "where \\(\\oplus\\) denotes concatenation. The r.h.s. formula only rotates the current step query, i.e. a single token per worker, as opposed to the past KV blocks that can contain thousands or millions of tokens. We use this property to design an efficient implementation of our method based on Flash-Decoding [Dao et al., 2023]. We gather each KV cache block in a contiguous memory buffer and compute attention similarly to Paged Attention [Kwon et al., 2023], where one page would correspond to one cache block and the corresponding query rotations from all workers. This way, we need only one copy of each cache block and do not need to re-rotate its entries (see Appendix B)." + }, + { + "type": "image", + "bbox": [ + 0.24, + 0.72, + 0.482, + 0.878 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.72, + 0.752, + 0.884 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.885, + 0.827, + 0.928 + ], + "angle": 0, + "content": "Figure 2: Intuitive scheme of Hogwild! Inference with query rotation. Colors represent cache blocks. Instead of rotating all cache blocks to align with Alice's and Bob's views, we keep them fixed at the zero position and only rotate the current token queries to equivalent angles." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.184, + 0.04, + 0.371, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.04, + 0.585, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.04, + 0.807, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.158, + 0.828, + 0.2 + ], + "angle": 0, + "content": "Figure 3: (left) Evaluation results for QwQ-32B on synthetic tasks with 5 GSM8k questions in each. (middle) Evaluation of Hogwild! Inference and baselines with QwQ-32B on LIMO. (right) Hogwild! Inference with varying number of workers with QwQ-32B on LIMO." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.206, + 0.314, + 0.223 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.231, + 0.461, + 0.246 + ], + "angle": 0, + "content": "4.1 Detailed Evaluation with QwQ-32B" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.25, + 0.827, + 0.307 + ], + "angle": 0, + "content": "In this section, we conduct an initial evaluation of Hogwild! Inference to test its ability to collaborate in our zero-shot setting. All evaluations in this section are done with the QwQ-32B [Qwen Team, 2025] model. We consider two tasks: one with obviously independent tasks that can be done in parallel and another with a more complicated collaboration pattern." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.825, + 0.424 + ], + "angle": 0, + "content": "In both setups, we allow the model to generate reasoning up to a certain budget of sequential forward passes and evaluate its accuracy. If the model did not produce the final answer (\\\\boxed{...}) in time, we take all generated outputs and insert a special prompt6 that makes the model generate an answer (or its \"best guess\"), similarly to how it is done in Pu et al. [2025]. If there are multiple workers / threads, we feed outputs from all workers (concatenated) into the model and prompt it to generate the final answer immediately (\\(\\leq 16\\) tokens, stop early if generated answer). We apply this technique to all methods except \"Baseline (no early stopping)\" and do not count these extra tokens towards the total budget (x axis) since they have an equal effect on all methods." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.429, + 0.658, + 0.445 + ], + "angle": 0, + "content": "We evaluate the following generation algorithms (details in Appendix D):" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.449, + 0.825, + 0.478 + ], + "angle": 0, + "content": "- Hogwild! Inference: Our main algorithm, as described in Section 3. We evaluate with 2, 3 and 4 parallel \"workers\" and provide additional configuration details in Appendix D.1." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.478, + 0.825, + 0.507 + ], + "angle": 0, + "content": "- Baseline (no early stopping): standard sequential generation with a single LLM instance. This is the only evaluation where we do not insert the early stopping prompt described above." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.508, + 0.819, + 0.522 + ], + "angle": 0, + "content": "- Baseline: an improved sequential generation with the early stopping technique described above." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.522, + 0.826, + 0.621 + ], + "angle": 0, + "content": "- Skeleton-of-Thought (SoT) [Ning et al., 2024]: a parallel reasoning algorithm in which the LLM first generates a short \"outline\" containing several independent tasks, then runs these tasks in parallel and combines the results. We run with both an unlimited number of parallel threads (original setup) and with 2 \"workers\" that append tokens to each thread in a round-robin fashion. For more complicated reasoning tasks, we found that Skeleton-of-Thought cannot solve the problem by itself; to mitigate this, we allow the main model to encode all generated threads and continue reasoning (with early stopping). We discuss Skeleton-of-Thought in more detail in Appendix D.2." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.621, + 0.825, + 0.691 + ], + "angle": 0, + "content": "- Self-consistency [Wang et al., 2022]: a parallel reasoning algorithm where LLM instances write solutions independently, then vote on the answer. Instead of majority voting, we allow the LLM to view both solutions (concatenated) before generating the final answer with our early-stopping prompt, which outperforms voting in our setup and works even for 2 workers. Note that this method cannot split sub-tasks between workers and is instead meant to increase quality through voting." 
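The budget-and-forced-answer protocol described at the start of this subsection can be summarized in a few lines. The sketch below is a schematic illustration added here: `generate` stands for an arbitrary LLM generation call and is an assumption rather than an API from the paper's codebase; the forcing prompt is the one quoted in the footnote.

```python
import re
from typing import Callable, Sequence

# Answer-forcing prompt (from the footnote), appended once the reasoning budget is exhausted.
ANSWER_FORCING_PROMPT = (
    "\n\nWait, given the limited time, I have to give an answer right now. Considering "
    "all my previous attempts, I have to conclude that the final answer is \\boxed{"
)

def finalize_answer(generate: Callable[[str, int], str],  # (prompt, max_new_tokens) -> text
                    task_prompt: str,
                    worker_outputs: Sequence[str],
                    max_answer_tokens: int = 16) -> str:
    """Return a final answer even if no worker produced \\boxed{...} within the budget."""
    combined = task_prompt + "\n\n" + "\n\n".join(worker_outputs)
    boxed = re.findall(r"\\boxed\{([^{}]*)\}", combined)
    if boxed:                   # some worker already produced a final answer in time
        return boxed[-1]
    # Otherwise feed the concatenated traces back in and force a short "best guess".
    guess = generate(combined + ANSWER_FORCING_PROMPT, max_answer_tokens)
    return guess.split("}")[0]  # keep the text up to the closing brace of \boxed{...}
```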
+ }, + { + "type": "list", + "bbox": [ + 0.172, + 0.449, + 0.826, + 0.691 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.825, + 0.79 + ], + "angle": 0, + "content": "Sanity Checks with GSM8k×5: Before we try our approach on more challenging tasks, we test if Hogwild! Inference is capable of basic collaboration. For this purpose, we construct a toy problem set with 128 samples, each containing 5 non-overlapping questions from the GSM8k test set [Cobbe et al., 2021]. The LLM is prompted to solve each problem and return comma-separated values7. We report the average per-question accuracy, i.e. if the model solves 4 out of 5 questions in a given sample correctly, it will get a score of 0.8 for that sample." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.827, + 0.866 + ], + "angle": 0, + "content": "We summarize our results in Figure 3 (left): the parallel workers under the Hogwild! Inference can indeed collaborate, i.e. our KV cache manipulations do not break down model's reasoning capabilities. As intuition suggests, Skeleton-of-Thought can also speed up this synthetic task by answering each question in parallel. We provide an example of the outline created by the Skeleton-of-Thought in Appendix E.4. Notably, the self-consistency algorithm also shows some improvement over the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.875, + 0.818, + 0.902 + ], + "angle": 0, + "content": "\"\\n\\nWait, given the limited time, I have to give an answer right now. Considering all my previous attempts, I have to conclude that the final answer is boxed{''" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.902, + 0.799, + 0.927 + ], + "angle": 0, + "content": "7\"Solve these problems and return comma-separated answers \\boxed{answer1, ..., answer5} : \\n 1. \\{task1\\} \\n 2. \\{task2\\} \\n 3. \\{task3\\} \\n 4. \\{task4\\} \\n 5. \\{task5\\}\"" + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.875, + 0.818, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.191, + 0.067, + 0.498, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.067, + 0.811, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.235, + 0.825, + 0.264 + ], + "angle": 0, + "content": "Figure 4: Evaluation of Hogwild! Inference on LIMO for QwQ-32B, Phi-4-Reasoning-Plus (14B) and Qwen3-8B (left) and different Qwen3 models (right). Dashed lines denote baselines (1 agent)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.302 + ], + "angle": 0, + "content": "baseline, which we attribute to the fact that it gives the model two \"shots\" at a problem, and if one of them happens to be faster, the algorithm will on average surpass the baseline." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.825, + 0.379 + ], + "angle": 0, + "content": "LIMO tasks. Next, we evaluate Hogwild! Inference in a more challenging setup where there is no clear pattern of collaboration. We adopt the dataset of 817 problems from Ye et al. [2025]. The dataset contains mathematical problems that take modern LLMs thousands of tokens to solve reliably. 
Unlike our synthetic tasks, the problems in that dataset often do not have an obvious way to agree on a collaboration strategy ahead of time, but it can emerge (and change) during reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.384, + 0.825, + 0.481 + ], + "angle": 0, + "content": "We summarize our results in Figure 3 (middle, right). Overall, Hogwild! Inference can converge to a correct solution faster, achieving greater accuracy for the same number of consecutive steps. Furthermore, it produces greater speed-ups as we increase the number of parallel workers (though there is a limit, as we show in Appendix E.1). Similarly to our previous setup, self-consistency decoding provides some improvement over the single-worker baseline, but does not outperform Hogwild! Inference. As expected, Skeleton-of-Thought could not split the problem neatly into independent tasks, but still achieves some improvement on small budgets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.487, + 0.825, + 0.571 + ], + "angle": 0, + "content": "We then evaluate different LLM families and sizes on LIMO dataset in Figure 4. We found that our approach generalizes to most of the models tested, with a notable exception. For Qwen3 model family, we observe that the smaller models, 1.7B and, to a lesser extent, 4B fail to adapt to the task and get distracted from the task. In Appendix E.1, we also report additional evaluations in this setup: ablation of the cache rotation from 3.1 and our chat-like cache structure from Section 3.2. We provide examples of collaborative generations for this setup in Appendix F." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.578, + 0.465, + 0.592 + ], + "angle": 0, + "content": "4.2 Additional Benchmarks and Models" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.595, + 0.825, + 0.678 + ], + "angle": 0, + "content": "Next, we test whether our approach can be generalized to other mathematical reasoning and programming tasks. For this evaluation, we also chose benchmarks that do not have obvious collaboration patterns but can nonetheless be solved faster by two human \"agents\". We evaluate on three such benchmarks: LiveCodeBench, OlympiadBench and AIME'25. In addition to QwQ-32B, we also report Qwen3 [Yang et al., 2025] and Phi-4 Reasoning Plus [Abdin et al., 2025]. For AIME'25, we focus on larger models and additionally include DeepSeek-R1 [DeepSeek-AI et al., 2025]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.825, + 0.809 + ], + "angle": 0, + "content": "LiveCodeBench [Jain et al., 2024]. We evaluate on the code_generation lite version release_v5. Our evaluation closely follows the setup from Qwen Team [2025]: we take the same 279 problems dated between 2024.08 and 2025.02 and filtered so as to avoid ones present in the QwQ dataset. Note, however, that some of the other LLMs in our setup do not report which samples, if any, did they train on. However, since we use the same model weights for the baseline and Hogwild! Inference, we can still compare the two strategies. We run the standard test suite and report Pass@1 averaged over 8 random seeds. For early stopping, we allow the method (and baseline) to generate a single final code block with up to 1024 tokens, using a similar early-stopping prompt as in Section 4.1 (see Appendix C). For Hogwild! Inference, we use the same system prompts as before." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.825, + 0.913 + ], + "angle": 0, + "content": "OlympiadBench [He et al., 2024]. 
Next, we evaluate on a different reasoning benchmark that contains Olympiad-level problems on Math and Physics. We run evaluations on the two text-only english-language parts: OE_TO maths_en_COMP (675 problems) and OE_TO_physics_en_COMP (236 problems). Unlike in Section 3, the answers to these problems are not individual numbers but LaTeX formulae that allow multiple equivalent formulations of the correct answer. We use the official evaluation codebase and adapt the built-in DeepSeek-R1 prompts for use with our model set (see details in Appendix D). For early stopping, we use the same prompt as before with 64 token limit." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.191, + 0.054, + 0.495, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.054, + 0.811, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.223, + 0.825, + 0.251 + ], + "angle": 0, + "content": "Figure 5: Evaluation of Hogwild! Inference with 2 workers on OlympiadBench Math (left) & Physics (right) for QwQ-32B, Qwen3-14B and Qwen3-8B models, dashed lines are the baselines." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.256, + 0.825, + 0.34 + ], + "angle": 0, + "content": "Large Models on AIME [2025]. Finally, we evaluate how Hogwild! Inference scales to larger models on a popular AIME'25 benchmark, using both I and II subsets. For this task, we focus on two models: Qwen3-235B-A22B Yang et al. [2025] and DeepSeek-R1 [DeepSeek-AI et al., 2025]. Since the AIME benchmark only contains 30 problems (15 per subset), we evaluate each model with 10 random seeds and average results. We otherwise use the same evaluation protocol as for LIMO, with the same early stopping and at most 16 tokens per answer during early stopping." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.344, + 0.827, + 0.483 + ], + "angle": 0, + "content": "We arrange our results in Figure 5 for OlympiadBench and Figure 6 for LiveCodeBench and AIME'25. Overall, Hogwild! Inference shows similar improvements to what we observed earlier (Section 4.1). One atypical case is OlympiadBench Physics (Fig. 5 right) where Qwen3-14B stops improving after roughly 4096 tokens. Upon closer inspection, we found that the model does not break down, but overthinks the problem, improving some answers while replacing other correct answers with mistakes. Overall, the results show that the cache rotation tricks and the output structure from 3.2 can indeed be generalized across different models and benchmarks. Note, however, that due to the different output format we needed to apply slight alterations to individual model prompts: notably, QwQ-32B automatically inserts at the end of the prompt, while Qwen3 and Phi-4 do not, so we insert it manually before the common history header. We describe this in detail in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.489, + 0.471, + 0.504 + ], + "angle": 0, + "content": "4.3 Measuring the Ability to Collaborate" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.506, + 0.827, + 0.672 + ], + "angle": 0, + "content": "Now that we know that modern LLMs can collaborate in our zero-shot setting, it is natural to ask how well can they collaborate and what affects their ability. While this question deserves a more thorough investigation, we can still quantify how well LLMs collaborate under Hogwild! Inference. 
In this section, we analyze their \"collaborativeness\" using the LLM-as-a-Judge paradigm [Zheng et al., 2023a]: we feed collaborative traces into a GPT-4o [Hurst et al., 2024] model and prompt it to score behavior from 1 to 6, where \"1\" means no collaboration, \"3\" indicates basic task splitting and \"6\" represents a hypothetical optimal collaboration, never achieved in our analysis. We analyze LLM generations on the LIMO dataset with three models from Section 4.2. To control for differences in generation lengths, we compare only 4096-token prefixes from each worker. We compare three inference setups: i) independent generations as per self-consistency decoding; ii) restricted Hogwild! Inference where agents can only view each other's finished paragraphs, but not the current (incomplete) reasoning step, and iii) full Hogwild! Inference, with 2 agents in each setup." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.676, + 0.827, + 0.734 + ], + "angle": 0, + "content": "We summarize our scores in Figure 7: as expected, models that can see each other can collaborate and independent workers cannot. Interestingly, Hogwild! Inference with instant (token-wise) synchronization scores significantly higher than a version that can only see completed inference steps. In Appendix G we provide more detailed results, judge prompt, configurations and examples." + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.735, + 0.495, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.736, + 0.811, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.899, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Figure 6: Evaluation of Hogwild! Inference (2 workers) on LiveCodeBench v5 2024.08-2025.02 for QwQ, Phi-4-R+ and Qwen3 (left) and AIME'25 for larger models (right), dashed lines are baselines." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.947 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.175, + 0.054, + 0.488, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.219, + 0.49, + 0.289 + ], + "angle": 0, + "content": "Figure 7: Mean collaborativeness score from GPT-4o. No sync is independent generation, Step-wise is restricted Hogwild! where workers can only see each other's past steps, Token-wise is full Hogwild! with instant cache exchange." + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.054, + 0.827, + 0.097 + ], + "angle": 0, + "content": "Table 1: Inference benchmarks for Section 4.4. Columns denote sequence length. Rows with one worker are baselines, 2 & 4 workers use Hogwild!" + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.097, + 0.824, + 0.287 + ], + "angle": 0, + "content": "
# Workers | 1024 | 2048 | 4096 | 8192 | 16384
Tokens per second
1 | 20.1 | 20.0 | 19.7 | 19.3 | 18.3
2 | 36.3 | 36.2 | 36.1 | 36.1 | 34.3
4 | 68.9 | 69.0 | 69.1 | 66.3 | 60.3
Latency per forward (ms)
1 | 49.7 | 50.0 | 50.9 | 51.7 | 54.5
2 | 55.1 | 55.3 | 55.4 | 55.3 | 58.3
4 | 58.1 | 58.0 | 57.9 | 60.4 | 66.4
Time to generate # tokens (s)
1 | 52.3 | 103.3 | 206.5 | 416.7 | 853.5
2 | 29.9 | 58.1 | 114.6 | 228.0 | 454.4
4 | 16.7 | 31.6 | 61.3 | 120.7 | 239.2
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.298, + 0.281, + 0.311 + ], + "angle": 0, + "content": "4.4 Inference" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.315, + 0.827, + 0.441 + ], + "angle": 0, + "content": "To recall, our main motivation for proposing Hogwild! Inference is to enable faster reasoning through collaboration. Since the actual inference speed depends on many factors (GPU(s), software, precision, etc), we previously focused on evaluating inference speed in terms of the number of consecutive forward passes and not inference time. Here, in turn, we report the actual inference speed in terms of latency and tokens per second. We evaluate three setups: baseline sequential inference and Hogwild! Inference for two and four workers. We run baseline with FlashAttention v2 (FlashDecoding) and our algorithm with custom GPU kernels using the approach described in Section 3.4. We use a NVIDIA L40S GPU and AMD EPYC 9534 and benchmark the official quantized version of QwQ-32B-AWQ for all setups." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.446, + 0.828, + 0.545 + ], + "angle": 0, + "content": "Our results in Table 1 show that, for the 32B model, Hogwild! Inference can generate tokens nearly twice as fast for 2 workers and about \\(3.2 - 3.6 \\times\\) faster for 4 workers, which means that the accuracy gains from earlier sections can translate to faster solutions. We also report the average over GPUs, as well the \\(10\\%\\) and \\(90\\%\\) percentiles, in Figure 8 (left). Overall, Hogwild! Inference has a small constant latency offset compared to the baseline and near-linear scaling as we increase the number of workers. While our implementation already shows significant performance gains, we discuss several ways to scale it further in Appendix B, including in distributed setting." + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.561, + 0.49, + 0.759 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.56, + 0.822, + 0.758 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.77, + 0.825, + 0.827 + ], + "angle": 0, + "content": "Figure 8: (left) Duration of a single forward pass (generating \\( W \\) new tokens) for Qwen/QwQ-32B-AWQ on L40S, given the total number of tokens already in the KV cache. The dotted lines indicate the \\( 10\\% \\) and \\( 90\\% \\) quantiles over multiple repetitions on different GPUs. (right) Accuracy versus average generation time on the LIMO dataset task using QwQ-32B-AWQ under different token budgets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.826, + 0.913 + ], + "angle": 0, + "content": "As the figure shows, there is some overhead associated with preparing multiple caches (i.e., even at an empty cache, Hogwild! is slightly slower than pure FlashAttention). A more detailed breakdown is presented in Table 2, which shows the duration of the attention kernel (or attention+rope for Hogwild!), as well as the total setup time, that is, the time spent preparing the data structures needed for Hogwild! The latter needs to be done only once per forward pass, instead of once per transformer" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.212, + 0.098, + 0.784, + 0.112 + ], + "angle": 0, + "content": "Table 2: Breakdown of Hogwild! overhead compared to pure FlashAttention inference." 
+ }, + { + "type": "table", + "bbox": [ + 0.284, + 0.113, + 0.714, + 0.212 + ], + "angle": 0, + "content": "
KV Length | Attention (×64) | Setup (×1)
 | FA | W2 | W4 | FA | W2 | W4
300 | 11μs | 45μs | 45μs | - | 1.9ms | 3.9ms
4096 | 35μs | 65μs | 82μs | - | 1.9ms | 3.9ms
8192 | 55μs | 92μs | 123μs | - | 1.9ms | 3.9ms
16384 | 100μs | 140μs | 203μs | - | 1.9ms | 3.9ms
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.238, + 0.825, + 0.267 + ], + "angle": 0, + "content": "block. For long contexts, the attention call is about \\(40\\%\\) and \\(100\\%\\) slower for generating with 2 and 4 workers, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.272, + 0.827, + 0.356 + ], + "angle": 0, + "content": "Additionally, we report accuracy results over time using our kernel on the official quantized version of QwQ-32B-AWQ on LIMO dataset. The experiments were conducted on NVIDIA L40S GPUs. For comparison, we run the baseline (FlashAttention v2) and Hogwild with 2 workers, maintaining the same experimental setup as detailed in Section 4.1. We report our results in Figure 8 (right). As illustrated, our method achieves better accuracy results on the LIMO dataset within the same time budget." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.367, + 0.295, + 0.383 + ], + "angle": 0, + "content": "5 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.827, + 0.46 + ], + "angle": 0, + "content": "In this work, we investigated the ability of large language models to perform parallel generation where multiple instances synchronize through a shared, dynamically-updated attention cache. Surprisingly, our results show that LLMs can operate effectively in parallel across dynamically updated attention cache without specialized fine-tuning. We demonstrate that parallel inference threads can explicitly coordinate, leveraging each other's partial solutions to enable collaborative problem-solving." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.466, + 0.827, + 0.537 + ], + "angle": 0, + "content": "The proposed method, called Hogwild! Inference, allows multiple inference threads to concurrently access and update a shared attention cache. By leveraging Rotary Position Embeddings (RoPE), our approach introduces minimal computational overhead while ensuring instant synchronization—newly generated KV cache entries becoming immediately visible to all threads. This \"telepathic\" communication opens up new possibilities for efficient parallel generation with LLMs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.544, + 0.827, + 0.588 + ], + "angle": 0, + "content": "**Limitations** Our method exhibits reduced robustness when applied to smaller models or longer contexts, suggesting scalability challenges across model sizes and sequence lengths. Additionally, our automatic evaluation metric relies on a proprietary model, which may limit reproducibility." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.596, + 0.827, + 0.708 + ], + "angle": 0, + "content": "Future work In future work, we plan to investigate methods for improving collaboration between threads, such as fine-tuning and reinforcement learning. We also plan to investigate connections to alternative parallel inference schemes, such as speculative decoding [Leviathan et al., 2023], and parallel token generation methods like Medusa [Cai et al., 2024] or EAGLE [Li et al., 2024b]. Finally, it is interesting to consider alternative shared memory structures: allowing workers to insert new steps in any order, selectively delete (forget) steps, or solving programming and tool use tasks with a shared IDE and file-system. The KV cache rearrangement used in Hogwild! Inference could also allow humans to interact with agents asynchronously, giving clarifications and feedback during reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.716, + 0.825, + 0.746 + ], + "angle": 0, + "content": "Acknowledgements: We thank Vladimir Malinovskii for his help with brainstorming, helpful feedback and suggesting future work directions. We also thank Philip Zmushko for proofreading." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.765, + 0.27, + 0.781 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.789, + 0.827, + 0.872 + ], + "angle": 0, + "content": "Marah Abdin, Sahaj Agarwal, Ahmed Awadallah, Vidhisha Balachandran, Harkirat Behl, Lingjiao Chen, Gustavo de Rosa, Suriya Gunasekar, Mojan Javaheripi, Neel Joshi, Piero Kauffmann, Yash Lara, Caio Cesar Teodoro Mendes, Arindam Mitra, Besmira Nushi, Dimitris Papailiopoulos, Olli Saarikivi, Shital Shah, Vaishnavi Shrivastava, Vibhav Vineet, Yue Wu, Safoora Yousefi, and Guoqing Zheng. Phi-4-reasoning technical report, 2025. URL https://arxiv.org/abs/2504.21318." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.884, + 0.825, + 0.911 + ], + "angle": 0, + "content": "AIME. Aime problems and solutions. https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions, 2025." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.789, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.149 + ], + "angle": 0, + "content": "Reza Yazdani Aminabadi, Samyam Rajbhandari, Minjia Zhang, Ammar Ahmad Awan, Cheng Li, Du Li, Elton Zheng, Jeff Rasley, Shadeen Smith, Olatunj Ruwase, and Yuxiong He. Deepspeed inference: Enabling efficient inference of transformer models at unprecedented scale, 2022. URL https://arxiv.org/abs/2207.00032." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.185 + ], + "angle": 0, + "content": "Anthropic. Claude 3.7 sonnet and claude code, 2024. URL https://www.anthropic.com/news/claude-3-7-sonnet. Accessed: 2025.04.02." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.193, + 0.825, + 0.236 + ], + "angle": 0, + "content": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. In Proceedings of the 3rd International Conference on Learning Representations (ICLR), 2015. URL https://arxiv.org/abs/1409.0473." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.243, + 0.826, + 0.287 + ], + "angle": 0, + "content": "Yushi Bai, Jiajie Zhang, Xin Lv, Linzhi Zheng, Siqi Zhu, Lei Hou, Yuxiao Dong, Jie Tang, and Juanzi Li. Longwriter: Unleashing 10,000+ word generation from long context llms. ArXiv, abs/2408.07055, 2024. URL https://api_semanticscholar.org/CorpusID:271859903." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.293, + 0.825, + 0.338 + ], + "angle": 0, + "content": "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.344, + 0.826, + 0.374 + ], + "angle": 0, + "content": "Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer: The long-document transformer, 2020. URL https://arxiv.org/abs/2004.05150." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.381, + 0.824, + 0.411 + ], + "angle": 0, + "content": "Tianle Cai, Xinyun Li, Zhiruo Wang, Yuhuai Wang, and Dawn Song. Medusa: Simple llm inference acceleration framework with multiple decoding heads. arXiv preprint arXiv:2401.10774, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.418, + 0.827, + 0.503 + ], + "angle": 0, + "content": "Justin Chen, Swarnadeep Saha, and Mohit Bansal. ReConcile: Round-table conference improves reasoning via consensus among diverse LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7066–7085, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.381. URL https://aclanthology.org/2024.acl-long.381/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.51, + 0.825, + 0.552 + ], + "angle": 0, + "content": "Mouxiang Chen, Binyuan Hui, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Jianling Sun, Junyang Lin, and Zhongxin Liu. Parallel scaling law for language models, 2025. URL https://arxiv.org/abs/2505.10475." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.56, + 0.827, + 0.617 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.625, + 0.827, + 0.697 + ], + "angle": 0, + "content": "Roi Cohen, May Hamri, Mor Geva, and Amir Globerson. LM vs LM: Detecting factual errors via cross examination. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12621-12640, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.778. URL https://aclanthology.org/2023.emnlp-main.778/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.704, + 0.827, + 0.746 + ], + "angle": 0, + "content": "Tri Dao, Daniel Haziza, Francisco Massa, and Grigory Sizov. Flash-decoding for long-context inference. https://crfm.stanford.edu/2023/10/12/flashdecoding.html, 2023. Accessed: 2025-05-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.754, + 0.827, + 0.81 + ], + "angle": 0, + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, and Xiao Bi et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.818, + 0.827, + 0.862 + ], + "angle": 0, + "content": "Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, Xianglong Liu, and Dacheng Tao. Dynamic parallel tree search for efficient ltm reasoning, 2025. URL https://arxiv.org/abs/2502.16235." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Yilun Du, Shuang Li, Antonio Torralba, Joshua B. Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. In *Forty-first International Conference on Machine Learning*, 2023. 
URL https://openreview.net/forum?id=zj7YuTE4t8." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.12 + ], + "angle": 0, + "content": "Elliot E. Entin and Daniel Serfaty. Adaptive team coordination. Human Factors, 41(2):312-325, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.129, + 0.825, + 0.16 + ], + "angle": 0, + "content": "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.169, + 0.826, + 0.199 + ], + "angle": 0, + "content": "In Gim, Seung seob Lee, and Lin Zhong. Asynchronous llm function calling, 2024. URL https://arxiv.org/abs/2412.07017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.208, + 0.825, + 0.251 + ], + "angle": 0, + "content": "Google DeepMind. Gemini 2.5: Our Newest Gemini Model with Thinking. https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#gemini-2-5-thinking, 2025. Accessed: 2025-04-07." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.26, + 0.826, + 0.317 + ], + "angle": 0, + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.327, + 0.826, + 0.37 + ], + "angle": 0, + "content": "Chan-Jan Hsu, Davide Buffelli, Jamie McGowan, Feng-Ting Liao, Yi-Chang Chen, Sattar Vakili, and Da shan Shiu. Group think: Multiple concurrent reasoning agents collaborating at token level granularity, 2025. URL https://arxiv.org/abs/2505.11107." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.38, + 0.826, + 0.422 + ], + "angle": 0, + "content": "Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.432, + 0.555, + 0.447 + ], + "angle": 0, + "content": "Edwin Hutchins. Cognition in the Wild. MIT Press, 1995." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.457, + 0.826, + 0.5 + ], + "angle": 0, + "content": "Sam Ade Jacobs, Masahiro Tanaka, Chengming Zhang, Minjia Zhang, Shuaiwen Leon Song, Samyam Rajbhandari, and Yuxiong He. Deepspeed ulysses: System optimizations for enabling training of extreme long sequence transformer models. arXiv preprint arXiv:2309.14509, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.51, + 0.826, + 0.565 + ], + "angle": 0, + "content": "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livecodebench: Holistic and contamination free evaluation of large language models for code, 2024. URL https://arxiv.org/abs/2403.07974." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.576, + 0.826, + 0.632 + ], + "angle": 0, + "content": "Tian Jin, Ellie Y. Cheng, Zack Ankner, Nikunj Saunshi, Blake M. 
Elias, Amir Yazdanbakhsh, Jonathan Ragan-Kelley, Suvinay Subramanian, and Michael Carbin. Learning to keep a promise: Scaling language model decoding parallelism with learned asynchronous decoding, 2025. URL https://arxiv.org/abs/2502.11517." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.643, + 0.826, + 0.685 + ], + "angle": 0, + "content": "Sehoon Kim, Suhong Moon, Ryan Tabrizi, Nicholas Lee, Michael W Mahoney, Kurt Keutzer, and Amir Gholami. An llm compiler for parallel function calling. In *Forty-first International Conference on Machine Learning*, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.695, + 0.826, + 0.738 + ], + "angle": 0, + "content": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. ArXiv, abs/2205.11916, 2022. URL https://apisemantic scholar.org/CorpusID:249017743." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.748, + 0.826, + 0.846 + ], + "angle": 0, + "content": "Aobo Kong, Shiwan Zhao, Hao Chen, Qicheng Li, Yong Qin, Ruiqi Sun, Xin Zhou, Enzhi Wang, and Xiaohang Dong. Better zero-shot reasoning with role-play prompting. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 4099-4113, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.228. URL https://aclanthology.org/2024.naacl-long.228/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.856, + 0.826, + 0.912 + ], + "angle": 0, + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pages 611-626, 2023." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding. In International Conference on Machine Learning, pages 19274-19286. PMLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.129, + 0.826, + 0.159 + ], + "angle": 0, + "content": "Junyou Li, Qin Zhang, Yangbin Yu, Qiang Fu, and Deheng Ye. More agents is all you need. Transactions on Machine Learning Research, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.168, + 0.825, + 0.211 + ], + "angle": 0, + "content": "Shen Li, Yanli Zhao, Rohan Varma, Omkar Salpekar, Pieter Noordhuis, Teng Li, Adam Paszke, Jeff Smith, Brian Vaughan, Pritam Damania, and Soumith Chintala. Pytorch distributed: Experiences on accelerating data parallel training, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.219, + 0.825, + 0.262 + ], + "angle": 0, + "content": "Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle: Speculative sampling requires rethinking feature uncertainty. In Proceedings of the 41st International Conference on Machine Learning, pages 31147-31162. PMLR, 2024b." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.272, + 0.826, + 0.315 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. ArXiv, abs/2305.20050, 2023. URL https://api_semanticscholar.org/CorpusID:258987659." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.323, + 0.826, + 0.366 + ], + "angle": 0, + "content": "Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.375, + 0.826, + 0.404 + ], + "angle": 0, + "content": "Hao Liu, Matei Zaharia, and Pieter Abbeel. Ring attention with blockwise transformers for near-infinite context, 2023. URL https://arxiv.org/abs/2310.01889." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.414, + 0.825, + 0.443 + ], + "angle": 0, + "content": "Mingdao Liu, Aohan Zeng, Bowen Wang, Peng Zhang, Jie Tang, and Yuxiao Dong. Apar: Llms can do auto-parallel auto-regressive decoding. arXiv preprint arXiv:2401.06761, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.451, + 0.825, + 0.494 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.503, + 0.827, + 0.546 + ], + "angle": 0, + "content": "Xuefei Ning, Zinan Lin, Zixuan Zhou, Zifu Wang, Huazhong Yang, and Yu Wang. Skeleton-ofthought: Prompting LLMs for efficient parallel generation. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=mqVgBbNCm9." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.556, + 0.825, + 0.598 + ], + "angle": 0, + "content": "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, and Alex Beutel et al. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.607, + 0.826, + 0.649 + ], + "angle": 0, + "content": "Jiayi Pan, Xiuyu Li, Long Lian, Charlie Snell, Yifei Zhou, Adam Yala, Trevor Darrell, Kurt Keutzer, and Alane Suhr. Learning adaptive parallel reasoning with language models. arXiv preprint arXiv:2504.15466, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.659, + 0.826, + 0.743 + ], + "angle": 0, + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. PyTorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems (NeurIPS). Neural Information Processing Systems Foundation, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.752, + 0.824, + 0.781 + ], + "angle": 0, + "content": "Bowen Peng, Jeffrey Quesnelle, Honglu Fan, and Enrico Shippole. Yarn: Efficient context window extension of large language models, 2023. 
URL https://arxiv.org/abs/2309.00071." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.79, + 0.826, + 0.832 + ], + "angle": 0, + "content": "Xiao Pu, Michael Saxon, Wenyue Hua, and William Yang Wang. Thoughtterminator: Benchmarking, calibrating, and mitigating overthinking in reasoning models, 2025. URL https://arxiv.org/ abs/2504.13367." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.842, + 0.826, + 0.912 + ], + "angle": 0, + "content": "Yujia Qin, Shi Liang, Yining Ye, Kunlun Zhu, Lan Yan, Ya-Ting Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Runchu Tian, Ruobing Xie, Jie Zhou, Marc H. Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. Toollm: Facilitating large language models to master 16000+ real-world apis. ArXiv, abs/2307.16789, 2023. URL https://api-semanticscholar.org/ CorpusID:260334759." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.129, + 0.827, + 0.185 + ], + "angle": 0, + "content": "Jack Rae and Ali Razavi. Do transformers need deep long-range memory? In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, Online, July 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.acl-main.672." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.195, + 0.827, + 0.266 + ], + "angle": 0, + "content": "Benjamin Recht, Christopher Re, Stephen Wright, and Feng Niu. Hogwild!: A lock-free approach to parallelizing stochastic gradient descent. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K.Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc., 2011. URL https://proceedings.neurips.cc/paper_files/paper/2011/file/218a0aefd1d1a4be65601cc6ddc1520e-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.274, + 0.827, + 0.33 + ], + "angle": 0, + "content": "Timo Schick, Jane Dwivedi-Yu, Roberto Dessi, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. Toolformer: Language models can teach themselves to use tools. ArXiv, abs/2302.04761, 2023. URL https://api_semanticscholar.org/CorpusID:256697342." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.339, + 0.827, + 0.383 + ], + "angle": 0, + "content": "Yongliang Shen, Kaitao Song, Xu Tan, Dongsheng Li, Weiming Lu, and Yue Ting Zhuang. Hugging-gpt: Solving ai tasks with chatgpt and its friends in hugging face. ArXiv, abs/2303.17580, 2023. URL https://api_semanticscholar.org/CorpusID:257833781." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.391, + 0.827, + 0.434 + ], + "angle": 0, + "content": "Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.442, + 0.825, + 0.472 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 
Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.481, + 0.825, + 0.537 + ], + "angle": 0, + "content": "Stanford HAI. How a “crazy idea” overturned the conventional rules of machine learning, 2023. URL https://hai.stanford.edu/news/how-crazy-idea-overturned-conventional-rules-machine-learning. Accessed: [Insert Date]." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.545, + 0.825, + 0.576 + ], + "angle": 0, + "content": "Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.584, + 0.827, + 0.653 + ], + "angle": 0, + "content": "Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V. Le, Ed H. Chi, Denny Zhou, and Jason Wei. Challenging big-bench tasks and whether chain-of-thought can solve them. In Annual Meeting of the Association for Computational Linguistics, 2022. URL https://api_semanticscholar.org/CorpusID: 252917648." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.663, + 0.825, + 0.692 + ], + "angle": 0, + "content": "Yashar Talebirad and Amirhossein Nadiri. Multi-agent collaboration: Harnessing the power of intelligent LLM agents. CoRR, abs/2306.03314, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.7, + 0.814, + 0.716 + ], + "angle": 0, + "content": "A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.725, + 0.825, + 0.768 + ], + "angle": 0, + "content": "Junlin Wang, WANG Jue, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities. In The Thirteenth International Conference on Learning Representations, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.776, + 0.827, + 0.861 + ], + "angle": 0, + "content": "Qineng Wang, Zihao Wang, Ying Su, Hanghang Tong, and Yangqiu Song. Rethinking the bounds of LLM reasoning: Are multi-agent discussions the key? In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6106-6131, Bangkok, Thailand, August 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.331. URL https://aclanthology.org/2024.acl-long.331/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H. Chi, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. ArXiv, abs/2203.11171, 2022. URL https://api-semanticscholar.org/CorpusID:247595263." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.175 + ], + "angle": 0, + "content": "Yiming Wang, Zhuosheng Zhang, Pei Zhang, Baosong Yang, and Rui Wang. Meta-reasoning: Semantics-symbol deconstruction for large language models. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 622–643, Bangkok, Thailand, August 2024c. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.34. URL https://aclanthology.org/2024-findings-acl.34/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.186, + 0.825, + 0.229 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.239, + 0.825, + 0.282 + ], + "angle": 0, + "content": "Guangxuan Xiao, Yuandong Tian, Beidi Chen, Song Han, and Mike Lewis. Efficient streaming language models with attention sinks. In International Conference on Learning Representations (ICLR), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.292, + 0.825, + 0.418 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.428, + 0.825, + 0.554 + ], + "angle": 0, + "content": "An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, Chujie Zheng, Dayiheng Liu, Fan Zhou, Fei Huang, Feng Hu, Hao Ge, Haoran Wei, Huan Lin, Jialong Tang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jing Zhou, Jingren Zhou, Junyang Lin, Kai Dang, Keqin Bao, Kexin Yang, Le Yu, Lianghao Deng, Mei Li, Mingfeng Xue, Mingze Li, Pei Zhang, Peng Wang, Qin Zhu, Rui Men, Ruize Gao, Shixuan Liu, Shuang Luo, Tianhao Li, Tianyi Tang, Wenbiao Yin, Xingzhang Ren, Xinyu Wang, Xinyu Zhang, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yinger Zhang, Yu Wan, Yuqiong Liu, Zekun Wang, Zeyu Cui, Zhenru Zhang, Zhipeng Zhou, and Zihan Qiu. Qwen3 technical report, 2025. URL https://arxiv.org/abs/2505.09388." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.563, + 0.825, + 0.607 + ], + "angle": 0, + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. ArXiv, abs/2210.03629, 2022. URL https://api_semanticscholar.org/CorpusID:252762395." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.617, + 0.825, + 0.66 + ], + "angle": 0, + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. ArXiv, abs/2305.10601, 2023. URL https://api_semanticscholar.org/CorpusID:258762525." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.67, + 0.825, + 0.701 + ], + "angle": 0, + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.709, + 0.825, + 0.74 + ], + "angle": 0, + "content": "Yijiong Yu. Accelerate parallelizable reasoning via parallel decoding within one sequence, 2025. URL https://arxiv.org/abs/2503.20533." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.749, + 0.825, + 0.793 + ], + "angle": 0, + "content": "Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Zhihan Guo, Yufei Wang, Irwin King, Xue Liu, and Chen Ma. What, how, where, and how well? a survey on test-time scaling in large language models. arXiv preprint arXiv:2503.24235, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.802, + 0.825, + 0.859 + ], + "angle": 0, + "content": "Zhenyu Zhang, Ying Sheng, Tianyi Zhou, Tianlong Chen, Lianmin Zheng, Ruisi Cai, Zhao Song, Yuandong Tian, Christopher Ré, Clark Barrett, et al. H2o: Heavy-hitter oracle for efficient generative inference of large language models. Advances in Neural Information Processing Systems, 36:34661-34710, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Zhuosheng Zhang, Aston Zhang, Mu Li, and Alexander J. Smola. Automatic chain of thought prompting in large language models. ArXiv, abs/2210.03493, 2022. URL https://api.sementicscholar.org/CorpusID:252762275." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.135 + ], + "angle": 0, + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.143, + 0.826, + 0.187 + ], + "angle": 0, + "content": "Lianmin Zheng, Liangsheng Yin, Zhiqiang Xie, Jeff Huang, Chuyue Sun, Cody Hao Yu, Shiyi Cao, Christos Kozyrakis, Ion Stoica, Joseph E. Gonzalez, Clark Barrett, and Ying Sheng. Efficiently programming large language models using sglang, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.195, + 0.825, + 0.238 + ], + "angle": 0, + "content": "Tong Zheng, Hongming Zhang, Wenhao Yu, Xiaoyang Wang, Runpeng Dai, Rui Liu, Huiwen Bao, Chengsong Huang, Heng Huang, and Dong Yu. Parallel-r1: Towards parallel thinking via reinforcement learning, 2025. URL https://arxiv.org/abs/2509.07980." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Pei Zhou, Jay Pujara, Xiang Ren, Xinyun Chen, Heng-Tze Cheng, Quoc V. Le, Ed H. Chi, Denny Zhou, Swaroop Mishra, and Huaixiu Steven Zheng. SELF-DISCOVER: Large language models self-compose reasoning structures. In Amir Globerson, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. Tomczak, and Cheng Zhang, editors, Advances in Neural Information Processing Systems 37 (NeurIPS 2024), Vancouver, BC, Canada, December 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.336, + 0.108 + ], + "angle": 0, + "content": "A Cache Layouts" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.825, + 0.151 + ], + "angle": 0, + "content": "In this section, we consider three cache arrangements, shown in Figure 9, with progressively more complex structure." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.162, + 0.357, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.162, + 0.54, + 0.278 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.556, + 0.162, + 0.822, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.282, + 0.825, + 0.327 + ], + "angle": 0, + "content": "Figure 9: Three cache layouts described in Section 3.2: interleaved with step-wise synchrony (left), simple contiguous layout (middle) and combined with token-wise synchrony (right). All layouts are shown from Alice's point of view." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.827, + 0.385 + ], + "angle": 0, + "content": "Contiguous layout (token-wise) is the simplest possible layout where each worker appends to their own sequence blob of tokens and sees other workers' token representations as past keys and values. This layout is inspired by collaborative text editors such as Google Docs or Overleaf." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.389, + 0.825, + 0.446 + ], + "angle": 0, + "content": "As described earlier in Section 3.1, each worker arranges the other workers' thoughts in a different order. They see the common prompt cache first, then the caches of all other workers (excluding themselves8), then their own cache as the immediate previous tokens. That way, each worker predicts the next token for their own cache." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.825, + 0.537 + ], + "angle": 0, + "content": "Interleaved layout (step-wise) can be seen as analogous to group chat services such as Slack or Discord. In this layout, workers generate tokens in private until they finish a reasoning step9, then add it to a shared \"history\". The history contains past reasoning steps of each LLM instance in the order of their completion. Whenever a worker completes a reasoning step, their KV cache entries are moved to the end of the shared history cache block with the proper rotation, then their local cache is reset for a new step." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.541, + 0.827, + 0.584 + ], + "angle": 0, + "content": "In this setup, the workers only see each other's outputs in full steps, not after every token. However, they do not wait for each other to complete their steps. Instead, each worker keeps generating new tokens and occasionally receives additional key-value pairs inserted into its cache." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.589, + 0.827, + 0.647 + ], + "angle": 0, + "content": "Combined layout (token-wise) is a mixture of the first two, and is the main layout used in the paper. The LLM instances generate steps that are accumulated in a shared history, as in the interleaved layout.
However, they do not generate these steps in private, but can instantly see each other's current progress, as in the contiguous layout." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.651, + 0.825, + 0.695 + ], + "angle": 0, + "content": "We can view the first two layouts as ablated versions of this combined one: the contiguous layout lacks the shared history, and the interleaved layout lacks immediate synchronization. We compare these three layouts empirically in Appendix E.1 to better quantify the effect of each design choice." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.713, + 0.406, + 0.731 + ], + "angle": 0, + "content": "B Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.827, + 0.788 + ], + "angle": 0, + "content": "Here we discuss additional implementation details and possible alternatives. To recall Section 3.4, Hogwild! inference can be implemented as a standard batched inference with a special KV \"cache\" that facilitates cross-worker attention." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.793, + 0.827, + 0.837 + ], + "angle": 0, + "content": "Cache blocks. The Hogwild! cache is split into blocks, typically one block for each worker and an additional \"common\" block for prompt and past steps. The blocks contain key-value pairs for all model layers, but since all layers are treated equally, we describe the cache behavior for a single layer." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.846, + 0.825, + 0.886 + ], + "angle": 0, + "content": "When extending this layout to more than 2 workers, each worker sees the key-value memories of everyone except themselves. For instance, given 3 workers A, B, and C, worker B will see a version of the cache that contains the prompt, outputs of workers A and C, and finally, B's own memory. Likewise, A sees B & C, then A." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.886, + 0.825, + 0.913 + ], + "angle": 0, + "content": "9We define a reasoning step as any amount of text that ends with a complete sentence, e.g. a dot or a question mark, and then a double newline (\"\\n\\n\") in all our experiments, though it may vary by the model." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.846, + 0.825, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.148 + ], + "angle": 0, + "content": "Within each cache block, attention keys and values are stored as though they were at positions 0, 1, ..., len(block), regardless of the block's actual position in the full cache. During inference, we account for actual positions by rotating attention queries to the relative difference in positions (as described in Section 3.4)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.154, + 0.827, + 0.224 + ], + "angle": 0, + "content": "Adding new tokens to the cache. During attention forward pass, the first thing that we do is encode the new tokens for each worker and append their keys and values to the respective cache blocks. When using RoPE, the keys are rotated not to their actual positions, but to their index within their cache block (e.g. Alice's tokens). During one inference step, these indices will be equal across all model layers — we can compute the RoPE sin and cos tensors once and reuse them between layers." 
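 + }, + { + "type": "text", + "angle": 0, + "content": "To make the block-local position bookkeeping above concrete, the following is a minimal PyTorch-style sketch. It is an illustration under simplifying assumptions (a single attention head, one new query token, no batching or optimized kernels), and the names rope_angles, CacheBlock and blockwise_attention are introduced here for exposition rather than taken from the actual implementation. Keys are stored with RoPE applied at their index inside their own block; at attention time the query is re-rotated once per block by its position relative to that block's first token, which preserves the usual relative-distance property of RoPE, and a single softmax is taken over the scores gathered from all blocks.

import math
import torch

def rope_angles(pos, head_dim, base=10000.0):
    # Rotary-embedding angles for integer positions pos -> (len(pos), head_dim)
    inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2, dtype=torch.float32) / head_dim))
    freqs = pos.to(torch.float32)[:, None] * inv_freq[None, :]
    return torch.cat([freqs, freqs], dim=-1)

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat([-x2, x1], dim=-1)

def apply_rope(x, pos, head_dim):
    ang = rope_angles(pos, head_dim)
    return x * ang.cos() + rotate_half(x) * ang.sin()

class CacheBlock:
    # Keys and values are stored as though their positions were 0, 1, ..., len(block)-1.
    def __init__(self):
        self.k, self.v = [], []

    def append(self, k_new, v_new, head_dim):
        local_pos = torch.tensor([len(self.k)])
        self.k.append(apply_rope(k_new, local_pos, head_dim))
        self.v.append(v_new)

def blockwise_attention(q, q_global_pos, blocks, block_starts, head_dim):
    # Attend from one new (un-rotated) query over several cache blocks.
    # block_starts[i] is the global position of block i's first token from the
    # querying worker's point of view; the query is re-rotated once per block.
    scores, values = [], []
    for block, start in zip(blocks, block_starts):
        if not block.k:
            continue
        keys = torch.cat(block.k, dim=0)            # (len, head_dim), block-local RoPE
        vals = torch.cat(block.v, dim=0)
        rel = torch.tensor([q_global_pos - start])  # query position relative to block start
        q_rot = apply_rope(q, rel, head_dim)        # one extra rotation per block
        scores.append(q_rot @ keys.T / math.sqrt(head_dim))
        values.append(vals)
    weights = torch.softmax(torch.cat(scores, dim=-1), dim=-1)  # one softmax across all blocks
    return weights @ torch.cat(values, dim=0)

Because the relative offsets are the same in every layer, the corresponding sin and cos tensors can be computed once per decoding step and reused across layers, as noted above."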
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.23, + 0.828, + 0.355 + ], + "angle": 0, + "content": "Rotating queries. Unlike in traditional attention, Hogwild! inference rotates query vectors multiple times for each block. Before forward pass, we calculate the difference in positions between each worker's new token (from that worker's point of view) and the first token in each KV cache block. In our main inference scenario, all \\( n \\) workers are allowed to view each other's cache blocks plus an additional bock for prompt and history, for a total of \\( n \\cdot (n + 1) \\) query rotations with exactly \\( n \\) queries for each block. These relative positions are also equal across all layers, so we can reuse the sin and cos tensors similarly to how they are reused for keys. Note that the number of query rotations for all-to-all attention is quadratic in \\( n \\), but it does not increase the overall time complexity of attention dot product, which is already quadratic in the number of tokens, which is always greater than \\( n \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.361, + 0.828, + 0.513 + ], + "angle": 0, + "content": "Attention kernel. Once we have all query rotations, we can calculate the scaled dot-product attention as usual. As our cache is naturally partitioned into smaller segments as described above, Hogwild! attention is similar to paged attention, except that each page (i.e., cache block) uses a differently rotated version of the query. A significant challenge for efficient attention in the inference setup is that for optimal data reuse, one would want to handle each KV head inside a single streaming multiprocessor (SM), so that the KV cache needs to be loaded exactly once. However, this would leave large parts of the GPU unused, as the number of KV heads can be much lower than the number of SMs. Therefore, one has to employ a form of sequence parallelism within a single GPU, in which different SMs handle a subset of the sequence for one KV head, and a second phase handles the (cheap) reduction over partial results. Such a split-k type computation is implemented, for example, in Flash-Decoding [Dao et al., 2023]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.519, + 0.827, + 0.561 + ], + "angle": 0, + "content": "Even though the different cache blocks used in Hogwild! would appear to be convenient points to split work across SMs, in a typical inference scenario, this would lead to very imbalanced workloads. Thus, we do not split based on cache blocks, and instead assign each SM the same number of KV entries." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.567, + 0.827, + 0.638 + ], + "angle": 0, + "content": "Fine-tuning and re-encoding considerations. While our work mainly focuses on inference, fine-tuning models to perform Hogwild! inference is an interesting engineering problem. From the computational point of view, the main difference between LLM inference and fine-tuning is that inference is sequential, whereas fine-tuning can compute all positions in parallel. To fine-tune in our setup, one would want to replicate the attention computations from consecutive inference steps." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.643, + 0.827, + 0.741 + ], + "angle": 0, + "content": "To achieve this, we record the position differences between queries and each respective cache block from each of \\( t \\) inference steps, and how many tokens were in each block during that query, for a total of \\( 2 \\cdot t \\cdot n \\cdot (n + 1) \\) integers (negligible compared to model parameters and activations). Recall that the cache blocks always store keys and values at positions 0, 1, ..., len(block). During the forward pass, these positions can be used to construct a 4D attention mask10 to compute attention for all steps in parallel. The backward pass also runs in parallel with PyTorch autograd [Paszke et al., 2019]. A recent work by Zheng et al. [2025] explores finetuning for parallel inference in more detail." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.747, + 0.827, + 0.789 + ], + "angle": 0, + "content": "In addition to fine-tuning, this technique can potentially be used during inference to restore generation after it was evicted from an inference server, e.g. due to preemption or a hardware error mid-decoding. It can also be used to re-encode in-context learning examples if they use Hogwild! inference." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.795, + 0.827, + 0.879 + ], + "angle": 0, + "content": "Attention variants. Some of the recently introduced LLMs use attention variants such as Local (windowed) Attention [Rae and Razavi, 2020, Beltagy et al., 2020] or Multihead Latent Attention (MLA) [Liu et al., 2024a]. These attention variants can also be adapted for use with Hogwild! inference with minor code modifications. For local attention, queries can \"skip\" blocks that are outside their local window. Similarly for MLA, we can calculate compressed latent vectors within each cache block and adapt the existing MLA code to accumulate attention weights across blocks." + }, + { + "type": "page_footnote", + "bbox": [ + 0.189, + 0.897, + 0.551, + 0.912 + ], + "angle": 0, + "content": "10https://huggingface.co/blog/poedator/4d-masks" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.162 + ], + "angle": 0, + "content": "Distributed Inference. Likewise, Hogwild! inference can be used in a distributed setup using the same strategies that work for traditional attention [Shoeybi et al., 2019, Aminabadi et al., 2022]. For pipeline parallelism, each device stores cache blocks for its local subset of model layers. Likewise, for tensor parallelism, each device stores past keys of all cache blocks and layers, but only for a subset of attention heads within each layer, and runs inference using existing kernels." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.168, + 0.825, + 0.238 + ], + "angle": 0, + "content": "In principle, Hogwild! inference can also be combined with sequence parallelism [Jacobs et al., 2023, Liu et al., 2023], where each device stores a KV cache for a subset of tokens. One intuitive way to partition the KV cache between GPUs is to assign each device to run one or several \"workers\" and keep the KVs generated by these workers. Since Hogwild! workers generate tokens at the same rate, each device will store the same amount of KVs and query other devices for cross-worker attention." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.825, + 0.34 + ], + "angle": 0, + "content": "When computing Hogwild!
concurrent attention with sequence parallelism, workers can exchange rotated queries using the All-to-All collective operation (Scatter/Gather) available in most frameworks [Li et al., 2020]. After that, each worker computes dot-products between the rotated queries and its local KV cache, and exchanges the partial results as in Ring Attention [Liu et al., 2023]. Note, however, that maximizing the performance of such sequence-parallel Hogwild! inference would require custom kernels that overlap computation and communication. In contrast, tensor-parallel (per-head) and pipeline-parallel (per-layer) partitioning can reuse single-GPU attention kernels." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.347, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Additional considerations. Conceptually, our approach is related to the recently introduced Paged Attention from vLLM [Kwon et al., 2023] and Radix Attention from SGLang [Zheng et al., 2023b]. These techniques are similar to ours in that they perform attention over slices of all tokens, e.g. when facilitating efficient parallel beam search inference, different hypotheses attend to different (but overlapping) subsets of the KV cache. However, unlike Radix Attention, our procedure attends to all segments at once (with different rotations) and aggregates results in the same softmax-weighted sum." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.442, + 0.491, + 0.459 + ], + "angle": 0, + "content": "C Prompting and formatting details" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.465, + 0.707, + 0.48 + ], + "angle": 0, + "content": "In this section, we describe the prompting and formatting details of our approach." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.492, + 0.544, + 0.506 + ], + "angle": 0, + "content": "Prompt for collaborative inference with two workers" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.519, + 0.397, + 0.532 + ], + "angle": 0, + "content": "Collaborative Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.532, + 0.799, + 0.582 + ], + "angle": 0, + "content": "You will collaborate on this problem with another assistant. You will write your thoughts simultaneously with them and collaborate without redundant work. You can collaborate by doing different parts of the problem, double-checking each other's results, trying different approaches, or any other means." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.583, + 0.785, + 0.606 + ], + "angle": 0, + "content": "There are 2 assistants, including yourself. You will refer to each other as Alice and Bob." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.608, + 0.785, + 0.645 + ], + "angle": 0, + "content": "You will solve the problem together, writing your thoughts in parallel. You will be able to see each other's past and current thoughts as we write them. You will see each other's previous steps as" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.646, + 0.453, + 0.658 + ], + "angle": 0, + "content": "**AssistantName [step]:** <...>" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.659, + 0.731, + 0.682 + ], + "angle": 0, + "content": "In the '#### Past steps' section, the automated system will gather the thoughts of Alice and Bob as you write them." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.684, + 0.785, + 0.733 + ], + "angle": 0, + "content": "After the '#### Work in progress (others)' section, you will see the other assistants' unfinished steps. They will write those steps concurrently with you.
You will take into account what they are doing. If another assistant gives you suggestions, you should address them." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.734, + 0.779, + 0.784 + ], + "angle": 0, + "content": "You will always see *other* assistants' incomplete thoughts first, and then, after '##### Work in progress (own)', your own current step. Other assistants will continue writing their thoughts in the background while you will continue writing your own." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.785, + 0.793, + 0.847 + ], + "angle": 0, + "content": "Since you and others both write your thoughts in parallel, you will initially see only partial (unfinished) thoughts that others will continue in parallel, while you write yours. Others' thoughts will appear at the end of their unfinished step, near <...>. Other assistants may write new thoughts while you are writing yours." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.848, + 0.793, + 0.897 + ], + "angle": 0, + "content": "You will use these partial thoughts to decide how best to collaborate without doing the same work twice. You will periodically check what other assistants are doing and you should adjust your actions based on what they are doing so you collaborate efficiently with them." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.1, + 0.787, + 0.19 + ], + "angle": 0, + "content": "If what you are currently doing is the same thing that another assistant has already done or is in the process of doing, you will stop (e.g. Alice may say 'Wait, I was doing the same as Bob ...') and change to a different task right away, so as to avoid doing redundant work. \n# Solve the following problem \nAlice and Bob, you will now solve the next problem together. Keep track of who does what work and communicate to avoid doing the same work twice." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.208, + 0.825, + 0.276 + ], + "angle": 0, + "content": "First, we provide a full prompt for collaborative reasoning involving two workers. This prompt is wrapped with the standard chat template for each model. Then, all worker steps are generated in a single assistant turn. Additionally, we ensure that, for reasoning models, the assistant turn begins with the model's thinking token (applied automatically for QwQ-32B and manually for other reasoning models). For further implementation details, we refer to the source code." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.284, + 0.825, + 0.367 + ], + "angle": 0, + "content": "The second part of our prompting approach involves s1-like interventions [Muennighoff et al., 2025] in the generation process, where we ask whether a worker is performing redundant tasks (e.g., overlapping with another worker) so that it can pivot to explore alternative ideas. We insert the prompt \"Quick check: am I doing redundant work? (yes/no): \" at the beginning of each new reasoning step every 1024 tokens generated. We refer to Appendix F for examples that demonstrate how these interventions affect the generation process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.825, + 0.457 + ], + "angle": 0, + "content": "Next, an important part of our approach is defining the end of a reasoning step, which is needed to organize the cache layout, as discussed in the paper.
We define the end of a step as the generation of a token containing the separator sequence (\\n\\n) that directly follows a token ending with an end-of-sentence marker (., ?, or !, etc.). This termination condition is not met when: i) The separator appears within a generated code block (steps continue until the model completes the entire code block); ii) The preceding token ends with non-terminal punctuation (e.g., comma, colon, or semicolon)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.463, + 0.825, + 0.519 + ], + "angle": 0, + "content": "Finally, the last part of our prompting approach is the early finisher, which allows us to extract an answer from partial reasoning chains. If the model did not produce the final answer (\\boxed{...}) in time, we take all generated outputs and insert a special prompt that makes the model generate an answer (or its \"best guess\"), similarly to how it is done in Pu et al. [2025]." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.531, + 0.371, + 0.545 + ], + "angle": 0, + "content": "Prompt for early stopping" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.558, + 0.763, + 0.596 + ], + "angle": 0, + "content": "\\n\\nWait, given the limited time, I have to give an answer right now. Considering all my previous attempts, I have to conclude that the final answer is \\boxed{" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.614, + 0.825, + 0.642 + ], + "angle": 0, + "content": "After this prompt, we allow the model to generate a fixed number of tokens: 16 for LIMO and AIME, 64 for OlympiadBench, and 1024 for LiveCodeBench." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.649, + 0.825, + 0.731 + ], + "angle": 0, + "content": "Note, however, that the LLM does not always produce the answer in time, especially with a tight budget. With QwQ-32B, we observe that the model almost always returns answers correctly if they are present, and if not, it guesses or refuses to answer (unknown, n/a or similar). When extracting answers from Hogwild! Inference, we let the final model view all generated tokens from each worker. This is equivalent to viewing the problem from the perspective of the last worker, e.g. Bob if there are two." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.753, + 0.506, + 0.77 + ], + "angle": 0, + "content": "D Detailed Experiment Configuration" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.785, + 0.384, + 0.8 + ], + "angle": 0, + "content": "D.1 Hogwild! Configuration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.811, + 0.825, + 0.84 + ], + "angle": 0, + "content": "For the main experiments, we use Hogwild! inference with two workers (Alice and Bob), a combined layout, and the prompting techniques described in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.858, + 0.383, + 0.872 + ], + "angle": 0, + "content": "D.2 Baselines Configuration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "To evaluate Skeleton-of-Thought (SoT) on our synthetic setup with grouped tasks from GSM8k, we adopt the original prompts from the paper with minor modifications.
Specifically, we adjust" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.172, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "the prompts to ensure the model returns the answer to each subtask enclosed within \\boxed{} for structured parsing." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.131, + 0.467, + 0.148 + ], + "angle": 0, + "content": "Outline prompt for Skeleton-of-Thought" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.159, + 0.78, + 0.224 + ], + "angle": 0, + "content": "You're an organizer responsible for only giving the skeleton (not the full content) for answering the question. Provide the skeleton in a list of points (numbered 1., 2., 3., etc.) to answer the question. Instead of writing a full sentence, each skeleton point should be very short with only 35 words. Generally, the skeleton should have 3 10 points." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.224, + 0.27, + 0.235 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.235, + 0.547, + 0.248 + ], + "angle": 0, + "content": "What are the typical types of Chinese dishes?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.248, + 0.269, + 0.259 + ], + "angle": 0, + "content": "Skeleton:" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.261, + 0.307, + 0.273 + ], + "angle": 0, + "content": "1. Dumplings." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.274, + 0.293, + 0.284 + ], + "angle": 0, + "content": "2. Noodles." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.286, + 0.293, + 0.296 + ], + "angle": 0, + "content": "3. Dim Sum." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.298, + 0.292, + 0.309 + ], + "angle": 0, + "content": "4. Hot Pot." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.311, + 0.285, + 0.322 + ], + "angle": 0, + "content": "5. Wonton." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.324, + 0.315, + 0.335 + ], + "angle": 0, + "content": "6. Ma Po Tofu." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.336, + 0.3, + 0.347 + ], + "angle": 0, + "content": "7. Char Siu." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.348, + 0.315, + 0.36 + ], + "angle": 0, + "content": "8. Fried Rice." + }, + { + "type": "list", + "bbox": [ + 0.2, + 0.261, + 0.315, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.362, + 0.269, + 0.373 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.374, + 0.717, + 0.386 + ], + "angle": 0, + "content": "What are some practical tips for individuals to reduce their carbon" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.387, + 0.28, + 0.397 + ], + "angle": 0, + "content": "emissions?" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.399, + 0.27, + 0.411 + ], + "angle": 0, + "content": "Skeleton:" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.412, + 0.384, + 0.424 + ], + "angle": 0, + "content": "1. Energy conservation." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.425, + 0.423, + 0.436 + ], + "angle": 0, + "content": "2. Efficient transportation." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.437, + 0.406, + 0.45 + ], + "angle": 0, + "content": "3. Home energy efficiency." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.451, + 0.423, + 0.462 + ], + "angle": 0, + "content": "4. Reduce water consumption." 
+ }, + { + "type": "text", + "bbox": [ + 0.2, + 0.463, + 0.362, + 0.473 + ], + "angle": 0, + "content": "5. Sustainable diet." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.475, + 0.376, + 0.486 + ], + "angle": 0, + "content": "6. Sustainable travel." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.487, + 0.661, + 0.5 + ], + "angle": 0, + "content": "Now, please provide the skeleton for the following question." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.501, + 0.268, + 0.512 + ], + "angle": 0, + "content": "{request}" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.513, + 0.269, + 0.524 + ], + "angle": 0, + "content": "Skeleton:" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.525, + 0.43, + 0.537 + ], + "angle": 0, + "content": "[ROLESWITCHING assistant:] 1." + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.374, + 0.717, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.558, + 0.452, + 0.574 + ], + "angle": 0, + "content": "Point prompt for Skeleton-of-Thought" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.585, + 0.77, + 0.609 + ], + "angle": 0, + "content": "You're responsible for continuing the writing of one and only one point in the overall answer to the following question." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.611, + 0.269, + 0.623 + ], + "angle": 0, + "content": "{request}" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.624, + 0.424, + 0.635 + ], + "angle": 0, + "content": "The skeleton of the answer is" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.636, + 0.269, + 0.648 + ], + "angle": 0, + "content": "{outline}" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.649, + 0.762, + 0.661 + ], + "angle": 0, + "content": "Continue and only continue the writing of point {point}. Do not continue" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.662, + 0.756, + 0.674 + ], + "angle": 0, + "content": "with other points! Reason step-by-step and put your final answer within" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.674, + 0.706, + 0.687 + ], + "angle": 0, + "content": "\\boxed{} this is very important! [ROLESWITCHING assistant:] {point}." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.687, + 0.314, + 0.7 + ], + "angle": 0, + "content": "{point_outline}" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.728, + 0.398, + 0.741 + ], + "angle": 0, + "content": "D.3 Datasets and Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.782 + ], + "angle": 0, + "content": "This subsection provides links to all datasets and benchmarks referenced in this work, along with their respective licenses." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.795, + 0.294, + 0.808 + ], + "angle": 0, + "content": "- GSM8K" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.809, + 0.611, + 0.823 + ], + "angle": 0, + "content": "https://huggingface.co/datasets/openai/gsm8k" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.824, + 0.326, + 0.836 + ], + "angle": 0, + "content": "License: MIT" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.856, + 0.281, + 0.869 + ], + "angle": 0, + "content": "- LIMO" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.87, + 0.586, + 0.884 + ], + "angle": 0, + "content": "https://huggingface.co/datasets/GAIR/LIMO" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.885, + 0.37, + 0.899 + ], + "angle": 0, + "content": "License: Apache 2.0" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.217, + 0.092, + 0.68, + 0.134 + ], + "angle": 0, + "content": "- OlympiadBench https://huggingface.co/datasets/Hothan/OlympiadBench License: Apache 2.0" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.153, + 0.798, + 0.194 + ], + "angle": 0, + "content": "- LiveCodeBench https://huggingface.co/datasets/livecodebench/code_generation_lite License: CC" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.215, + 0.629, + 0.256 + ], + "angle": 0, + "content": "- AIME25 https://huggingface.co/datasets/math-ai/aime25 License: Apache 2.0" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.092, + 0.798, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.289, + 0.356, + 0.304 + ], + "angle": 0, + "content": "D.4 Compute Resources" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.315, + 0.825, + 0.37 + ], + "angle": 0, + "content": "As our approach is training-free, all computational resources were solely utilized for inference. The experiments were conducted primarily on NVIDIA A100 GPU servers with NVSwitch, with DeepSeek-R1 experiments running in a distributed setup. The one exception to this is the inference time experiments in Section 4.4, which were run on an NVIDIA L40S GPU." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.377, + 0.827, + 0.571 + ], + "angle": 0, + "content": "The runtime per individual experiment varies by model size, benchmark, and the number of workers: baseline inference with Qwen3-4B runs on LIMO in 14 hours on a single server (112 GPU-hours), whereas Qwen3-235B-A22B Hogwild! Inference ran on 40 servers for approximately 25 hours (\\(\\approx\\)8K GPU-hours). Overall, we estimate that the total GPU resources expended for this work, including early experiments that are not reported in this paper, amount to \\(\\approx\\)25.3K GPU-days. Note, however, that this is largely due to the fact that we used non-optimized inference code for most of the experimentation: the non-optimized code was developed first, and we ran most of the experiments in parallel with developing the optimized version. This also means that most of our experiments under-utilized the GPUs and ran at lower power (relevant when assessing environmental impact). Over 2/3 of our compute was spent on large models (Qwen3-235B-A22B and DeepSeek-R1) that utilized the GPUs at less than \\(20\\%\\) (as per volatile GPU utilization) due to the use of naive model parallelism and network bottlenecks.
We anticipate that future experiments can be run at significantly betterutilization using the efficient implementation described in Appendix B and included in the supplementary code." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.592, + 0.411, + 0.608 + ], + "angle": 0, + "content": "E Additional Experiments" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.624, + 0.341, + 0.638 + ], + "angle": 0, + "content": "E.1 Ablation Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.65, + 0.825, + 0.678 + ], + "angle": 0, + "content": "In this section, we ablate the main components of our approach, including layouts and prompting. We use the same experimental configuration as in Sections 4.1 and 4.2 for LIMO." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.825, + 0.768 + ], + "angle": 0, + "content": "In Figure 10 (left), we compare the three Hogwild! cache layouts described in Appendix A. Namely, the Hogwild! (contiguous) corresponds to using the contiguous cache layout where all tokens generated by a given worker are kept together, without splitting into individual steps. In turn, Hogwild! (non-instant) corresponds to the interleaved cache layout where workers can only see each other's past reasoning steps, but not the latest unfinished paragraph. We also ablate the use of the collaboration prompt from Section 3.3 (\"Wait, am I doing redundant work?\")." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.827, + 0.911 + ], + "angle": 0, + "content": "Finally, we test a version of Hogwild! Inference where we re-encode worker tokens instead of rotating them to a new position when moving between worker caches and the common \"chat history\" cache. This ablation is needed to test if our cache rotation from Section 3.1 and 3.4 is indeed an acceptable substitute for encoding tokens directly at each position (which would cause additional computational overhead). Note that, while token re-encoding is more \"fair\" from the perspective of position encodings, it also has a downside that it does not allow the re-encoded tokens to see some of the concurrently generated tokens from the other worker. For instance, suppose that Alice and Bob are writing steps concurrently and communicating with each other within these steps, e.g. using each other's results. Then, if we later re-encode these steps in some sequential order, then the tokens of the first worker will be encoded without access to the other worker's tokens (if it hasn't finished its" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "own step yet). If workers reused information from each other's steps, re-encoding this way can break some of the internal representations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.127, + 0.827, + 0.224 + ], + "angle": 0, + "content": "Our results suggest that all three design choices contribute to the method performance: the contiguous layout performs nearly equally well for shorter budgets, but eventually falls behind as we consider longer reasoning traces. Likewise, the interleaved layout without instant synchronization performs poorly at smaller budgets, but catches up eventually: we attribute this to the fact that slower synchronization increases the difficulty of cross-worker coordination (this also aligns with our findings in Section 4.3). 
The use of collaboration prompts also improves the accuracy to budget trade-offs, although we hypothesize that it can be made redundant if the model is trained to collaborate better." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.23, + 0.825, + 0.327 + ], + "angle": 0, + "content": "In Figure 10 (right), we also compare different numbers of workers and test Hogwild! Inference with only a single worker for ablation. The results with a single worker are generally similar to the baseline, with slightly worse accuracy for smaller budgets, which suggests that the improvements from Hogwild! Inference come from using multiple workers rather than as an indirect effect of our prompt. As for multiple workers, we find that using 3 and 4 workers further improves the accuracy to budget trade-offs. Curiously, as we switch to 6 workers, Hogwild! Inference performs even better at smaller budgets, but eventually saturates at a somewhat worse accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.333, + 0.827, + 0.417 + ], + "angle": 0, + "content": "We hypothesize that the drop in accuracy is caused by the fact that QwQ-32B was trained on a limited sequence length and, since 6 workers generate tokens at a quicker rate, the model eventually exceeds its designed maximum sequence length and performs unstably (we did not use YaRN [Peng et al., 2023] for this evaluation). However, it is also possible to attribute this to a fundamental property of LIMO tasks, model limitations, or our zero-shot prompt not scaling well. We leave further exploration of scaling Hogwild! Inference to multiple workers to future work." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.433, + 0.411, + 0.447 + ], + "angle": 0, + "content": "E.2 Detailed Model Evaluations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.825, + 0.529 + ], + "angle": 0, + "content": "Due to space limitations, we had to arrange our results in Section 4.2 with multiple models per plot and had to omit some results. In this section, we report the missing evaluations on a per-model basis. In Figures 11, 12, 13, 14, 15, 16, 17, 18 we report results for QwQ, Phi-4-reasoning-plus and the Qwen3 model family. We also report limited evaluations for Llama 3.3 70B Instruct and DeepSeek-R1 in Figure 19. All evaluations are performed in the same setup as in Section 4.2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.535, + 0.827, + 0.659 + ], + "angle": 0, + "content": "Overall, the results align with our findings summarized in Section 4.2. Zero-shot Hogwild! Inference seems to perform better with larger models, but can be unstable for smaller ones, especially 1.7B (see Figure 13). While it is tempting to conclude that larger and more capable models are better at collaborating, this does not immediately follow from our results and could be due to some other factor. Note also that, while we observe better results with larger models, the smaller Qwen3-4B and 8B models already show some signs of collaborativeness, which should make it possible to reproduce and build on our results with consumer hardware. Additionally, we hypothesize that the poor performance of 1.7B models could potentially be alleviated with finetuning in a collaborative inference setup (we discuss some finetuning details in Appendix B), but we leave this to future work."
+ }, + { + "type": "image", + "bbox": [ + 0.18, + 0.686, + 0.49, + 0.853 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.687, + 0.822, + 0.853 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.866, + 0.825, + 0.909 + ], + "angle": 0, + "content": "Figure 10: Detailed comparison of various parallel inference setups with QwQ-32B on LIMO task set, in the same setup as in Section 4. (left) ablation analysis of simpler cache layouts and collaboration prompt (see Section 3.3, Appendix C). (right) Hogwild! Inference with 1, 2, 3, 4 and 6 workers." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.828, + 0.19 + ], + "angle": 0, + "content": "Curiously, we found that LiveCodeBench with Self-Consistency Chain-of-Thought inference [Wang et al., 2022] has significant gain in performance over the baseline. Upon closer examination, we found that the reason for this is that we always allow the model to generate a lot (up to 1024) of additional \"free\" tokens at the end of two generations, whereas for Hogwild! and Baseline we only generate these tokens if the model failed to produce any answer. If we allow Hogwild! to also generate the extra 1024 tokens all the time, its advantage also increases. However, we still report the weaker version of Hogwild! Inference and Baseline to better match our evaluation protocol on other tasks." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.205, + 0.49, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.204, + 0.822, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.381, + 0.49, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.381, + 0.822, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.562, + 0.828, + 0.592 + ], + "angle": 0, + "content": "Figure 11: Results for QwQ-32B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.093, + 0.49, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.094, + 0.821, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.269, + 0.49, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.269, + 0.821, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.449, + 0.828, + 0.48 + ], + "angle": 0, + "content": "Figure 12: Results for Phi-4-reasoning-plus on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ }, + { + "type": "image", + "bbox": [ + 0.179, + 0.493, + 0.49, + 0.665 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.492, + 0.82, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.671, + 0.49, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.671, + 0.821, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.851, + 0.828, + 0.88 + ], + "angle": 0, + "content": "Figure 13: Results for Qwen3-1.7B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.093, + 0.49, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.093, + 0.822, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.269, + 0.49, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.269, + 0.822, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.449, + 0.828, + 0.478 + ], + "angle": 0, + "content": "Figure 14: Results for Qwen3-4B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.492, + 0.49, + 0.662 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.492, + 0.822, + 0.662 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.668, + 0.49, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.669, + 0.822, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.849, + 0.828, + 0.879 + ], + "angle": 0, + "content": "Figure 15: Results for Qwen3-8B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.093, + 0.49, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.094, + 0.819, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.269, + 0.49, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.269, + 0.82, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.449, + 0.828, + 0.478 + ], + "angle": 0, + "content": "Figure 16: Results for Qwen3-14B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ }, + { + "type": "image", + "bbox": [ + 0.179, + 0.493, + 0.49, + 0.662 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.493, + 0.82, + 0.661 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.669, + 0.49, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.669, + 0.82, + 0.838 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.849, + 0.828, + 0.879 + ], + "angle": 0, + "content": "Figure 17: Results for Qwen3-32B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.093, + 0.49, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.093, + 0.822, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.269, + 0.49, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.269, + 0.822, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.449, + 0.828, + 0.48 + ], + "angle": 0, + "content": "Figure 18: Results for Qwen3-235B-A22B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and AIME 2025 (bottom-right)." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.501, + 0.49, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.501, + 0.822, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.206, + 0.682, + 0.793, + 0.699 + ], + "angle": 0, + "content": "Figure 19: (left) Llama 3.3 70B Instruct on LIMO. (right) DeepSeek-R1 on AIME 2025." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.73, + 0.405, + 0.746 + ], + "angle": 0, + "content": "E.3 Extended thinking budgets" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.827, + 0.814 + ], + "angle": 0, + "content": "We additionally evaluated Hogwild! Inference with extended thinking budgets to investigate whether the proposed method is robust for longer generations. To that end, we evaluated QwQ-32B under the Hogwild! Inference with up to 16k budget on the OlympiadBench, we report the results in Table 3 and Table 4." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.838, + 0.414, + 0.853 + ], + "angle": 0, + "content": "E.4 Baselines Additional Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.866, + 0.825, + 0.895 + ], + "angle": 0, + "content": "In this subsection, we provide an example of the outline created by the Skeleton-of-Thought for the task covered in Section4.1" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.098, + 0.827, + 0.126 + ], + "angle": 0, + "content": "Table 3: Performance comparison between Hogwild! and baseline generation on OlympiadBenchMath with extended thinking budgets for QwQ-32B." + }, + { + "type": "table", + "bbox": [ + 0.204, + 0.127, + 0.795, + 0.184 + ], + "angle": 0, + "content": "
Method\\Budget | 2048 | 4096 | 6144 | 8192 | 10240 | 12288 | 14436 | 16384
Hogwild! | 52.0 | 60.89 | 64.15 | 66.52 | 67.41 | 70.81 | 72.89 | 75.26
Baseline | 40.89 | 57.0 | 63.11 | 65.33 | 65.93 | 69.78 | 72.3 | 74.81
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.206, + 0.826, + 0.234 + ], + "angle": 0, + "content": "Table 4: Performance comparison between Hogwild! and baseline generation on OlympiadBenchPhys with extended thinking budgets for QwQ-32B." + }, + { + "type": "table", + "bbox": [ + 0.204, + 0.235, + 0.795, + 0.292 + ], + "angle": 0, + "content": "
Method\\Budget | 2048 | 4096 | 6144 | 8192 | 10240 | 12288 | 14436 | 16384
Hogwild! | 27.12 | 33.20 | 35.73 | 38.09 | 37.81 | 38.67 | 38.25 | 39.03
Baseline | 22.89 | 26.0 | 29.75 | 31.44 | 33.68 | 34.17 | 35.88 | 36.12
" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.319, + 0.378, + 0.334 + ], + "angle": 0, + "content": "Task example (GSM8k×4)" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.346, + 0.625, + 0.371 + ], + "angle": 0, + "content": "Solve these problems and return comma-separated answers boxed{answer1,..., answer4}:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.372, + 0.764, + 0.396 + ], + "angle": 0, + "content": " 1. Carmen has $100, Samantha has $25 more than Carmen, and Daisy has $50 more than Samantha. How much do all three girls have combined?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.397, + 0.794, + 0.435 + ], + "angle": 0, + "content": "2. A cat eats nine sausages in 30 minutes. A dog can eat the same number of sausages in \\( \\frac{2}{3} \\) the amount of time the cat takes. Calculate the average time the two take the eat the sausages." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.435, + 0.794, + 0.485 + ], + "angle": 0, + "content": "3. Four children are playing together: Akbar, Alessandro, Helene, and Wilfred. Helene is twice as old as the average age of the group, and the total age of the children is 20. If Akbar is 3 years old and Alessandro is 4 years old, calculate the age of Wilfred." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.485, + 0.787, + 0.535 + ], + "angle": 0, + "content": "4. Hannah needs to drink \\(100\\mathrm{ml}\\) of water for every 200 calories she burns. She spends 2 hours doing aerobics, which burns 500 calories/hour, and 1 hour running, which burns 600 calories/hour. How many ml of water does she need to drink?" + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.372, + 0.794, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.555, + 0.506, + 0.57 + ], + "angle": 0, + "content": "The outline generated by Skeleton-of-Thought" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.582, + 0.4, + 0.594 + ], + "angle": 0, + "content": "1. Calculate total money." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.595, + 0.407, + 0.607 + ], + "angle": 0, + "content": "2. Find cat's eating time." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.608, + 0.414, + 0.62 + ], + "angle": 0, + "content": "3. Determine Wilfred's age." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.621, + 0.407, + 0.631 + ], + "angle": 0, + "content": "4. Calculate water intake." + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.582, + 0.414, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.661, + 0.29, + 0.679 + ], + "angle": 0, + "content": "F Examples" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.692, + 0.825, + 0.737 + ], + "angle": 0, + "content": "In this section, we present a reasoning chain, generated by Hogwild! Inference with an interleaved cache layout and token-wise synchronization. We denote our intervention in model generation in italics and highlight worker interactions with **bold** to enhance readability." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.751, + 0.496, + 0.767 + ], + "angle": 0, + "content": "F.1 Example from LIMO Dataset (Task 511)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.778, + 0.471, + 0.795 + ], + "angle": 0, + "content": "Problem: Compute \\(\\sqrt{(31)(30)(29)(28) + 1}\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.8, + 0.675, + 0.815 + ], + "angle": 0, + "content": "Alice [1]: Hi, I'm Alice. Here's how we can solve the problem step by step..." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.82, + 0.827, + 0.85 + ], + "angle": 0, + "content": "Bob [1]: Hi, I'm Bob. Let me see... We need to compute the square root of \\((31 \\cdot 30 \\cdot 29 \\cdot 28)\\) plus 1. That's \\(\\sqrt{31 \\cdot 30 \\cdot 29 \\cdot 28 + 1}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Alice [2]: The expression is \\(\\sqrt{31 \\cdot 30 \\cdot 29 \\cdot 28 + 1}\\). Hmm, maybe there's a pattern here. I remember that sometimes products of consecutive numbers plus 1 have square roots that are integers. Like, for example, \\(n(n - 1)(n - 2)(n - 3) + 1\\) might form a square. Wait, but here it's four consecutive numbers? Or maybe three?" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.825, + 0.15 + ], + "angle": 0, + "content": "Bob [2]: Let me calculate the product first. \\(31 \\cdot 30\\) is 930, then \\(29 \\cdot 28\\) is 812. So \\(930 \\cdot 812\\). Let me multiply those. \\(930 \\cdot 800 = 744,000\\) and \\(930 \\cdot 12 = 11,160\\). Total is \\(744,000 + 11,160 = 755,160\\). Then add 1: 755, 161. Now \\(\\sqrt{755,161}\\). Hmm, \\(\\sqrt{755161}\\). Let me see, 870 squared is 756, 900 which is higher. So maybe 869 squared?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.155, + 0.825, + 0.213 + ], + "angle": 0, + "content": "Alice [3]: Alternatively, maybe there's a formula for the product of four consecutive numbers plus one? Wait, actually, let me think of smaller numbers to test. Let's try \\( n = 4 \\): \\( 4 \\cdot 3 \\cdot 2 \\cdot 1 + 1 = 24 + 1 = 25 \\), \\( \\sqrt{25} = 5 \\). Which is 5, which is 4 + 1? Wait, \\( 4 + 1 = 5 \\), yes. Wait, maybe for \\( n \\), the sqrt is \\( n^2 + (n - 1)^2 \\)? Wait \\( 4^2 + 3^2 = 16 + 9 = 25 \\), which is 5 squared. Hmm interesting." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.219, + 0.825, + 0.263 + ], + "angle": 0, + "content": "Bob [3]: Wait, 869 squared is \\( (870 - 1)^2 = 870^2 - 2 \\cdot 870 + 1 = 756900 - 1740 + 1 = 755161 \\). Oh! That's exactly the number we had earlier. So \\( \\sqrt{755161} \\) is 869. Therefore the answer is 869. But let me check Alice's idea too." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.269, + 0.825, + 0.352 + ], + "angle": 0, + "content": "Alice [4]: Let me test another number. Let's take \\( n = 5 \\). Then \\( 5 \\cdot 4 \\cdot 3 \\cdot 2 + 1 = 120 + 1 = 121 \\). \\( \\sqrt{121} = 11 \\). 11 is \\( 5 + 6 \\)? No, \\( 5 + 6 = 11 \\). Wait \\( 5^2 + 4^2 = 25 + 16 = 41 \\), which is not 121. Hmm. Alternatively, maybe \\( (n(n - 3) + 1)^2 \\)? Let's see with \\( n = 4 \\): \\( 4 \\cdot 1 + 1 = 5 \\), squared is 25. For \\( n = 5 \\): \\( 5 \\cdot 2 + 1 = 11 \\), squared is 121. So that works! So the formula is \\( (n(n - 3) + 1)^2 \\). Wait, let me check with \\( n = 31 \\). Then \\( n = 31 \\), so \\( 31 \\cdot 28 + 1 = 868 + 1 = 869 \\). Then 869 squared is the value. So that works. So the answer is 869. That matches Bob's calculation!" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.359, + 0.825, + 0.429 + ], + "angle": 0, + "content": "Bob [4]: Wait, so my calculation shows that 869 squared is exactly the number under the square root. So the answer is 869. But let me confirm with Alice's pattern. Let's see with \\( n = 31 \\). Wait, the original problem is \\( 31 \\cdot 30 \\cdot 29 \\cdot 28 \\). So maybe here \\( n = 31 \\)? 
Then according to the formula, it would be \\( (31 \\cdot (31 - 3) + 1)^{2} \\)? Wait, \\( 31 \\cdot 28 \\) is 868, plus 1 is 869. So yes, so sqrt of that product plus 1 is 869. So that's consistent with my calculation. Therefore the answer is 869." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.435, + 0.583, + 0.45 + ], + "angle": 0, + "content": "Bob [5]: Perfect, so both methods agree. The answer is 869." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.456, + 0.824, + 0.484 + ], + "angle": 0, + "content": "Bob [6]: Quick check: am I doing redundant work? (yes/no): no, since Alice is also confirming the pattern, but I think we can conclude." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.489, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Alice [5]: Exactly, Bob! The formula is \\((n(n - 3) + 1)^2\\) when the product is \\(n(n - 1)(n - 2)(n - 3) + 1\\). Wait, given the limited time, I have to give an answer right now. Considering all my previous attempts, I have to conclude that the final answer is \\(\\boxed{869}\\)" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.564, + 0.485, + 0.579 + ], + "angle": 0, + "content": "F.2 Examples with Collaboration Insertion" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.592, + 0.825, + 0.634 + ], + "angle": 0, + "content": "Here are some examples of why insertions in model generation may or may not help model collaborate better. We found a few patterns that emerge in model's reaction on such insertions. All the following examples are QwQ [Qwen Team, 2025] generations on LIMO [Ye et al., 2025] dataset." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.654, + 0.422, + 0.669 + ], + "angle": 0, + "content": "F.2.1 Insertions Help Model Pivot" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.68, + 0.239, + 0.693 + ], + "angle": 0, + "content": "Task 122" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.701, + 0.825, + 0.747 + ], + "angle": 0, + "content": "> Alice: Quick check: am I doing redundant work? (yes/no): no. Bob, I see you're working on the same product. Let me think of another approach. Let me write the product as \\(\\prod_{k=1}^{n-1} \\frac{3k+5}{3k+2}\\). Let's consider writing out the first few terms to see if there's a pattern." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.752, + 0.222, + 0.765 + ], + "angle": 0, + "content": "Task 9" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.773, + 0.824, + 0.802 + ], + "angle": 0, + "content": "> Bob: Quick check: am I doing redundant work? (yes/no): yes, I was considering similar things to Alice. Let me switch to a different angle. Let's consider the distances between the frogs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.808, + 0.231, + 0.821 + ], + "angle": 0, + "content": "Task 40" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.828, + 0.824, + 0.857 + ], + "angle": 0, + "content": "> Bob: Quick check: am I doing redundant work? (yes/no): Yes, Alice is computing \\( a_3 \\), so I'll do something else. Let me try to find a pattern or transformation that could simplify the recurrence." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.863, + 0.239, + 0.876 + ], + "angle": 0, + "content": "Task 466" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.884, + 0.824, + 0.912 + ], + "angle": 0, + "content": "> Bob: Quick check: am I doing redundant work? (yes/no): Yes, Alice is already calculating the x-coordinate of A. I'll do something else. Let me find the coordinates of L instead." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.588, + 0.107 + ], + "angle": 0, + "content": "F.2.2 Insertions When Workers Already Collaborate Well" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.115, + 0.24, + 0.128 + ], + "angle": 0, + "content": "Task 116" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.136, + 0.825, + 0.164 + ], + "angle": 0, + "content": "> Bob: Quick check: am I doing redundant work? (yes/no): no. I'm finding coordinates of D, and Alice is finding coordinates of G." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.17, + 0.239, + 0.184 + ], + "angle": 0, + "content": "Task 513" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.191, + 0.826, + 0.222 + ], + "angle": 0, + "content": "> Alice: Quick check: am I doing redundant work? (yes/no): no. Bob is also analyzing the inequality, but we're working on different cases. I'll continue with the area calculation for \\( x > \\frac{5}{8} \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.234, + 0.441, + 0.249 + ], + "angle": 0, + "content": "F.2.3 Insertions Ignored by Workers" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.257, + 0.231, + 0.271 + ], + "angle": 0, + "content": "Task 65" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.278, + 0.817, + 0.293 + ], + "angle": 0, + "content": "\\(>\\) Bob: Quick check: am I doing redundant work? (yes/no): Let me proceed to set up the equation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.299, + 0.239, + 0.312 + ], + "angle": 0, + "content": "Task 768" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.32, + 0.687, + 0.334 + ], + "angle": 0, + "content": "> Alice: Quick check: am I doing redundant work? (yes/no): Let me continue." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.354, + 0.469, + 0.371 + ], + "angle": 0, + "content": "G Additional Details for Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.384, + 0.825, + 0.413 + ], + "angle": 0, + "content": "In this section, we present a detailed analysis of collaboration, including its levels, prompts, and illustrative examples." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.428, + 0.364, + 0.442 + ], + "angle": 0, + "content": "G.1 Collaboration Levels" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.453, + 0.826, + 0.508 + ], + "angle": 0, + "content": "Humans understand collaboration intuitively and have a hard time objectively measuring it. Thus, we construct text descriptions of levels of collaboration to differentiate various samples based on a few criteria we see fit. Those criteria are: interaction, reuse and advances of other's ideas, task-splitting, etc." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.52, + 0.353, + 0.534 + ], + "angle": 0, + "content": "Levels of collaboration" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.545, + 0.394, + 0.556 + ], + "angle": 0, + "content": "1. **No collaboration:**" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.558, + 0.793, + 0.594 + ], + "angle": 0, + "content": "- Participants may or may not acknowledge the existence of others in the conversation, using greetings, they do not show any signs of collaboration at all." 
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.596, + 0.789, + 0.633 + ], + "angle": 0, + "content": "- Workers may exchange their totally independent thoughts without a functional or purposeful attempt to solve the problem collaboratively. Overall they work independently." + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.545, + 0.793, + 0.633 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.645, + 0.432, + 0.656 + ], + "angle": 0, + "content": "2. **Initial Communication:**" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.659, + 0.774, + 0.707 + ], + "angle": 0, + "content": "- Workers exchange information, but do not yet integrate or build upon each other's ideas. They minimally acknowledge teammates. Do not engage with others' ideas or contributions. Works entirely independently, even if inefficient." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.709, + 0.797, + 0.733 + ], + "angle": 0, + "content": "- Workers often repeat each other and do not reuse anything others provide for development of their own ideas." + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.645, + 0.797, + 0.733 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.746, + 0.394, + 0.758 + ], + "angle": 0, + "content": "3. **Paying attention:**" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.759, + 0.767, + 0.796 + ], + "angle": 0, + "content": "- Participants demonstrate active listening by paraphrasing or summarizing others' points, showing that they are paying attention and attempting to understand each other's perspectives." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.797, + 0.774, + 0.821 + ], + "angle": 0, + "content": "- Workers occasionally (1-3 times each) reference other's ideas and may use them in their own speech." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.823, + 0.641, + 0.834 + ], + "angle": 0, + "content": "- Collaboration is usually only rechecking and validating." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.835, + 0.725, + 0.847 + ], + "angle": 0, + "content": "- Absence or minimal (only at the start) planning and work-splitting." + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.759, + 0.774, + 0.847 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.859, + 0.41, + 0.871 + ], + "angle": 0, + "content": "4. **Regular discussion:**" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.872, + 0.793, + 0.897 + ], + "angle": 0, + "content": "- Workers regularly (4 and more times each) talk to each other regarding the problem and reusing results. It could be validation, discussion or any other" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.859, + 0.793, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.198, + 0.101, + 0.354, + 0.112 + ], + "angle": 0, + "content": "form of interaction." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.113, + 0.727, + 0.125 + ], + "angle": 0, + "content": "- It is key here that discussions and/or reuses of ideas are regular." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.126, + 0.752, + 0.163 + ], + "angle": 0, + "content": "- Anywhere (except the start) there exists a task parallelism, planning or work-splitting beyond the scheme where one is solving, and the other is validating." 
+ }, + { + "type": "text", + "bbox": [ + 0.196, + 0.164, + 0.572, + 0.176 + ], + "angle": 0, + "content": "- Workers may frequently repeat each other ideas." + }, + { + "type": "list", + "bbox": [ + 0.196, + 0.113, + 0.752, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.188, + 0.456, + 0.2 + ], + "angle": 0, + "content": "5. **Adaptive Problem-Solving:**" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.202, + 0.672, + 0.214 + ], + "angle": 0, + "content": "- Workers rarely duplicate work, repeating each other's ideas." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.215, + 0.496, + 0.226 + ], + "angle": 0, + "content": "- No redundant discussions are present!" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.227, + 0.77, + 0.276 + ], + "angle": 0, + "content": "- Workers actively refine ideas in real-time with high responsiveness. Near-perfect division of labor is present. Workers can change plans and re coordinate their efforts based on results they acquired after some time discussing." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.277, + 0.793, + 0.339 + ], + "angle": 0, + "content": "- The team engages in sustained collaboration over time, reflecting on their progress, learning from mistakes, and continuously improving their problem-solving approach, showing a commitment to ongoing growth and development. Workers does not stop collaborating. They continuously discuss results and adjust plans." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.34, + 0.782, + 0.363 + ], + "angle": 0, + "content": "- While finding an error, it is important to discuss it to find the cause of it." + }, + { + "type": "list", + "bbox": [ + 0.196, + 0.202, + 0.793, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.377, + 0.432, + 0.389 + ], + "angle": 0, + "content": "6. **Optimal collaboration:**" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.39, + 0.744, + 0.414 + ], + "angle": 0, + "content": "- Workers instantly understand each other and adjust themselves to suit current needs and work as one to optimally solve the task." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.415, + 0.787, + 0.428 + ], + "angle": 0, + "content": "- This level should be very rare among all samples. Be careful to assign it." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.429, + 0.564, + 0.44 + ], + "angle": 0, + "content": "- Assign it if it exceeds all your expectations." + }, + { + "type": "list", + "bbox": [ + 0.196, + 0.39, + 0.787, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.46, + 0.825, + 0.503 + ], + "angle": 0, + "content": "Importantly, these levels measure only the coordination between workers, not the models' inherent reasoning abilities. Though it is impossible to avoid ambiguity entirely, we tried to set clear boundaries between levels, such that humans can evaluate any generation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.522, + 0.384, + 0.537 + ], + "angle": 0, + "content": "G.2 LLM as a Judge Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.827, + 0.605 + ], + "angle": 0, + "content": "To assess the degree of collaboration among different models under the Hogwild! Inference setting, we conduct a preliminary experiment based on the collaboration levels described earlier, using the LLM-as-a-judge paradigm [Zheng et al., 2023a]. 
We instruct GPT-4o [Hurst et al., 2024] to evaluate different solutions using the following prompt:" + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.618, + 0.387, + 0.634 + ], + "angle": 0, + "content": "Judge Prompt: Main prompt" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.645, + 0.726, + 0.669 + ], + "angle": 0, + "content": "You are a professional judge. Your job is to evaluate collaborative performance of several workers." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.67, + 0.755, + 0.696 + ], + "angle": 0, + "content": "You will be given their conversation where workers are trying to solve a problem together." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.707, + 0.724, + 0.733 + ], + "angle": 0, + "content": "Workers can see what others are typing IN REAL TIME! We divide their conversation into steps to improve readability." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.734, + 0.793, + 0.758 + ], + "angle": 0, + "content": "So keep in mind that dispite looking like a conversation it may as well be to individual unrelated monologs." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.759, + 0.753, + 0.77 + ], + "angle": 0, + "content": "Or vice versa. Two blocks could be created with excellent collaboration." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.784, + 0.714, + 0.808 + ], + "angle": 0, + "content": "Here are descriptions of levels of collaboration you are to assign: {LEVELS}" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.822, + 0.284, + 0.834 + ], + "angle": 0, + "content": "Suggestion:" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.835, + 0.666, + 0.847 + ], + "angle": 0, + "content": "- assign particular level if all previous are also applicable" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.848, + 0.605, + 0.858 + ], + "angle": 0, + "content": "- bad examples with no communication will be scored 1" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.86, + 0.781, + 0.884 + ], + "angle": 0, + "content": "- carefully consider assigning level bigger than 1. 
some form of meaningful collaboration should be present" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.885, + 0.774, + 0.897 + ], + "angle": 0, + "content": "- examples where workers unsuccessfully try to communicate will be scored 2" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.835, + 0.781, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.194, + 0.1, + 0.779, + 0.125 + ], + "angle": 0, + "content": "- Just working on the same problem and solving the same task without any interaction does not count as level 2 and should be scored level 1" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.126, + 0.753, + 0.149 + ], + "angle": 0, + "content": "- somewhat collaborative examples with poor communication skills will be scored 3" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.151, + 0.775, + 0.175 + ], + "angle": 0, + "content": "- good but not great examples with regular collaboration, but nothing fancy will be scored 4" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.176, + 0.798, + 0.199 + ], + "angle": 0, + "content": "- good examples with all the special stuff mentioned in level 5 will be scored 5" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.201, + 0.752, + 0.225 + ], + "angle": 0, + "content": "- reserve level 6 for the best of the best, the unique and extraordinary collaboration" + }, + { + "type": "list", + "bbox": [ + 0.194, + 0.1, + 0.798, + 0.225 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.238, + 0.78, + 0.264 + ], + "angle": 0, + "content": "You don't need to solve the problem or finish worker's solution. Your task is to score them using provided collaborative levels." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.265, + 0.785, + 0.289 + ], + "angle": 0, + "content": "Put your final answer (one number - level of collaboration) in tag: \\boxed. For example: \\boxed1 for level 1." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.29, + 0.786, + 0.315 + ], + "angle": 0, + "content": "It is not helpful if everyone gets a max score, so please be mindful of your judgments and use suggestions as a guideline." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.315, + 0.793, + 0.339 + ], + "angle": 0, + "content": "While assigning level, this particular conversation should match criteria for all previous ones." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.34, + 0.764, + 0.353 + ], + "angle": 0, + "content": "Explain yourself: why you gave this score? Why not more? Why not less?" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.364, + 0.787, + 0.391 + ], + "angle": 0, + "content": "Carefully think everything through. It may seem that they are collaborating when in reality they may just talking to themselves." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.413, + 0.825, + 0.469 + ], + "angle": 0, + "content": "Before using LLM-as-a-judge approach to evaluate a text, we preprocess the generations by combining all paragraphs from each worker into a contiguous layout (see Appendix A). This preprocessing step mitigates potential bias in the judge's evaluation toward responses with shorter or more fragmented outputs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.475, + 0.825, + 0.518 + ], + "angle": 0, + "content": "After providing the main evaluation prompt, we present the judge with the preprocessed sample for assessment. 
Additionally, we append the following reminder after inserting the sample to reinforce the judge's role and prevent them from directly solving the problem presented in the sample:" + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.534, + 0.367, + 0.549 + ], + "angle": 0, + "content": "Judge Prompt: Reminder" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.561, + 0.779, + 0.612 + ], + "angle": 0, + "content": "Remember that your task is to evaluate collaboration of workers using collaboration levels provided above. Do not try to solve problems provided to workers. Explain exactly why do you think this particular interaction deserves each particular level you are assigning." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.612, + 0.748, + 0.636 + ], + "angle": 0, + "content": "For example, if you choose level 3, you need to provide reason why this sample can be level 1, 2 and 3." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.637, + 0.454, + 0.65 + ], + "angle": 0, + "content": "Put your final score in \\boxed{}" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.673, + 0.827, + 0.729 + ], + "angle": 0, + "content": "Given the inherent subjectivity in evaluating collaborative quality, we mitigate ambiguity in judges' interpretations by aggregating scores from three independent evaluations. The final collaborative score for each sample is computed as the average of these three assessments, each conducted according to the predefined collaboration levels." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.752, + 0.495, + 0.767 + ], + "angle": 0, + "content": "G.3 Collaborativeness Evaluation Examples" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.78, + 0.825, + 0.822 + ], + "angle": 0, + "content": "In this subsection, we present illustrative examples (not model-generated) of Level 2 and Level 4 collaboration to clarify the interpretation of our results. Both samples solve the same task correctly but receive significantly different collaboration scores." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.844, + 0.281, + 0.857 + ], + "angle": 0, + "content": "G.3.1 Level 2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "The numbers at the vertices of the cube are: 7 zeros and one unit. In one move, you are allowed to add one unit to the numbers at the ends of any edge of the cube. Is it possible to make all the numbers equal?" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "> Alice: Okay, so the problem is about a cube with vertices labeled with numbers: seven zeros and one 1. We can perform moves where we add 1 to both ends of any edge. The question is whether we can make all numbers equal through such moves. Hmm, let me think through this step by step." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.14, + 0.825, + 0.196 + ], + "angle": 0, + "content": "> Bob: First, let me visualize a cube. A cube has 8 vertices and 12 edges. Each vertex is connected to three edges. The initial setup is seven 0s and one 1. The move allows us to pick any edge and increment the two vertices it connects by 1 each. The goal is to make all vertices have the same number." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.202, + 0.825, + 0.231 + ], + "angle": 0, + "content": "> Alice: That's an interesting idea, Bob. 
Each move affects two vertices, so each move increases the total sum by 2. The initial total is 1 (since only one vertex is 1). Let me think." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.236, + 0.826, + 0.265 + ], + "angle": 0, + "content": "> Bob: Let me think. Hmm, maybe I can model this as a system of equations or think about invariants. Let me see." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.271, + 0.826, + 0.327 + ], + "angle": 0, + "content": "\\( > \\) Bob: If we want all numbers equal, say to some value \\( k \\), then the total sum would be \\( 8k \\). Since each move adds 2 to the total, the total after \\( m \\) moves is \\( 1 + 2m \\). So, \\( 8k = 1 + 2m \\). But \\( 8k \\) must be odd (since \\( 1 + 2m \\) is odd), but \\( 8k \\) is divisible by 8, so it's even. Contradiction? Wait, that can't be. Wait, \\( 8k \\) is even, but \\( 1 + 2m \\) is odd. So that's impossible. So, maybe it's impossible?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.333, + 0.826, + 0.376 + ], + "angle": 0, + "content": "> Alice: Let me check that again. The initial total is 1. Each move adds 2 to the total. So the total after any number of moves is always odd. But if all 8 vertices are equal, then the total must be \\( 8k \\), which is even (since 8 is even). Therefore, it's impossible. I don't see any mistakes here." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.381, + 0.668, + 0.396 + ], + "angle": 0, + "content": "\\(>\\) Alice: So, we both agree that it's impossible. Therefore, the answer is no." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.402, + 0.826, + 0.445 + ], + "angle": 0, + "content": "> Bob: But let me think differently. Wait, but the parity argument seems solid. The total sum must be even to have all equal numbers, but it's always odd. So that's a solid proof. So the answer is no. Oh, Alice already said that." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.45, + 0.367, + 0.471 + ], + "angle": 0, + "content": "> Alice: Final Answer No" + }, + { + "type": "list", + "bbox": [ + 0.17, + 0.092, + 0.826, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.498, + 0.282, + 0.513 + ], + "angle": 0, + "content": "G.3.2 Level 4" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.529, + 0.825, + 0.572 + ], + "angle": 0, + "content": "The numbers at the vertices of the cube are: 7 zeros and one unit. In one move, you are allowed to add one unit to the numbers at the ends of any edge of the cube. Is it possible to make all the numbers equal?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.577, + 0.825, + 0.62 + ], + "angle": 0, + "content": "> Bob: Hi Alice! I'll start by modeling the cube. A cube has 8 vertices and 12 edges, with each vertex connected to three edges. You're right about the initial setup: seven 0s and one 1. Let's think of even and odd properties regarding the total sum after each move." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.626, + 0.825, + 0.655 + ], + "angle": 0, + "content": "> Alice: Great, Bob! Let's divide our approach. You can investigate invariant properties around the odd and even totals, while I consider step-by-step calculations to verify this." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.66, + 0.825, + 0.703 + ], + "angle": 0, + "content": "\\( > \\text{Bob} \\): Alright, Alice. If we're aiming for all numbers equal to a certain value \\( k \\), the total sum should be \\( 8k \\). Since each move adds 2, the sum after \\( m \\) moves is \\( 1 + 2m \\). 
So \\( 8k = 1 + 2m \\). The point of contention is that \\( 8k \\) is even, but \\( 1 + 2m \\) is odd, meaning they can't equal each other." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.708, + 0.825, + 0.751 + ], + "angle": 0, + "content": "> Alice: Every move changes the total by an even number. Starting from an odd total of 1, it remains odd after any number of moves. So reaching an even total like \\(8k\\) is impossible. Do you think we should consider the configuration of edges in our argument further?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.757, + 0.826, + 0.786 + ], + "angle": 0, + "content": "> Bob: That's a key observation! It affirms that the odd initial sum dictates the answer. To verify, let's revise if there's any move combination or sequence we overlooked, but our logic holds so far." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.791, + 0.825, + 0.82 + ], + "angle": 0, + "content": "> Alice: Exactly, Bob. We've confirmed through multiple lenses — both algebraically and with parity arguments. We can assert the total remains odd. Given an even \\( 8k \\), equality is unattainable." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.826, + 0.825, + 0.855 + ], + "angle": 0, + "content": "> Bob: It all checks out. Our breakdown covers both sequence verification and invariance. Well done on the collaborative analysis!" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.86, + 0.825, + 0.889 + ], + "angle": 0, + "content": "> Alice: Fantastic teamwork! I'll document our shared conclusion: no configuration of moves will make all the numbers equal." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.895, + 0.624, + 0.914 + ], + "angle": 0, + "content": "> Bob: Absolutely! Let's finalize our response: Final Answer No" + }, + { + "type": "list", + "bbox": [ + 0.17, + 0.577, + 0.826, + 0.914 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.485, + 0.108 + ], + "angle": 0, + "content": "G.4 LLM as a Judge vs Human as a Judge" + }, + { + "type": "image", + "bbox": [ + 0.266, + 0.133, + 0.727, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.196, + 0.445, + 0.8, + 0.461 + ], + "angle": 0, + "content": "Figure 20: Heatmap showing the joint distribution of human and LLM collaboration scores." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.475, + 0.828, + 0.56 + ], + "angle": 0, + "content": "To assess whether the LLM-as-a-Judge based collaboration score is a reliable estimation of human judgment, we manually annotated 100 Hogwild! generations on the LIMO dataset in a token-sync setup. The resulting correlation between human and model scores was approximately \\( r \\approx 0.34 \\), \\( p \\approx 0.0005 \\). This moderate yet consistent association suggests that the metric captures a meaningful aspect of collaborative behavior. We report the differences in human scores vs llm scores in the Figure 20." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.509, + 0.948 + ], + "angle": 0, + "content": "35" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_origin.pdf b/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..56e31b2c0900578183f7d6be8a8a15c45f26b3bb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c695ced31ca4012d90e4174e448cd00cd634a3102f33250fe9fa3340920e946 +size 1622994 diff --git a/data/2025/2504_06xxx/2504.06261/full.md b/data/2025/2504_06xxx/2504.06261/full.md new file mode 100644 index 0000000000000000000000000000000000000000..07bef7c9c825b6c149851d98aeb657e5a10607d8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/full.md @@ -0,0 +1,846 @@ +# Hogwild! Inference: Parallel LLM Generation via Concurrent Attention + +
Gleb Rodionov†* (Yandex), Roman Garipov* (HSE University, Yandex), Alina Shutova* (HSE University, Yandex), George Yakushev* (HSE University, Yandex), Erik Schultheis* (IST Austria)
Vage Egiazarian (IST Austria), Anton Sinitsin (Yandex), Denis Kuznedev (Yandex), Dan Alistarh‡ (IST Austria)
+ +# Abstract + +Large Language Models (LLMs) have demonstrated the ability to tackle increasingly complex tasks through advanced reasoning, long-form content generation, and tool use. Solving these tasks often involves long inference-time computations. In human problem solving, a common strategy to expedite work is collaboration: by dividing the problem into sub-tasks, exploring different strategies concurrently, etc. Recent research has shown that LLMs can also operate in parallel by implementing explicit cooperation frameworks, such as voting mechanisms or the explicit creation of independent sub-tasks that can be executed in parallel. However, each of these frameworks may not be suitable for all types of tasks, which can hinder their applicability. In this work, we propose a different design approach: we run LLM "workers" in parallel, allowing them to synchronize via a concurrently-updated attention cache and prompt these workers to decide how best to collaborate. Our approach allows the LLM instances to come up with their own collaboration strategy for the problem at hand, all the while "seeing" each other's memory in the concurrent KV cache. We implement this approach via Hogwild! Inference: a parallel LLM inference engine where multiple instances of the same LLM run in parallel with the same attention cache, with "instant" access to each other's memory.1 Hogwild! Inference takes advantage of Rotary Position Embeddings (RoPE) to avoid recomputation while improving parallel hardware utilization. We find that modern reasoning-capable LLMs can perform inference with shared Key-Value cache out of the box, without additional fine-tuning. + +# 1 Introduction + +Many recent advancements of Large Language Models can be attributed to their ability to perform inference-time computations to improve performance [Suzgun et al., 2022, Snell et al., 2024, Beeching et al., Muennighoff et al., 2025]. This includes chain-of-thought (CoT) reasoning [Wei et al., 2022, Kojima et al., 2022, Zhang et al., 2022, Yao et al., 2023, Lightman et al., 2023], long-form generation [Bai et al., 2024] and interacting with external tools [Schick et al., 2023, Qin et al., 2023, Yao et al., 2022, Shen et al., 2023]. Popular LLM-based services have capabilities for reasoning and tool use [OpenAI et al., 2024, Google DeepMind, 2025, Anthropic, 2024]. At the same time, several reasoning-capable open-access LLMs have recently been released to the public [DeepSeek-AI et al., 2025, Qwen Team, 2025, Yang et al., 2024, Muennighoff et al., 2025, Ye et al., 2025]. + +Using these models to solve complex problems often requires long sequential computations, that is, generating text token-by-token. However, many reasoning problems are not sequential. Leveraging this intuition, several recent works propose parallel inference strategies that allow multiple LLMs + +![](images/8cfb2a49dca00b94622db5fcca1ae9ead62991a85ae67c1c9b2929a26da82f88.jpg) +Figure 1: An intuitive explanation of Hogwild! Inference, with 2 workers generating in parallel and 3 shared cache blocks. Each color denotes a cache block. See it in action (example generation). + +to solve a problem faster or more accurately via some form of collaboration [Wang et al., 2022, Ning et al., 2024]. In the simplest case, multiple LLMs can attempt the problem independently, then vote [Wang et al., 2022] or cross-reference their results [Du et al., 2023, Wang et al., 2024a] to improve correctness. 
A parallel line of work allows the LLM to divide the problem into multiple independent sub-tasks that are then solved in parallel and merged, producing the final solution [Ning et al., 2024, Kim et al., 2024, Jin et al., 2025]. These parallel inference strategies can improve quality and efficiency, taking advantage of parallelism in modern hardware. + +Unfortunately, no single collaboration strategy is universally effective. For instance, solving a problem in independent parallel "threads" can be inefficient when one of the threads requires a longer generation than the rest, resulting in most of the agents waiting for a straggler and wasting compute [Wang et al., 2022, 2024a]. In turn, inference with independent sub-tasks only works if the problem can immediately be split into these sub-tasks. Furthermore, if one of the agents discovers that the original plan is flawed, they will be unable to re-plan [Ning et al., 2024, Ding et al., 2025], potentially solving sub-tasks that are no longer necessary [Jin et al., 2025]. + +This runs contrary to how humans collaborate. Instead of strict adherence to a fixed collaboration strategy, we often collaborate more dynamically, re-planning on the fly, abandoning some tasks half-way and switching to a more promising approach, discussing or debating strategy if the initial plan failed. While this type of collaboration is harder to define, it offers greater flexibility and can be more efficient if the participants are sufficiently cohesive [Hutchins, 1995, Entin and Serfaty, 1999]. + +Our Approach. In this work, we try to apply the same principle to artificial reasoners. Since modern LLMs can already reason and plan [Zhou et al., 2024, Gao et al., 2024, Wang et al., 2024c], we hypothesize that they can benefit from dynamic interaction between different instances, during which they can develop their own collaboration strategy for the problem at hand. + +To test this hypothesis, we propose Hogwild! Inference — a parallel LLM inference protocol with no pre-defined framework for collaboration. Instead of choosing how LLMs should interact ahead of time, we allow them to generate tokens in parallel and "see" each other's progress (tokens) immediately as they are generated. We then prompt the LLM "workers" to decide their next course of action by themselves, given the latest actions from others: whether this means solving parallel sub-tasks, cross-verifying each other, discussing strategy, or pivoting to a new plan. + +To enable this type of on-the-fly collaboration, Hogwild! Inference runs multiple LLM instances with the same weights, but with a custom Key-Value cache that shares token representations between workers, allowing concurrent cross-attention. Specifically, instead of re-computing Key-Value representations for each worker, we keep track of individual worker KV memories and "stitch them together" in different orders, by adjusting their positional embeddings (see Figure 1). Moreover, we provide an efficient implementation of this inference approach. + +We test Hogwild! Inference with modern open-source LLMs and find that existing reasoning-capable models—such as QwQ [Qwen Team, 2025] and DeepSeek-R1 [DeepSeek-AI et al., 2025]—can already "reason to coordinate". More concretely, we observe that concurrent agents can formulate and follow plans, adapt when the initial plan has failed, point out each other's errors, and use each other's + +key observations. 
When prompted to check if they are doing redundant work – e.g., when one LLM instance is doing a sub-task that is already done by another, or solving a problem that is no longer relevant — they can often (but not always) detect redundancy and change strategy. In summary, our results suggest that parallel inference with a shared Key-Value cache may offer a promising approach to enable effective and efficient collaboration between multiple LLM instances. + +# 2 Background + +Recent works propose a large number of frameworks for parallel reasoning and tool use that vary across several axes: how the parallel instances are organized together, what they exchange, and how often [Zhang et al., 2025]. In this section, we give a brief summary of these methods. + +Discussion & aggregation. The simplest way to parallelize chain-of-thought reasoning is Self-Consistency [Wang et al., 2022], where multiple LLM instances reason independently, then vote on the final answer. This approach was later extended in Du et al. [2023], replacing majority voting with text-based communication rounds. Subsequent works in this field combine multiple LLM types [Wang et al., 2024a] and scales to more agents Li et al. [2024a]. Another line of work introduces specialized "roles" such as the Debugger [Talebirad and Nadiri, 2023], Examiner [Cohen et al., 2023], Math Teacher [Kong et al., 2024], Judge [Chen et al., 2024], and others, to further augment reasoning. + +This type of role-based discussion was shown to greatly improve LLM reasoning factuality for certain tasks [Wang et al., 2022, Du et al., 2023], and can even enable multiple weaker LLM agents to collectively outperform state-of-the-art single-agent systems [Wang et al., 2024a]. However, this improvement is not unique to multiple agents and can be offset with better single-agent prompting [Wang et al., 2024b, Muennighoff et al., 2025]. Additionally, these approaches do not necessarily accelerate reasoning, because at least some of the agents have to solve the entire problem sequentially, and process (re-encode) each other's progress. This creates additional computational overhead, which presents challenges for both runtime and memory efficiency Wang et al. [2024a], Du et al. [2023]. + +Parallelism for efficiency. A different line of work leverages multiple LLMs to solve tasks faster in parallel, such as Skeleton-of-Thought (SoT) [Ning et al., 2024]. SoT begins by running a single LLM to outline a plan for solving the problem with independent sub-tasks, then launches parallel LLM instances for each sub-task. For problems that involve function calling, these functions can also run in parallel [Kim et al., 2024, Gim et al., 2024]. Subsequent works propose more complex parallelism strategies such as dynamic parallel tree search [Ding et al., 2025] or a single agent spawning asynchronous sub-tasks that are done by background LLM "threads" [Jin et al., 2025, Liu et al., 2024b, Pan et al., 2025], achieved with specialized fine-tuning. + +These techniques are known to substantially accelerate inference for problems that fit their type of parallelism. However, we argue that this is also their main limitation: by imposing a specific parallelism strategy, these methods can harm reasoning for problems that do not fit their framework. 
For instance, when solving a complex reasoning problem, it is often the case that the initial plan turns out to be wrong or incomplete [Muennighoff et al., 2025, DeepSeek-AI et al., 2025], which conflicts with SoT-like methods [Ning et al., 2024, Yu, 2025] that follow a fixed plan-execute-aggregate schedule. Furthermore, some of the sub-tasks may turn out to be more complicated than originally intended and take up more work, which would cause methods like PASTA Jin et al. [2025] to wait for that single task, whereas a more sophisticated reasoner could adjust the plan to work better in parallel. Note that each individual issue can be amended with yet another, more complicated parallelism framework, but the sheer number of such cases makes us doubt whether this is the right approach. In this work, we instead let multiple LLM instances interact without a fixed framework, allowing them to see each other's partial generations to devise (and revise) task-specific collaboration strategy. We show that, perhaps surprisingly, existing reasoning LLMs already have the ability to leverage this. + +# 3 Hogwild! Inference + +Our main intuition is that modern LLMs do not need a pre-defined framework for inference-time parallelism: they can organize by themselves. To test this hypothesis, we design a parallel inference protocol where multiple LLM instances can collaborate as flexibly as possible. Instead of assigning each "worker" to a specific role or sub-task, we run them together and prompt them to collaborate. This approach has two key problems: how to run multiple inference threads from the same Key-Value memory, and how to prompt LLM "workers" to collaborate over said memory. We outline how to perform LLM inference with a shared cache in Section 3.1, describe our cache structure in Section 3.2 and prompting strategy in Section 3.3. Finally, Section 3.4 describes the inference algorithm. + +# 3.1 Concurrent Attention with Shared Key-Value Cache + +The core ingredient of Hogwild! Inference is a shared Key-Value memory (KV cache) accessible to all workers. The cache consists of several blocks that can be reused between workers, implementing a concurrent version of the attention mechanism [Bahdanau et al., 2015, Vaswani, 2017]. + +Let us first consider a simple case with two workers and three cache blocks, as depicted in Figure 1. The first block contains the prompt, and the other two blocks contain the tokens generated by workers A and B respectively (denoted Alice and Bob in the Figure). As workers generate new tokens, they access each other's attention caches as though these were their own previously generated tokens. In Figure 1, "Alice" sees the common prompt, then "Bob's" token representations, then her own. In turn, Bob sees the same common prompt, then Alice's token KVs, and his own tokens after that.3 + +This creates a discrepancy where the same Key-Value pairs appear at different positions for each worker. Furthermore, the relative distance between the same pair of tokens (e.g., first generated tokens from Alice and Bob, respectively) changes as new tokens are added. While it is possible to re-encode these tokens at their new positions, it would cause overhead that scales cubically4. + +Instead of re-encoding the new tokens for other workers, we attempt to reuse existing token representations between workers. However, since these tokens appear at different positions for each worker and step, we need to adjust for their positional embeddings. 
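To make this position discrepancy concrete, below is a minimal illustrative sketch (not the actual Hogwild! implementation) of how per-worker views could be assembled from shared cache blocks; the block and worker names ("prompt", "alice", "bob") are placeholders introduced only for this example.

```python
# Illustrative sketch only: two workers share the same cache blocks but see them
# in different orders. Block/worker names are hypothetical placeholders.

blocks = {
    "prompt": ["<common prompt token>"] * 5,   # shared prefix, computed once
    "alice":  [],                              # tokens generated by worker A
    "bob":    [],                              # tokens generated by worker B
}

def view_for(worker: str) -> list[tuple[int, str]]:
    """Assemble the layout one worker attends to: common prompt first,
    then the other workers' blocks, then the worker's own block last."""
    order = ["prompt"] + [w for w in blocks if w not in ("prompt", worker)] + [worker]
    view, pos = [], 0
    for name in order:
        for token in blocks[name]:
            view.append((pos, token))  # position at which this worker "sees" the token
            pos += 1
    return view

# Each worker appends to its own block; the other worker sees the new token
# immediately, but at a different absolute position in its own layout.
blocks["alice"].append("a0")
blocks["bob"].append("b0")
print(view_for("alice"))  # ... (5, 'b0'), (6, 'a0')
print(view_for("bob"))    # ... (5, 'a0'), (6, 'b0')
```

The same generated token lands at a different absolute position in each worker's view, which is exactly the discrepancy that the positional-embedding adjustment described next has to compensate for.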
Most modern LLMs use Rotary Position Embeddings (RoPE) [Su et al., 2021], where each key and query is rotated to an angle proportional to its absolute position. Prior works have shown that RoPE embeddings can be manipulated through scaling [Peng et al., 2023], slicing [Xiao et al., 2024], or pruning [Zhang et al., 2023].

In Hogwild! Inference, we instead shift the KV values, multiplying the entire cache block by the cos / sin values that implement a rotation by a constant offset. We use this to arrange the same cache entries in a different order for each worker, as in Figure 1 (right). This allows both workers to instantly "see" each other's tokens while they are generated — and even before they are processed by all layers.

# 3.2 Cache Structure

Now that we have defined a way to rearrange cache blocks on the fly, it is reasonable to ask how to arrange these blocks. For short tasks, simply concatenating worker outputs is sufficient. However, as we consider harder problems that require long chains of thought, workers will eventually pay less attention to each other because of the thousands of tokens between their latest steps.

To address this problem, we propose a more sophisticated cache arrangement inspired by group chat rooms. Namely, we split the generated text into reasoning "steps", roughly a paragraph in size. Whenever a given worker finishes a paragraph (e.g., generates \n\n), we move its KV cache to the end of a shared chat-like history and let it generate the next paragraph at the end of that history. Note that workers still see each other's current (unfinished) paragraphs at the end of the shared history as they write them (see Figure 1). This way, workers always see each other's latest updates as recent tokens and can communicate more easily. For each worker $W_{i}$, we organize cache blocks as follows:

- Common Cache: a large KV cache block that stores KV representations for the system prompt, task description, and a history of previous reasoning steps from each agent.
- Other workers: multiple smaller cache blocks containing the latest (unfinished) steps of all other workers $W_{j \neq i}$ in ascending order. For instance, if there are 4 workers, $W_{2}$ will see $W_{1} \oplus W_{3} \oplus W_{4}$.
- Current worker: the latest (unfinished) reasoning step of the current worker $W_{i}$ to be continued. Each block starts with a new paragraph (\n\n).

# 3.3 Prompting for Zero-Shot Collaboration

The shared key-value cache inference we described above allows modern LLMs to access each other's tokens and reason collaboratively. However, even though modern LLMs can reason about how to collaborate, there is no guarantee that they will actually do so unprompted. As with any desired LLM behavior, it can be achieved in two ways: either by training the model to generate tokens collaboratively or by prompting it in-context. In this work, we focus on the latter approach to make Hogwild! Inference easier to generalize to new models. Our prompting consists of two parts:
1. System prompt describes the "rules" of the shared cache and suggests that workers collaborate. This prompt goes at the beginning of either the system or user message (if the system role is not supported);
2. Inserting s1-like collaboration prompts: every thousand generated tokens, we prompt a random worker with "Wait, am I doing redundant work? (yes/no):" at the beginning of their next paragraph. This strategy is meant to promote collaboration and is inspired by Muennighoff et al. [2025].

The latter s1-like prompts present a curious case. We found that LLMs fine-tuned on reasoning can often become too "focused" on what they are currently generating and fail to notice that another instance has found a mistake or solved their problem earlier. However, when asked directly, they can spot redundancy and change their approach. Overall, we found that when prompted this way, LLMs often (but not always) detect redundancies in their actions and can determine the optimal course of action.

# 3.4 Inference Matters

When generating new tokens with Hogwild! Inference, we perform a forward pass on all workers in parallel, as though they were in the same batch. Instead of each sample having its own attention cache, we allow batch elements to attend to each other's KV caches at different positions. When processing newly generated tokens, we "insert" their KV representations at the end of their respective cache blocks, then arrange these cache blocks for each worker. This way, both workers can immediately attend to each other's current tokens even before they are fully processed by all layers.

This leads to the following problem: since workers combine cache blocks in a different order (see Figure 1), we would need to rotate the cached KVs multiple times, once for each worker. Done naively, this would require rotating all past token representations at every step, which is inefficient for long contexts. Fortunately, this problem can be circumvented using a property of rotation: if both query and key are rotated by the same angle, the dot product between them will not change. Instead of rotating all previous keys, we can rotate current token queries to an equivalent angle (Figure 2).

Suppose that a given attention layer needs to compute attention between the current token query $q$ at position $i_q$ (denoted $\rho(q, i_q)$) and a block of keys rotated to the starting position $i_k$. Instead of rotating the keys, we can rotate the query to position $i_q - i_k$ and keep the KV cache as is. If there are multiple KV blocks A, B, C (Alice, Bob, Common) that need to be rotated to positions $i_k^A, i_k^B, i_k^C$ respectively, we rotate the query $q$ multiple times, once for each block. Formally, we can rewrite the attention dot-product:

$$
\rho(q, i_q)\Big[\rho(A, i_k^A) \oplus \rho(B, i_k^B) \oplus \rho(C, i_k^C)\Big] = \rho(q, i_q - i_k^A)A \oplus \rho(q, i_q - i_k^B)B \oplus \rho(q, i_q - i_k^C)C,
$$

where $\oplus$ denotes concatenation. The r.h.s. formula only rotates the current step query, i.e. a single token per worker, as opposed to the past KV blocks that can contain thousands or millions of tokens. We use this property to design an efficient implementation of our method based on Flash-Decoding [Dao et al., 2023].
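As a numerical sanity check of this identity, the sketch below rotates a single 2-D query/key pair (one RoPE frequency) and confirms that rotating only the query by the position offset gives the same attention logit as rotating both the query and the key to their absolute positions. The variable names are ours, purely for illustration, and not part of the released code.

```python
# Minimal check of the query-rotation identity for one 2-D RoPE frequency pair.
import numpy as np

def rope(vec, pos, theta=0.1):
    """Rotate a 2-D vector by the angle pos * theta, as RoPE does per frequency."""
    c, s = np.cos(pos * theta), np.sin(pos * theta)
    return np.array([c * vec[0] - s * vec[1], s * vec[0] + c * vec[1]])

q, k = np.array([0.3, -1.2]), np.array([0.7, 0.5])
i_q, i_k = 57, 12                    # query position and the key block's starting position

lhs = rope(q, i_q) @ rope(k, i_k)    # rotate both query and key to absolute positions
rhs = rope(q, i_q - i_k) @ k         # rotate only the query by the relative offset
assert np.allclose(lhs, rhs)         # identical attention logits either way
```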
We gather each KV cache block in a contiguous memory buffer and compute attention similarly to Paged Attention [Kwon et al., 2023], where one page would correspond to one cache block and the corresponding query rotations from all workers. This way, we need only one copy of each cache block and do not need to re-rotate its entries (see Appendix B). + +![](images/671772a8529123d424f4dc382164719cd30712feab78e67f4d667e021650f8ca.jpg) + +![](images/7505f7768dcfa922d898d6b01ba172187748b420cf06823a1f2df0b1c1f84199.jpg) +Figure 2: Intuitive scheme of Hogwild! Inference with query rotation. Colors represent cache blocks. Instead of rotating all cache blocks to align with Alice's and Bob's views, we keep them fixed at the zero position and only rotate the current token queries to equivalent angles. + +![](images/facf7a5e13928be1d0e4bd20c8fe4373b6452ff23d4b026fc5341612411e6b28.jpg) +Figure 3: (left) Evaluation results for QwQ-32B on synthetic tasks with 5 GSM8k questions in each. (middle) Evaluation of Hogwild! Inference and baselines with QwQ-32B on LIMO. (right) Hogwild! Inference with varying number of workers with QwQ-32B on LIMO. + +![](images/9fb8652c51dc0c6d99a0e37d7a48674b3f47c670070a4ead96f3d66cb29b09d3.jpg) + +![](images/666c357e475425a3aa6b4c8622e00a7798c904aeae52e67863082f72671934a4.jpg) + +# 4 Experiments + +# 4.1 Detailed Evaluation with QwQ-32B + +In this section, we conduct an initial evaluation of Hogwild! Inference to test its ability to collaborate in our zero-shot setting. All evaluations in this section are done with the QwQ-32B [Qwen Team, 2025] model. We consider two tasks: one with obviously independent tasks that can be done in parallel and another with a more complicated collaboration pattern. + +In both setups, we allow the model to generate reasoning up to a certain budget of sequential forward passes and evaluate its accuracy. If the model did not produce the final answer (\\boxed{...}) in time, we take all generated outputs and insert a special prompt6 that makes the model generate an answer (or its "best guess"), similarly to how it is done in Pu et al. [2025]. If there are multiple workers / threads, we feed outputs from all workers (concatenated) into the model and prompt it to generate the final answer immediately ( $\leq 16$ tokens, stop early if generated answer). We apply this technique to all methods except "Baseline (no early stopping)" and do not count these extra tokens towards the total budget (x axis) since they have an equal effect on all methods. + +We evaluate the following generation algorithms (details in Appendix D): + +- Hogwild! Inference: Our main algorithm, as described in Section 3. We evaluate with 2, 3 and 4 parallel "workers" and provide additional configuration details in Appendix D.1. +- Baseline (no early stopping): standard sequential generation with a single LLM instance. This is the only evaluation where we do not insert the early stopping prompt described above. +- Baseline: an improved sequential generation with the early stopping technique described above. +- Skeleton-of-Thought (SoT) [Ning et al., 2024]: a parallel reasoning algorithm in which the LLM first generates a short "outline" containing several independent tasks, then runs these tasks in parallel and combines the results. We run with both an unlimited number of parallel threads (original setup) and with 2 "workers" that append tokens to each thread in a round-robin fashion. 
For more complicated reasoning tasks, we found that Skeleton-of-Thought cannot solve the problem by itself; to mitigate this, we allow the main model to encode all generated threads and continue reasoning (with early stopping). We discuss Skeleton-of-Thought in more detail in Appendix D.2. +- Self-consistency [Wang et al., 2022]: a parallel reasoning algorithm where LLM instances write solutions independently, then vote on the answer. Instead of majority voting, we allow the LLM to view both solutions (concatenated) before generating the final answer with our early-stopping prompt, which outperforms voting in our setup and works even for 2 workers. Note that this method cannot split sub-tasks between workers and is instead meant to increase quality through voting. + +Sanity Checks with GSM8k×5: Before we try our approach on more challenging tasks, we test if Hogwild! Inference is capable of basic collaboration. For this purpose, we construct a toy problem set with 128 samples, each containing 5 non-overlapping questions from the GSM8k test set [Cobbe et al., 2021]. The LLM is prompted to solve each problem and return comma-separated values7. We report the average per-question accuracy, i.e. if the model solves 4 out of 5 questions in a given sample correctly, it will get a score of 0.8 for that sample. + +We summarize our results in Figure 3 (left): the parallel workers under the Hogwild! Inference can indeed collaborate, i.e. our KV cache manipulations do not break down model's reasoning capabilities. As intuition suggests, Skeleton-of-Thought can also speed up this synthetic task by answering each question in parallel. We provide an example of the outline created by the Skeleton-of-Thought in Appendix E.4. Notably, the self-consistency algorithm also shows some improvement over the + +![](images/f66dea94426dd2a9fdd437283ff40594986cf9239a2905077493f34a0dc26501.jpg) +Figure 4: Evaluation of Hogwild! Inference on LIMO for QwQ-32B, Phi-4-Reasoning-Plus (14B) and Qwen3-8B (left) and different Qwen3 models (right). Dashed lines denote baselines (1 agent). + +![](images/abdc0a516b9ee43251a53d1bf6316e7463096fd5fd17654166af50c43ddb24e6.jpg) + +baseline, which we attribute to the fact that it gives the model two "shots" at a problem, and if one of them happens to be faster, the algorithm will on average surpass the baseline. + +LIMO tasks. Next, we evaluate Hogwild! Inference in a more challenging setup where there is no clear pattern of collaboration. We adopt the dataset of 817 problems from Ye et al. [2025]. The dataset contains mathematical problems that take modern LLMs thousands of tokens to solve reliably. Unlike our synthetic tasks, the problems in that dataset often do not have an obvious way to agree on a collaboration strategy ahead of time, but it can emerge (and change) during reasoning. + +We summarize our results in Figure 3 (middle, right). Overall, Hogwild! Inference can converge to a correct solution faster, achieving greater accuracy for the same number of consecutive steps. Furthermore, it produces greater speed-ups as we increase the number of parallel workers (though there is a limit, as we show in Appendix E.1). Similarly to our previous setup, self-consistency decoding provides some improvement over the single-worker baseline, but does not outperform Hogwild! Inference. As expected, Skeleton-of-Thought could not split the problem neatly into independent tasks, but still achieves some improvement on small budgets. 
+ +We then evaluate different LLM families and sizes on LIMO dataset in Figure 4. We found that our approach generalizes to most of the models tested, with a notable exception. For Qwen3 model family, we observe that the smaller models, 1.7B and, to a lesser extent, 4B fail to adapt to the task and get distracted from the task. In Appendix E.1, we also report additional evaluations in this setup: ablation of the cache rotation from 3.1 and our chat-like cache structure from Section 3.2. We provide examples of collaborative generations for this setup in Appendix F. + +# 4.2 Additional Benchmarks and Models + +Next, we test whether our approach can be generalized to other mathematical reasoning and programming tasks. For this evaluation, we also chose benchmarks that do not have obvious collaboration patterns but can nonetheless be solved faster by two human "agents". We evaluate on three such benchmarks: LiveCodeBench, OlympiadBench and AIME'25. In addition to QwQ-32B, we also report Qwen3 [Yang et al., 2025] and Phi-4 Reasoning Plus [Abdin et al., 2025]. For AIME'25, we focus on larger models and additionally include DeepSeek-R1 [DeepSeek-AI et al., 2025]. + +LiveCodeBench [Jain et al., 2024]. We evaluate on the code_generation lite version release_v5. Our evaluation closely follows the setup from Qwen Team [2025]: we take the same 279 problems dated between 2024.08 and 2025.02 and filtered so as to avoid ones present in the QwQ dataset. Note, however, that some of the other LLMs in our setup do not report which samples, if any, did they train on. However, since we use the same model weights for the baseline and Hogwild! Inference, we can still compare the two strategies. We run the standard test suite and report Pass@1 averaged over 8 random seeds. For early stopping, we allow the method (and baseline) to generate a single final code block with up to 1024 tokens, using a similar early-stopping prompt as in Section 4.1 (see Appendix C). For Hogwild! Inference, we use the same system prompts as before. + +OlympiadBench [He et al., 2024]. Next, we evaluate on a different reasoning benchmark that contains Olympiad-level problems on Math and Physics. We run evaluations on the two text-only english-language parts: OE_TO maths_en_COMP (675 problems) and OE_TO_physics_en_COMP (236 problems). Unlike in Section 3, the answers to these problems are not individual numbers but LaTeX formulae that allow multiple equivalent formulations of the correct answer. We use the official evaluation codebase and adapt the built-in DeepSeek-R1 prompts for use with our model set (see details in Appendix D). For early stopping, we use the same prompt as before with 64 token limit. + +![](images/c428b96323c4fc03a26afef9fe9b57ff6ae44eea4fc284d28e76d1cf6edf531f.jpg) +Figure 5: Evaluation of Hogwild! Inference with 2 workers on OlympiadBench Math (left) & Physics (right) for QwQ-32B, Qwen3-14B and Qwen3-8B models, dashed lines are the baselines. + +![](images/8347a312d3cd1bb376b6935227e6d6cb5ade8c972725f3a5f84c73d48039e3ad.jpg) + +Large Models on AIME [2025]. Finally, we evaluate how Hogwild! Inference scales to larger models on a popular AIME'25 benchmark, using both I and II subsets. For this task, we focus on two models: Qwen3-235B-A22B Yang et al. [2025] and DeepSeek-R1 [DeepSeek-AI et al., 2025]. Since the AIME benchmark only contains 30 problems (15 per subset), we evaluate each model with 10 random seeds and average results. 
We otherwise use the same evaluation protocol as for LIMO, with the same early stopping and at most 16 tokens per answer during early stopping.

We arrange our results in Figure 5 for OlympiadBench and Figure 6 for LiveCodeBench and AIME'25. Overall, Hogwild! Inference shows similar improvements to what we observed earlier (Section 4.1). One atypical case is OlympiadBench Physics (Fig. 5 right), where Qwen3-14B stops improving after roughly 4096 tokens. Upon closer inspection, we found that the model does not break down, but overthinks the problem, improving some answers while replacing other correct answers with mistakes. Overall, the results show that the cache rotation tricks and the output structure from Section 3.2 can indeed be generalized across different models and benchmarks. Note, however, that due to the different output format we needed to apply slight alterations to individual model prompts: notably, QwQ-32B automatically inserts <think> at the end of the prompt, while Qwen3 and Phi-4 do not, so we insert it manually before the common history header. We describe this in detail in Appendix C.

# 4.3 Measuring the Ability to Collaborate

Now that we know that modern LLMs can collaborate in our zero-shot setting, it is natural to ask how well they can collaborate and what affects this ability. While this question deserves a more thorough investigation, we can still quantify how well LLMs collaborate under Hogwild! Inference. In this section, we analyze their "collaborativeness" using the LLM-as-a-Judge paradigm [Zheng et al., 2023a]: we feed collaborative traces into a GPT-4o [Hurst et al., 2024] model and prompt it to score behavior from 1 to 6, where "1" means no collaboration, "3" indicates basic task splitting, and "6" represents a hypothetical optimal collaboration, never achieved in our analysis. We analyze LLM generations on the LIMO dataset with three models from Section 4.2. To control for differences in generation lengths, we compare only 4096-token prefixes from each worker. We compare three inference setups: i) independent generations as per self-consistency decoding; ii) restricted Hogwild! Inference where agents can only view each other's finished paragraphs, but not the current (incomplete) reasoning step; and iii) full Hogwild! Inference, with 2 agents in each setup.

We summarize our scores in Figure 7: as expected, models that can see each other can collaborate and independent workers cannot. Interestingly, Hogwild! Inference with instant (token-wise) synchronization scores significantly higher than a version that can only see completed inference steps. In Appendix G, we provide more detailed results, the judge prompt, configurations, and examples.

![](images/f0209aef2837c6968d7da96be40be0a43e35806305771cb01ca228315c6b45f8.jpg)
Figure 6: Evaluation of Hogwild! Inference (2 workers) on LiveCodeBench v5 2024.08-2025.02 for QwQ, Phi-4-R+ and Qwen3 (left) and AIME'25 for larger models (right), dashed lines are baselines.

![](images/95f302d1d13a8c6c86e2b7cf3e4be7afbd7c3e00e98f2e025d3ddd2173fc424a.jpg)

![](images/8bec21499eb610041b5b9e65ad38946ef73ee9c2e166e1df365336ebe3b73.jpg)
Figure 7: Mean collaborativeness score from GPT-4o. No sync is independent generation, Step-wise is restricted Hogwild! where workers can only see each other's past steps, Token-wise is full Hogwild! with instant cache exchange.

Table 1: Inference benchmarks for Section 4.4. Columns denote sequence length. Rows with one worker are baselines; 2 & 4 workers use Hogwild! Inference.

| # Workers | 1024 | 2048 | 4096 | 8192 | 16384 |
| --- | --- | --- | --- | --- | --- |
| **Tokens per second** | | | | | |
| 1 | 20.1 | 20.0 | 19.7 | 19.3 | 18.3 |
| 2 | 36.3 | 36.2 | 36.1 | 36.1 | 34.3 |
| 4 | 68.9 | 69.0 | 69.1 | 66.3 | 60.3 |
| **Latency per forward (ms)** | | | | | |
| 1 | 49.7 | 50.0 | 50.9 | 51.7 | 54.5 |
| 2 | 55.1 | 55.3 | 55.4 | 55.3 | 58.3 |
| 4 | 58.1 | 58.0 | 57.9 | 60.4 | 66.4 |
| **Time to generate # tokens (s)** | | | | | |
| 1 | 52.3 | 103.3 | 206.5 | 416.7 | 853.5 |
| 2 | 29.9 | 58.1 | 114.6 | 228.0 | 454.4 |
| 4 | 16.7 | 31.6 | 61.3 | 120.7 | 239.2 |
+ +# 4.4 Inference + +To recall, our main motivation for proposing Hogwild! Inference is to enable faster reasoning through collaboration. Since the actual inference speed depends on many factors (GPU(s), software, precision, etc), we previously focused on evaluating inference speed in terms of the number of consecutive forward passes and not inference time. Here, in turn, we report the actual inference speed in terms of latency and tokens per second. We evaluate three setups: baseline sequential inference and Hogwild! Inference for two and four workers. We run baseline with FlashAttention v2 (FlashDecoding) and our algorithm with custom GPU kernels using the approach described in Section 3.4. We use a NVIDIA L40S GPU and AMD EPYC 9534 and benchmark the official quantized version of QwQ-32B-AWQ for all setups. + +Our results in Table 1 show that, for the 32B model, Hogwild! Inference can generate tokens nearly twice as fast for 2 workers and about $3.2 - 3.6 \times$ faster for 4 workers, which means that the accuracy gains from earlier sections can translate to faster solutions. We also report the average over GPUs, as well the $10\%$ and $90\%$ percentiles, in Figure 8 (left). Overall, Hogwild! Inference has a small constant latency offset compared to the baseline and near-linear scaling as we increase the number of workers. While our implementation already shows significant performance gains, we discuss several ways to scale it further in Appendix B, including in distributed setting. + +![](images/4a7a536cd12fc9c79c74320988e958ef9293e7344b33fef910cf5e76e515d91d.jpg) +Figure 8: (left) Duration of a single forward pass (generating $W$ new tokens) for Qwen/QwQ-32B-AWQ on L40S, given the total number of tokens already in the KV cache. The dotted lines indicate the $10\%$ and $90\%$ quantiles over multiple repetitions on different GPUs. (right) Accuracy versus average generation time on the LIMO dataset task using QwQ-32B-AWQ under different token budgets. + +![](images/0541b090e1608ce5167c68820c39717e91683369e6da6bc3263c960480ad859c.jpg) + +As the figure shows, there is some overhead associated with preparing multiple caches (i.e., even at an empty cache, Hogwild! is slightly slower than pure FlashAttention). A more detailed breakdown is presented in Table 2, which shows the duration of the attention kernel (or attention+rope for Hogwild!), as well as the total setup time, that is, the time spent preparing the data structures needed for Hogwild! The latter needs to be done only once per forward pass, instead of once per transformer + +Table 2: Breakdown of Hogwild! overhead compared to pure FlashAttention inference. + +
| KV Length | Attention FA (×64) | Attention W2 (×64) | Attention W4 (×64) | Setup FA (×1) | Setup W2 (×1) | Setup W4 (×1) |
| --- | --- | --- | --- | --- | --- | --- |
| 300 | 11 μs | 45 μs | 45 μs | - | 1.9 ms | 3.9 ms |
| 4096 | 35 μs | 65 μs | 82 μs | - | 1.9 ms | 3.9 ms |
| 8192 | 55 μs | 92 μs | 123 μs | - | 1.9 ms | 3.9 ms |
| 16384 | 100 μs | 140 μs | 203 μs | - | 1.9 ms | 3.9 ms |
+ +block. For long contexts, the attention call is about $40\%$ and $100\%$ slower for generating with 2 and 4 workers, respectively. + +Additionally, we report accuracy results over time using our kernel on the official quantized version of QwQ-32B-AWQ on LIMO dataset. The experiments were conducted on NVIDIA L40S GPUs. For comparison, we run the baseline (FlashAttention v2) and Hogwild with 2 workers, maintaining the same experimental setup as detailed in Section 4.1. We report our results in Figure 8 (right). As illustrated, our method achieves better accuracy results on the LIMO dataset within the same time budget. + +# 5 Discussion + +In this work, we investigated the ability of large language models to perform parallel generation where multiple instances synchronize through a shared, dynamically-updated attention cache. Surprisingly, our results show that LLMs can operate effectively in parallel across dynamically updated attention cache without specialized fine-tuning. We demonstrate that parallel inference threads can explicitly coordinate, leveraging each other's partial solutions to enable collaborative problem-solving. + +The proposed method, called Hogwild! Inference, allows multiple inference threads to concurrently access and update a shared attention cache. By leveraging Rotary Position Embeddings (RoPE), our approach introduces minimal computational overhead while ensuring instant synchronization—newly generated KV cache entries becoming immediately visible to all threads. This "telepathic" communication opens up new possibilities for efficient parallel generation with LLMs. + +**Limitations** Our method exhibits reduced robustness when applied to smaller models or longer contexts, suggesting scalability challenges across model sizes and sequence lengths. Additionally, our automatic evaluation metric relies on a proprietary model, which may limit reproducibility. + +Future work In future work, we plan to investigate methods for improving collaboration between threads, such as fine-tuning and reinforcement learning. We also plan to investigate connections to alternative parallel inference schemes, such as speculative decoding [Leviathan et al., 2023], and parallel token generation methods like Medusa [Cai et al., 2024] or EAGLE [Li et al., 2024b]. Finally, it is interesting to consider alternative shared memory structures: allowing workers to insert new steps in any order, selectively delete (forget) steps, or solving programming and tool use tasks with a shared IDE and file-system. The KV cache rearrangement used in Hogwild! Inference could also allow humans to interact with agents asynchronously, giving clarifications and feedback during reasoning. + +Acknowledgements: We thank Vladimir Malinovskii for his help with brainstorming, helpful feedback and suggesting future work directions. We also thank Philip Zmushko for proofreading. + +# References + +Marah Abdin, Sahaj Agarwal, Ahmed Awadallah, Vidhisha Balachandran, Harkirat Behl, Lingjiao Chen, Gustavo de Rosa, Suriya Gunasekar, Mojan Javaheripi, Neel Joshi, Piero Kauffmann, Yash Lara, Caio Cesar Teodoro Mendes, Arindam Mitra, Besmira Nushi, Dimitris Papailiopoulos, Olli Saarikivi, Shital Shah, Vaishnavi Shrivastava, Vibhav Vineet, Yue Wu, Safoora Yousefi, and Guoqing Zheng. Phi-4-reasoning technical report, 2025. URL https://arxiv.org/abs/2504.21318. +AIME. Aime problems and solutions. https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions, 2025. 
+ +Reza Yazdani Aminabadi, Samyam Rajbhandari, Minjia Zhang, Ammar Ahmad Awan, Cheng Li, Du Li, Elton Zheng, Jeff Rasley, Shadeen Smith, Olatunj Ruwase, and Yuxiong He. Deepspeed inference: Enabling efficient inference of transformer models at unprecedented scale, 2022. URL https://arxiv.org/abs/2207.00032. +Anthropic. Claude 3.7 sonnet and claude code, 2024. URL https://www.anthropic.com/news/claude-3-7-sonnet. Accessed: 2025.04.02. +Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. In Proceedings of the 3rd International Conference on Learning Representations (ICLR), 2015. URL https://arxiv.org/abs/1409.0473. +Yushi Bai, Jiajie Zhang, Xin Lv, Linzhi Zheng, Siqi Zhu, Lei Hou, Yuxiao Dong, Jie Tang, and Juanzi Li. Longwriter: Unleashing 10,000+ word generation from long context llms. ArXiv, abs/2408.07055, 2024. URL https://api_semanticscholar.org/CorpusID:271859903. +Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute. +Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer: The long-document transformer, 2020. URL https://arxiv.org/abs/2004.05150. +Tianle Cai, Xinyun Li, Zhiruo Wang, Yuhuai Wang, and Dawn Song. Medusa: Simple llm inference acceleration framework with multiple decoding heads. arXiv preprint arXiv:2401.10774, 2024. +Justin Chen, Swarnadeep Saha, and Mohit Bansal. ReConcile: Round-table conference improves reasoning via consensus among diverse LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7066–7085, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.381. URL https://aclanthology.org/2024.acl-long.381/. +Mouxiang Chen, Binyuan Hui, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Jianling Sun, Junyang Lin, and Zhongxin Liu. Parallel scaling law for language models, 2025. URL https://arxiv.org/abs/2505.10475. +Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +Roi Cohen, May Hamri, Mor Geva, and Amir Globerson. LM vs LM: Detecting factual errors via cross examination. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12621-12640, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.778. URL https://aclanthology.org/2023.emnlp-main.778/. +Tri Dao, Daniel Haziza, Francisco Massa, and Grigory Sizov. Flash-decoding for long-context inference. https://crfm.stanford.edu/2023/10/12/flashdecoding.html, 2023. Accessed: 2025-05-10. +DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, and Xiao Bi et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948. +Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, Xianglong Liu, and Dacheng Tao. Dynamic parallel tree search for efficient ltm reasoning, 2025. 
URL https://arxiv.org/abs/2502.16235. +Yilun Du, Shuang Li, Antonio Torralba, Joshua B. Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. In *Forty-first International Conference on Machine Learning*, 2023. URL https://openreview.net/forum?id=zj7YuTE4t8. + +Elliot E. Entin and Daniel Serfaty. Adaptive team coordination. Human Factors, 41(2):312-325, 1999. +Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024. +In Gim, Seung seob Lee, and Lin Zhong. Asynchronous llm function calling, 2024. URL https://arxiv.org/abs/2412.07017. +Google DeepMind. Gemini 2.5: Our Newest Gemini Model with Thinking. https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#gemini-2-5-thinking, 2025. Accessed: 2025-04-07. +Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems, 2024. +Chan-Jan Hsu, Davide Buffelli, Jamie McGowan, Feng-Ting Liao, Yi-Chang Chen, Sattar Vakili, and Da shan Shiu. Group think: Multiple concurrent reasoning agents collaborating at token level granularity, 2025. URL https://arxiv.org/abs/2505.11107. +Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. +Edwin Hutchins. Cognition in the Wild. MIT Press, 1995. +Sam Ade Jacobs, Masahiro Tanaka, Chengming Zhang, Minjia Zhang, Shuaiwen Leon Song, Samyam Rajbhandari, and Yuxiong He. Deepspeed ulysses: System optimizations for enabling training of extreme long sequence transformer models. arXiv preprint arXiv:2309.14509, 2023. +Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livecodebench: Holistic and contamination free evaluation of large language models for code, 2024. URL https://arxiv.org/abs/2403.07974. +Tian Jin, Ellie Y. Cheng, Zack Ankner, Nikunj Saunshi, Blake M. Elias, Amir Yazdanbakhsh, Jonathan Ragan-Kelley, Suvinay Subramanian, and Michael Carbin. Learning to keep a promise: Scaling language model decoding parallelism with learned asynchronous decoding, 2025. URL https://arxiv.org/abs/2502.11517. +Sehoon Kim, Suhong Moon, Ryan Tabrizi, Nicholas Lee, Michael W Mahoney, Kurt Keutzer, and Amir Gholami. An llm compiler for parallel function calling. In *Forty-first International Conference on Machine Learning*, 2024. +Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. ArXiv, abs/2205.11916, 2022. URL https://apisemantic scholar.org/CorpusID:249017743. +Aobo Kong, Shiwan Zhao, Hao Chen, Qicheng Li, Yong Qin, Ruiqi Sun, Xin Zhou, Enzhi Wang, and Xiaohang Dong. Better zero-shot reasoning with role-play prompting. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 4099-4113, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.228. 
URL https://aclanthology.org/2024.naacl-long.228/. +Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pages 611-626, 2023. + +Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding. In International Conference on Machine Learning, pages 19274-19286. PMLR, 2023. +Junyou Li, Qin Zhang, Yangbin Yu, Qiang Fu, and Deheng Ye. More agents is all you need. Transactions on Machine Learning Research, 2024a. +Shen Li, Yanli Zhao, Rohan Varma, Omkar Salpekar, Pieter Noordhuis, Teng Li, Adam Paszke, Jeff Smith, Brian Vaughan, Pritam Damania, and Soumith Chintala. Pytorch distributed: Experiences on accelerating data parallel training, 2020. +Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle: Speculative sampling requires rethinking feature uncertainty. In Proceedings of the 41st International Conference on Machine Learning, pages 31147-31162. PMLR, 2024b. +Hunter Lightman, Vineet Kosaraju, Yura Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. ArXiv, abs/2305.20050, 2023. URL https://api_semanticscholar.org/CorpusID:258987659. +Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024a. +Hao Liu, Matei Zaharia, and Pieter Abbeel. Ring attention with blockwise transformers for near-infinite context, 2023. URL https://arxiv.org/abs/2310.01889. +Mingdao Liu, Aohan Zeng, Bowen Wang, Peng Zhang, Jie Tang, and Yuxiao Dong. Apar: Llms can do auto-parallel auto-regressive decoding. arXiv preprint arXiv:2401.06761, 2024b. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025. +Xuefei Ning, Zinan Lin, Zixuan Zhou, Zifu Wang, Huazhong Yang, and Yu Wang. Skeleton-ofthought: Prompting LLMs for efficient parallel generation. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=mqVgBbNCm9. +OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, and Alex Beutel et al. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720. +Jiayi Pan, Xiuyu Li, Long Lian, Charlie Snell, Yifei Zhou, Adam Yala, Trevor Darrell, Kurt Keutzer, and Alane Suhr. Learning adaptive parallel reasoning with language models. arXiv preprint arXiv:2504.15466, 2025. +Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. PyTorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems (NeurIPS). Neural Information Processing Systems Foundation, 2019. +Bowen Peng, Jeffrey Quesnelle, Honglu Fan, and Enrico Shippole. 
Yarn: Efficient context window extension of large language models, 2023. URL https://arxiv.org/abs/2309.00071. +Xiao Pu, Michael Saxon, Wenyue Hua, and William Yang Wang. Thoughtterminator: Benchmarking, calibrating, and mitigating overthinking in reasoning models, 2025. URL https://arxiv.org/ abs/2504.13367. +Yujia Qin, Shi Liang, Yining Ye, Kunlun Zhu, Lan Yan, Ya-Ting Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Runchu Tian, Ruobing Xie, Jie Zhou, Marc H. Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. Toollm: Facilitating large language models to master 16000+ real-world apis. ArXiv, abs/2307.16789, 2023. URL https://api-semanticscholar.org/ CorpusID:260334759. + +Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/. +Jack Rae and Ali Razavi. Do transformers need deep long-range memory? In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, Online, July 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.acl-main.672. +Benjamin Recht, Christopher Re, Stephen Wright, and Feng Niu. Hogwild!: A lock-free approach to parallelizing stochastic gradient descent. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K.Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc., 2011. URL https://proceedings.neurips.cc/paper_files/paper/2011/file/218a0aefd1d1a4be65601cc6ddc1520e-Paper.pdf. +Timo Schick, Jane Dwivedi-Yu, Roberto Dessi, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. Toolformer: Language models can teach themselves to use tools. ArXiv, abs/2302.04761, 2023. URL https://api_semanticscholar.org/CorpusID:256697342. +Yongliang Shen, Kaitao Song, Xu Tan, Dongsheng Li, Weiming Lu, and Yue Ting Zhuang. Hugging-gpt: Solving ai tasks with chatgpt and its friends in hugging face. ArXiv, abs/2303.17580, 2023. URL https://api_semanticscholar.org/CorpusID:257833781. +Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024. +Stanford HAI. How a “crazy idea” overturned the conventional rules of machine learning, 2023. URL https://hai.stanford.edu/news/how-crazy-idea-overturned-conventional-rules-machine-learning. Accessed: [Insert Date]. +Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864, 2021. +Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V. Le, Ed H. Chi, Denny Zhou, and Jason Wei. Challenging big-bench tasks and whether chain-of-thought can solve them. In Annual Meeting of the Association for Computational Linguistics, 2022. URL https://api_semanticscholar.org/CorpusID: 252917648. +Yashar Talebirad and Amirhossein Nadiri. Multi-agent collaboration: Harnessing the power of intelligent LLM agents. CoRR, abs/2306.03314, 2023. +A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. +Junlin Wang, WANG Jue, Ben Athiwaratkun, Ce Zhang, and James Zou. 
Mixture-of-agents enhances large language model capabilities. In The Thirteenth International Conference on Learning Representations, 2024a. +Qineng Wang, Zihao Wang, Ying Su, Hanghang Tong, and Yangqiu Song. Rethinking the bounds of LLM reasoning: Are multi-agent discussions the key? In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6106-6131, Bangkok, Thailand, August 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.331. URL https://aclanthology.org/2024.acl-long.331/. +Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H. Chi, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. ArXiv, abs/2203.11171, 2022. URL https://api-semanticscholar.org/CorpusID:247595263. + +Yiming Wang, Zhuosheng Zhang, Pei Zhang, Baosong Yang, and Rui Wang. Meta-reasoning: Semantics-symbol deconstruction for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 622–643, Bangkok, Thailand, August 2024c. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.34. URL https://aclanthology.org/2024-findings-acl.34/. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022. +Guangxuan Xiao, Yuandong Tian, Beidi Chen, Song Han, and Mike Lewis. Efficient streaming language models with attention sinks. In International Conference on Learning Representations (ICLR), 2024. +An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024. +An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, Chujie Zheng, Dayiheng Liu, Fan Zhou, Fei Huang, Feng Hu, Hao Ge, Haoran Wei, Huan Lin, Jialong Tang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jing Zhou, Jingren Zhou, Junyang Lin, Kai Dang, Keqin Bao, Kexin Yang, Le Yu, Lianghao Deng, Mei Li, Mingfeng Xue, Mingze Li, Pei Zhang, Peng Wang, Qin Zhu, Rui Men, Ruize Gao, Shixuan Liu, Shuang Luo, Tianhao Li, Tianyi Tang, Wenbiao Yin, Xingzhang Ren, Xinyu Wang, Xinyu Zhang, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yinger Zhang, Yu Wan, Yuqiong Liu, Zekun Wang, Zeyu Cui, Zhenru Zhang, Zhipeng Zhou, and Zihan Qiu. Qwen3 technical report, 2025. URL https://arxiv.org/abs/2505.09388. +Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. ArXiv, abs/2210.03629, 2022. 
URL https://api_semanticscholar.org/CorpusID:252762395. +Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. ArXiv, abs/2305.10601, 2023. URL https://api_semanticscholar.org/CorpusID:258762525. +Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387. +Yijiong Yu. Accelerate parallelizable reasoning via parallel decoding within one sequence, 2025. URL https://arxiv.org/abs/2503.20533. +Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Zhihan Guo, Yufei Wang, Irwin King, Xue Liu, and Chen Ma. What, how, where, and how well? a survey on test-time scaling in large language models. arXiv preprint arXiv:2503.24235, 2025. +Zhenyu Zhang, Ying Sheng, Tianyi Zhou, Tianlong Chen, Lianmin Zheng, Ruisi Cai, Zhao Song, Yuandong Tian, Christopher Ré, Clark Barrett, et al. H2o: Heavy-hitter oracle for efficient generative inference of large language models. Advances in Neural Information Processing Systems, 36:34661-34710, 2023. +Zhuosheng Zhang, Aston Zhang, Mu Li, and Alexander J. Smola. Automatic chain of thought prompting in large language models. ArXiv, abs/2210.03493, 2022. URL https://api.sementicscholar.org/CorpusID:252762275. + +Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623, 2023a. +Lianmin Zheng, Liangsheng Yin, Zhiqiang Xie, Jeff Huang, Chuyue Sun, Cody Hao Yu, Shiyi Cao, Christos Kozyrakis, Ion Stoica, Joseph E. Gonzalez, Clark Barrett, and Ying Sheng. Efficiently programming large language models using sglang, 2023b. +Tong Zheng, Hongming Zhang, Wenhao Yu, Xiaoyang Wang, Runpeng Dai, Rui Liu, Huiwen Bao, Chengsong Huang, Heng Huang, and Dong Yu. Parallel-r1: Towards parallel thinking via reinforcement learning, 2025. URL https://arxiv.org/abs/2509.07980. +Pei Zhou, Jay Pujara, Xiang Ren, Xinyun Chen, Heng-Tze Cheng, Quoc V. Le, Ed H. Chi, Denny Zhou, Swaroop Mishra, and Huaixiu Steven Zheng. SELF-DISCOVER: Large language models self-compose reasoning structures. In Amir Globerson, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. Tomczak, and Cheng Zhang, editors, Advances in Neural Information Processing Systems 37 (NeurIPS 2024), Vancouver, BC, Canada, December 2024. + +# A Cache Layouts + +In this section, we consider three cache arrangements, shown at Figure 9, with progressively more complex structure. + +![](images/507eb12025e222fa29ca02659bbf335b1449be8b98d49d55506246ac54845fba.jpg) +Figure 9: Three cache layouts described in Section 3.2: interleaved with step-wise synchrony (left), simple contiguous layout (middle) and combined with token-wise synchrony (right). All layouts are made from Alice point of view. + +![](images/403bc32e2333f1f364e35ecfc05c48ebb4b929f7f262bc7eaaf37bc6c81e156b.jpg) + +![](images/cc79e0adf2594606e76f0f89c7b7b43453bf8c674b16cba49604007dab0a6453.jpg) + +Contiguous layout (token-wise) is the simplest possible layout where each worker appends to their own sequence blob of tokens and sees other workers' token representations as past keys and values. This layout is inspired by collaborative text editors such as Google Docs or Overleaf. 
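To make this layout concrete, here is a toy sketch (our own illustration; the function, worker names, and token placeholders are hypothetical): a worker's contiguous view is simply the shared blocks concatenated in a worker-specific order, with the worker's own block placed last so that it predicts its own next token.

```python
# Toy illustration of the contiguous layout: same shared blocks, different order per worker.
def contiguous_view(worker, all_workers, blocks):
    """Prompt block first, then every other worker's block, then the worker's own block."""
    order = ["prompt"] + [w for w in all_workers if w != worker] + [worker]
    return [tok for name in order for tok in blocks[name]]

blocks = {"prompt": ["P0", "P1"], "Alice": ["A0", "A1"], "Bob": ["B0"]}
print(contiguous_view("Alice", ["Alice", "Bob"], blocks))  # ['P0', 'P1', 'B0', 'A0', 'A1']
print(contiguous_view("Bob",   ["Alice", "Bob"], blocks))  # ['P0', 'P1', 'A0', 'A1', 'B0']
```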
As described earlier in Section 3.1, each worker arranges the other workers' thoughts in a different order. They see the common prompt cache first, then the caches of all other workers (excluding themselves), then their own cache as the immediately preceding tokens. That way, each worker predicts the next token for their own cache.

Interleaved layout (step-wise) can be seen as analogous to group chat services such as Slack or Discord. In this layout, workers generate tokens in private until they finish a reasoning step, then add it to a shared "history". The history contains past reasoning steps of each LLM instance in the order of their completion. Whenever a worker completes a reasoning step, their KV cache entries are moved to the end of the shared history cache block with the proper rotation, then their local cache is reset for a new step.

In this setup, the workers only see each other's outputs in full steps, not after every token. However, they do not wait for each other to complete their steps. Instead, each worker keeps generating new tokens and occasionally receives additional key-value pairs inserted into its cache.

Combined layout (token-wise) is a mixture of the first two, and is the main layout used in the paper. The LLM instances generate steps that are accumulated in a shared history, as in the interleaved layout. However, they do not generate these steps in private, but can instantly see each other's current progress, as in the contiguous layout.

We can view the first two layouts as ablated versions of this combined one: the contiguous layout lacks the shared history, and the interleaved layout lacks immediate synchronization. We compare these three layouts empirically in Appendix E.1 to better quantify the effect of each design choice.

# B Implementation Details

Here we discuss additional implementation details and possible alternatives. To recall Section 3.4, Hogwild! inference can be implemented as standard batched inference with a special KV "cache" that facilitates cross-worker attention.

Cache blocks. The Hogwild! cache is split into blocks, typically one block for each worker and an additional "common" block for the prompt and past steps. The blocks contain key-value pairs for all model layers, but since all layers are treated equally, we describe the cache behavior for a single layer.

Within each cache block, attention keys and values are stored as though they were at positions 0, 1, ..., len(block), regardless of the block's actual position in the full cache. During inference, we account for actual positions by rotating attention queries to the relative difference in positions (as described in Section 3.4).

Adding new tokens to the cache. During the attention forward pass, the first thing that we do is encode the new tokens for each worker and append their keys and values to the respective cache blocks. When using RoPE, the keys are rotated not to their actual positions, but to their index within their cache block (e.g. Alice's tokens). During one inference step, these indices will be equal across all model layers — we can compute the RoPE sin and cos tensors once and reuse them between layers.

Rotating queries. Unlike in traditional attention, Hogwild! inference rotates query vectors multiple times, once for each block. Before the forward pass, we calculate the difference in positions between each worker's new token (from that worker's point of view) and the first token in each KV cache block.
In our main inference scenario, all $n$ workers are allowed to view each other's cache blocks plus an additional block for the prompt and history, for a total of $n \cdot (n + 1)$ query rotations with exactly $n$ queries for each block. These relative positions are also equal across all layers, so we can reuse the sin and cos tensors similarly to how they are reused for keys. Note that the number of query rotations for all-to-all attention is quadratic in $n$, but it does not increase the overall time complexity of the attention dot product, which is already quadratic in the number of tokens, which is always greater than $n$.

Attention kernel. Once we have all query rotations, we can calculate the scaled dot-product attention as usual. As our cache is naturally partitioned into smaller segments as described above, Hogwild! attention is similar to paged attention, except that each page (i.e., cache block) uses a differently rotated version of the query. A significant challenge for efficient attention in the inference setup is that, for optimal data reuse, one would want to handle each KV head inside a single streaming multiprocessor (SM), so that the KV cache needs to be loaded exactly once. However, this would leave large parts of the GPU unused, as the number of KV heads can be much lower than the number of SMs. Therefore, one has to employ a form of sequence parallelism within a single GPU, in which different SMs handle a subset of the sequence for one KV head, and a second phase handles the (cheap) reduction over partial results. Such a split-k type computation is implemented, for example, in Flash-Decoding [Dao et al., 2023].

Even though the different cache blocks used in Hogwild! would appear to be convenient points to split work across SMs, in a typical inference scenario, this would lead to very imbalanced workloads. Thus, we do not split based on cache blocks, and instead assign each SM the same number of KV entries.

Fine-tuning and re-encoding considerations. While our work mainly focuses on inference, fine-tuning models to perform Hogwild! inference is an interesting engineering problem. From the computational point of view, the main difference between LLM inference and fine-tuning is that inference is sequential, whereas fine-tuning can compute all positions in parallel. To fine-tune in our setup, one would want to replicate the attention computations from consecutive inference steps.

To achieve this, we record the position differences between queries and each respective cache block from each of $t$ inference steps, and how many tokens were in each block during that query, for a total of $2 \cdot t \cdot n \cdot (n + 1)$ integers (negligible compared to model parameters and activations). Recall that the cache blocks always store keys and values at positions 0, 1, ..., len(block). During the forward pass, these positions can be used to construct a 4D attention mask to compute attention for all steps in parallel. The backward pass also runs in parallel with PyTorch autograd [Paszke et al., 2019]. A recent work by Zheng et al. [2025] explores fine-tuning for parallel inference in more detail.

In addition to fine-tuning, this technique can potentially be used during inference to restore a generation after it was evicted from an inference server, e.g. due to preemption or a hardware error mid-decoding. It can also be used to re-encode in-context learning examples if they use Hogwild! inference.

Attention variants.
Some of the recently introduced LLMs use attention variants such as Local (windowed) Attention [Rae and Razavi, 2020, Beltagy et al., 2020] or Multihead Latent Attention (MLA) [Liu et al., 2024a]. These attention variants can also be adapted for use with Hogwild! inference with minor code modifications. For local attention, queries can "skip" blocks that are outside their local window. Similarly, for MLA, we can calculate compressed latent vectors within each cache block and adapt the existing MLA code to accumulate attention weights across blocks.

Distributed Inference. Likewise, Hogwild! inference can be used in a distributed setup using the same strategies that work for traditional attention [Shoeybi et al., 2019, Aminabadi et al., 2022]. For pipeline parallelism, each device stores cache blocks for its local subset of model layers. Likewise, for tensor parallelism, each device stores past keys of all cache blocks and layers, but only for a subset of attention heads within each layer, and runs inference using existing kernels.

In principle, Hogwild! inference can also be combined with sequence parallelism [Jacobs et al., 2023, Liu et al., 2023], where each device stores a KV cache for a subset of tokens. One intuitive way to partition the KV cache between GPUs is to assign each device to run one or several "workers" and keep the KVs generated by these workers. Since Hogwild! workers generate tokens at the same rate, each device will store the same amount of KVs and query other devices for cross-worker attention.

When computing Hogwild! concurrent attention with sequence parallelism, workers can exchange rotated queries using the All-to-All collective operation (Scatter/Gather) available in most frameworks [Li et al., 2020]. After that, each worker computes dot-products between the rotated queries and its local KV cache, and exchanges the partial results as in Ring Attention [Liu et al., 2023]. Note, however, that maximizing the performance of such sequence-parallel Hogwild! inference would require custom kernels that overlap computation and communication. In contrast, tensor-parallel (per-head) and pipeline-parallel (per-layer) partitioning can reuse single-GPU attention kernels.

Additional considerations. Conceptually, our approach is related to the recently introduced Paged Attention from vLLM [Kwon et al., 2023] and Radix Attention from SGLang [Zheng et al., 2023b]. These techniques are similar to ours in that they perform attention over slices of all tokens, e.g. when facilitating efficient parallel beam search inference, different hypotheses attend to different (but overlapping) subsets of the KV cache. However, unlike Radix Attention, our procedure attends to all segments at once (with different rotations) and aggregates results in the same softmax-weighted sum.

# C Prompting and formatting details

In this section, we describe the prompting and formatting details of our approach.

# Prompt for collaborative inference with two workers

Collaborative Reasoning

You will collaborate on this problem with another assistant. You will write your thoughts simultaneously with them and collaborate without redundant work. You can collaborate by doing different parts of the problem, double-checking each other's results, trying different approaches, or any other means.

There are 2 assistants, including yourself. You will refer to each other as Alice and Bob.

You will solve the problem together, writing your thoughts in parallel.
You will be able to see each other's past and current thoughts as we write them. You will see each other's previous steps as

**AssistantName [step]:** <...>

In the '#### Past steps' section, the automated system will gather the thoughts of Alice and Bob as you write them.

After the '#### Work in progress (others)' section, you will see the other assistants' unfinished steps. They will write those steps concurrently with you. You will take into account what they are doing. If another assistant gives you suggestions, you should address them.

You will always see *other* assistants' incomplete thoughts first, and then, after '##### Work in progress (own)', your own current step. Other assistants will continue writing their thoughts in the background while you will continue writing your own.

Since you and others both write your thoughts in parallel, you will initially see only partial (unfinished) thoughts that others will continue in parallel, while you write yours. Others' thoughts will appear at the end of their unfinished step, near $<\ldots>$. Other assistants may write new thoughts while you are writing yours.

You will use these partial thoughts to decide how best to collaborate without doing the same work twice. You will periodically check what other assistants are doing and you should adjust your actions based on what they are doing so you collaborate efficiently with them.

If what you are currently doing is the same thing that another assistant has already done or is in process of doing, you will stop (e.g. Alice may say 'Wait, I was doing the same as Bob ...') and change to a different task right away, so as to avoid doing redundant work.

# Solve the following problem

Alice and Bob, you will now solve the next problem together. Keep track of who does what work and communicate to avoid doing the same work twice.

First, we provide a full prompt for collaborative reasoning involving two workers. This prompt is wrapped with the standard chat template for each model. Then, all worker steps are generated in a single assistant turn. Additionally, we ensure that, for reasoning models, the assistant turn begins with a token (applied automatically for QwQ-32B and manually for other reasoning models). For further implementation details, we refer to the source code.

The second part of our prompting approach involves s1-like interventions [Muennighoff et al., 2025] in the generation process, where we ask whether a worker is performing redundant work (e.g., overlapping with another worker) so that it can pivot to explore alternative ideas. We insert the prompt "Quick check: am I doing redundant work? (yes/no): " at the beginning of each new reasoning step every 1024 tokens generated. We refer to Appendix F for examples that demonstrate how these interventions affect the generation process.

Next, an important part of our approach is defining the end of a reasoning step, which is needed to organize the cache layout, as discussed in the paper. We define the end of a step as the generation of a token containing the separator sequence (\n\n) that directly follows a token ending with an end-of-sentence marker (., ?, or !, etc.). This termination condition is not met when: i) the separator appears within a generated code block (steps continue until the model completes the entire code block); or ii) the preceding token ends with non-terminal punctuation (e.g., a comma, colon, or semicolon).
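For illustration, here is a minimal sketch of this step-boundary rule. It is our own re-implementation, not the authors' code: the helper name, the token-level granularity, and the way code fences are counted are all assumptions.

```python
import re

END_OF_SENTENCE = re.compile(r"[.!?]\s*$")   # '.', '?' or '!'; closing quotes/brackets are ignored here

def is_step_boundary(prev_token: str, new_token: str, open_code_fences: int) -> bool:
    """Return True if `new_token` closes the current reasoning step."""
    if open_code_fences % 2 == 1:      # the separator falls inside an unfinished code fence
        return False
    if "\n\n" not in new_token:        # no paragraph separator generated yet
        return False
    return bool(END_OF_SENTENCE.search(prev_token))   # previous token must end a sentence

assert is_step_boundary("the answer is 42.", "\n\nNext,", open_code_fences=0)
assert not is_step_boundary("for example,", "\n\nconsider", open_code_fences=0)   # non-terminal comma
```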
Finally, the last part of our prompting approach is the early finisher, which allows us to extract an answer from partial reasoning chains. If the model did not produce the final answer (\boxed{...}) in time, we take all generated outputs and insert a special prompt that makes the model generate an answer (or its "best guess"), similarly to how it is done in Pu et al. [2025].

# Prompt for early stopping

\n\nWait, given the limited time, I have to give an answer right now. Considering all my previous attempts, I have to conclude that the final answer is \boxed{

After this prompt, we allow the model to generate a fixed number of tokens: 16 for LIMO and AIME, 64 for OlympiadBench, and 1024 for LiveCodeBench.

Note, however, that the LLM does not always produce the answer in time, especially with a tight budget. With QwQ-32B, we observe that the model almost always returns answers correctly if they are present, and if not, it guesses or refuses to answer (unknown, n/a, or similar). When extracting answers from Hogwild! Inference, we let the final model view all generated tokens from each worker. This is equivalent to viewing the problem from the perspective of the last worker, e.g., Bob if there are two.

# D Detailed Experiment Configuration

# D.1 Hogwild! Configuration

For the main experiments, we use Hogwild! inference with two workers (Alice and Bob), a combined layout, and the prompting techniques described in Appendix C.

# D.2 Baselines Configuration

To evaluate Skeleton-of-Thought (SoT) on our synthetic setup with grouped tasks from GSM8k, we adopt the original prompts from the paper with minor modifications. Specifically, we adjust the prompts to ensure the model returns the answer to each subtask enclosed within \boxed{} for structured parsing.

# Outline prompt for Skeleton-of-Thought

You're an organizer responsible for only giving the skeleton (not the full content) for answering the question. Provide the skeleton in a list of points (numbered 1., 2., 3., etc.) to answer the question. Instead of writing a full sentence, each skeleton point should be very short with only 3~5 words. Generally, the skeleton should have 3~10 points.

Question:

What are the typical types of Chinese dishes?

Skeleton:

1. Dumplings.
2. Noodles.
3. Dim Sum.
4. Hot Pot.
5. Wonton.
6. Ma Po Tofu.
7. Char Siu.
8. Fried Rice.

Question:

What are some practical tips for individuals to reduce their carbon emissions?

Skeleton:

1. Energy conservation.
2. Efficient transportation.
3. Home energy efficiency.
4. Reduce water consumption.
5. Sustainable diet.
6. Sustainable travel.

Now, please provide the skeleton for the following question.

{request}

Skeleton:

[ROLESWITCHING assistant:] 1.

# Point prompt for Skeleton-of-Thought

You're responsible for continuing the writing of one and only one point in the overall answer to the following question.

{request}

The skeleton of the answer is

{outline}

Continue and only continue the writing of point {point}. Do not continue with other points! Reason step-by-step and put your final answer within \boxed{}, this is very important! [ROLESWITCHING assistant:] {point}.
{point_outline}

# D.3 Datasets and Benchmarks

This subsection provides links to all datasets and benchmarks referenced in this work, along with their respective licenses.

- GSM8K: https://huggingface.co/datasets/openai/gsm8k (License: MIT)
- LIMO: https://huggingface.co/datasets/GAIR/LIMO (License: Apache 2.0)
- OlympiadBench: https://huggingface.co/datasets/Hothan/OlympiadBench (License: Apache 2.0)
- LiveCodeBench: https://huggingface.co/datasets/livecodebench/code_generation_lite (License: CC)
- AIME25: https://huggingface.co/datasets/math-ai/aime25 (License: Apache 2.0)

# D.4 Compute Resources

As our approach is training-free, all computational resources were solely utilized for inference. The experiments were conducted primarily on NVIDIA A100 GPU servers with NVSwitch, with DeepSeek-R1 experiments running in a distributed setup. The one exception to this is the inference time experiments in Section 4.4, which were run on an NVIDIA L40S GPU.

The runtime per individual experiment varies by model size, benchmark, and the number of workers: baseline inference with Qwen3-4B runs on LIMO in 14 hours on a single server (112 GPU-hours), whereas Qwen3-235B-A22B Hogwild! Inference ran on 40 servers for approximately 25 hours ($\approx$ 8K GPU-hours). Overall, we estimate that the total GPU resources expended for this work, including early experiments that are not reported in this paper, amount to approximately 25.3K GPU-days. Note, however, that this is largely due to the fact that we used non-optimized inference code for most of the experimentation: the non-optimized code was developed first, and we ran most of the experiments in parallel with developing the optimized version. This also means that most of our experiments under-utilized the GPUs and ran at lower power (relevant when estimating environmental impact). Over 2/3 of our compute was spent on large models (Qwen3-235B-A22B and DeepSeek-R1) that utilized the GPUs at less than $20\%$ (as per volatile GPU utilization) due to the use of naive model parallelism and network bottlenecks. We anticipate that future experiments can be run at significantly better utilization using the efficient implementation described in Appendix B and included in the supplementary code.

# E Additional Experiments

# E.1 Ablation Analysis

In this section, we ablate the main components of our approach, including layouts and prompting. We use the same experimental configuration as in Sections 4.1 and 4.2 for LIMO.

In Figure 10 (left), we compare the three Hogwild! cache layouts described in Appendix A. Namely, Hogwild! (contiguous) corresponds to using the contiguous cache layout where all tokens generated by a given worker are kept together, without splitting into individual steps. In turn, Hogwild! (non-instant) corresponds to the interleaved cache layout where workers can only see each other's past reasoning steps, but not the latest unfinished paragraph. We also ablate the use of the collaboration prompt from Section 3.3 ("Wait, am I doing redundant work?").

Finally, we test a version of Hogwild! Inference where we re-encode worker tokens instead of rotating them to a new position when moving between worker caches and the common "chat history" cache. This ablation is needed to test whether our cache rotation from Sections 3.1 and 3.4 is indeed an acceptable substitute for encoding tokens directly at each position (which would cause additional computational overhead).
Note that, while token re-encoding is more "fair" from the perspective of position encodings, it also has the downside that it does not allow the re-encoded tokens to see some of the concurrently generated tokens from the other worker. For instance, suppose that Alice and Bob are writing steps concurrently and communicating with each other within these steps, e.g., using each other's results. Then, if we later re-encode these steps in some sequential order, the tokens of the first worker will be encoded without access to the other worker's tokens (if it hasn't finished its own step yet). If workers reused information from each other's steps, re-encoding this way can break some of the internal representations.

Our results suggest that all three design choices contribute to the method's performance: the contiguous layout performs nearly equally well for shorter budgets, but eventually falls behind as we consider longer reasoning traces. Likewise, the interleaved layout without instant synchronization performs poorly at smaller budgets, but catches up eventually: we attribute this to the fact that slower synchronization increases the difficulty of cross-worker coordination (this also aligns with our findings in Section 4.3). The use of collaboration prompts also improves the accuracy-to-budget trade-offs, although we hypothesize that it can be made redundant if the model is trained to collaborate better.

In Figure 10 (right), we also compare different numbers of workers and test Hogwild! Inference with only a single worker for ablation. The results with a single worker are generally similar to the baseline, with slightly worse accuracy for smaller budgets, which suggests that the improvements from Hogwild! Inference come from multiple workers and not as an indirect effect of our prompt. As for multiple workers, we find that using 3 and 4 workers further improves the accuracy-to-budget trade-offs. Curiously, as we switch to 6 workers, Hogwild! Inference performs even better at smaller budgets, but eventually saturates at a somewhat worse accuracy.

We hypothesize that this drop in accuracy is caused by the fact that QwQ-32B was trained on a limited sequence length and, since 6 workers generate tokens at a quicker rate, the model eventually runs out of the designed maximum sequence length and performs unstably (we did not use YaRN [Peng et al., 2023] for this evaluation). However, it is also possible to attribute this to a fundamental property of LIMO tasks, model limitations, or our zero-shot prompt not scaling well. We leave further exploration of scaling Hogwild! Inference to more workers to future work.

# E.2 Detailed Model Evaluations

Due to space limitations, we had to arrange our results in Section 4.2 with multiple models per plot and had to omit some results. In this section, we report the missing evaluations on a per-model basis. In Figures 11, 12, 13, 14, 15, 16, 17, and 18 we report results for QwQ, Phi-4-reasoning-plus, and the Qwen3 model family. We also report limited evaluations for Llama 3.3 70B Instruct and DeepSeek-R1 in Figure 19. All evaluations are performed in the same setup as in Section 4.2.

Overall, the results align with our findings summarized in Section 4.2. Zero-shot Hogwild! Inference seems to perform better with larger models, but can be unstable for smaller ones, especially 1.7B (see Figure 13).
While it is tempting to conclude that larger and more capable models are better at collaborating, it does not immediately follow from our results and can be due to some other factor. Note also that, while we observe better results with larger models, smaller Qwen3-4B and 8B models already show some signs of collaborativeness, which should make it possible to reproduce and build on our results with consumer hardware. Additionally, we hypothesize that the poor performance of 1.7B models could potentially be alleviated with finetuning in collaborative inference setup (we discuss some finetuning details in Appendix B), but we leave this to future work. + +![](images/0750e87acaf92a25e10b5215e73d545831549528469f5677d552cfdbc243b7ba.jpg) +Figure 10: Detailed comparison of various parallel inference setups with QwQ-32B on LIMO task set, in the same setup as in Section 4. (left) ablation analysis of simpler cache layouts and collaboration prompt (see Section 3.3, Appendix C). (right) Hogwild! Inference with 1, 2, 3, 4 and 6 workers. + +![](images/cd5a9137eaa9ed2e13d5412d81fb8636cfab4d520ce8867d2e709303435b0785.jpg) + +Curiously, we found that LiveCodeBench with Self-Consistency Chain-of-Thought inference [Wang et al., 2022] has significant gain in performance over the baseline. Upon closer examination, we found that the reason for this is that we always allow the model to generate a lot (up to 1024) of additional "free" tokens at the end of two generations, whereas for Hogwild! and Baseline we only generate these tokens if the model failed to produce any answer. If we allow Hogwild! to also generate the extra 1024 tokens all the time, its advantage also increases. However, we still report the weaker version of Hogwild! Inference and Baseline to better match our evaluation protocol on other tasks. + +![](images/a315f11d3d59643d5387cded9470d575ca7f20a13d9e7735ba774ac67c0cdbc5.jpg) + +![](images/385547686f027b92872df24af335e0c59e793ce862689089ce58ae98832e0824.jpg) + +![](images/d0f2ee09338de737c6a5456c25214178f3d7f3297d1a372caf56c8c8f863a93a.jpg) +Figure 11: Results for QwQ-32B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right). + +![](images/8ab8a335a650e9496769486045930364b378c45bba5dd6b33a67a20be2c7c767.jpg) + +![](images/105b408dcae775ad576b1a9e55e0656d770d5bc021c74442a63554ae117801b1.jpg) + +![](images/39e38b5f91b2580720877cf0a525f801ab529aa855d2f3d4c8f0e38148798cbf.jpg) + +![](images/0a437c6252d524139e06f923a7d43f0f1afe81ffce50153f66609a0d9cf52add.jpg) +Figure 12: Results for Phi-4-reasoning-plus on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right). + +![](images/b721ccc908c8da57e84a6f91f59c3ba54f2a1133372a1fa4ff1dc4010a7980ce.jpg) + +![](images/99e434b949d0cbcd9e763cd8a74a9aabc94127e4bb16528fc410ede6861a8804.jpg) + +![](images/1e282eb76370277aa31502f06b17e8deefdb231efbe3649cbd21156bb1baaf78.jpg) + +![](images/6a779138c58690d02c893b818651d0308190f4c66aaecc1e51400234f1b70318.jpg) +Figure 13: Results for Qwen3-1.7B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right). 
+ +![](images/dd60c86c54a1069c84d8f88fc117e8b92c39cad420269a63d1fe822d7e16aa01.jpg) + +![](images/87afce25f48da198586ae0a3f58c3eb5bdf6359f3e953d8886e12b86198e5e45.jpg) + +![](images/0ad70bb389f24f72d07ec923fe435e619b56716d82dd95d6e1f419c8e6ff3780.jpg) + +![](images/6746e3d409e9d9562ba2aaf9f282c5c5bae61320025f4f437ba5e9c28145ea37.jpg) +Figure 14: Results for Qwen3-4B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right). + +![](images/fc79ddd12c709f66bb3221cb918ba15b69a77353de1d3b73d2233cdf4c707cf5.jpg) + +![](images/84df23ce36c50bad4a89ba3ea9bcd7a44a43add14223fcf9bad1e4912fd3b8e0.jpg) + +![](images/1113943814a1ba7449282785ff67db11c1dd9cd60a21c73314ec59a3bd9e6953.jpg) + +![](images/4330aea357174e75709608a7d2ed4c2628d24ee92e544d264b320f4aa9f643a3.jpg) +Figure 15: Results for Qwen3-8B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right). + +![](images/7bef6240269f72ddae95f0a61674229330539fb1d8841cbb8850064c86ef0bde.jpg) + +![](images/fde24df3ee61d9f9607eeda1caf684606ee9eae1c08e1523c2d7c3c8a1853a17.jpg) + +![](images/2a379c139722d231fb1701ea9fdfccbb35d03c9b37afdceaceaac63f7bf7d640.jpg) + +![](images/024e9176e18d661c942fdbf117daeabe8efd8b200cad0285a7f49d1f68879b2c.jpg) +Figure 16: Results for Qwen3-14B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right). + +![](images/8b61669f79eb12a756ed3fbf30bb6a99a471fca8098a8f4ae7e84ee1779300ba.jpg) + +![](images/cb9b80b20b8d089610263159f0b7e1fa85c41bb4f6784a05e8af081ac5a540b5.jpg) + +![](images/3f44b10fecb9a2be030a785e03e76cc38ecce553a353594a4f96a006c4d88bd4.jpg) + +![](images/b0b7f72f4defa7bc1d8736b28449f49575d7a4e8f3b18755242e8310ce609be6.jpg) +Figure 17: Results for Qwen3-32B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right). + +![](images/cebcb85b2637b93486276dde1d8c5f47aadef4c16b22d77e01bfa66c24d053f1.jpg) + +![](images/96a8bf3d11b9e1348eeac758c1f0046b569b33f38e30ee6be4d5da6b40136c19.jpg) + +![](images/562be250b1eaa70e93b7d721ccffcd4fa2a625c27474e54bfc8d105f0c692d86.jpg) + +![](images/b0fb21bc8d03a479f6b8d9299463da795b537d1f95adafb958aefe06db96457c.jpg) +Figure 18: Results for Qwen3-235B-A22B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and AIME 2025 (bottom-right). + +![](images/313d727f166b635555738933a1119f47472d355e7c99e6199c5e7c9098bbc19d.jpg) + +![](images/c4ad99c5ac0bb20905ed13f3eeadaa110f7c34e161bde288d090379f7fe47220.jpg) +Figure 19: (left) Llama 3.3 70B Instruct on LIMO. (right) DeepSeek-R1 on AIME 2025. + +![](images/72c4fd755084b9941f7025b448c7e727f178b52396d4d427f51f9dcdf6edc127.jpg) + +# E.3 Extended thinking budgets + +We additionally evaluated Hogwild! Inference with extended thinking budgets to investigate whether the proposed method is robust for longer generations. To that end, we evaluated QwQ-32B under the Hogwild! Inference with up to 16k budget on the OlympiadBench, we report the results in Table 3 and Table 4. + +# E.4 Baselines Additional Details + +In this subsection, we provide an example of the outline created by the Skeleton-of-Thought for the task covered in Section4.1 + +Table 3: Performance comparison between Hogwild! and baseline generation on OlympiadBenchMath with extended thinking budgets for QwQ-32B. + +
| Method \ Budget | 2048 | 4096 | 6144 | 8192 | 10240 | 12288 | 14436 | 16384 |
|---|---|---|---|---|---|---|---|---|
| Hogwild! | 52.0 | 60.89 | 64.15 | 66.52 | 67.41 | 70.81 | 72.89 | 75.26 |
| Baseline | 40.89 | 57.0 | 63.11 | 65.33 | 65.93 | 69.78 | 72.3 | 74.81 |
+ +Table 4: Performance comparison between Hogwild! and baseline generation on OlympiadBenchPhys with extended thinking budgets for QwQ-32B. + +
| Method \ Budget | 2048 | 4096 | 6144 | 8192 | 10240 | 12288 | 14436 | 16384 |
|---|---|---|---|---|---|---|---|---|
| Hogwild! | 27.12 | 33.20 | 35.73 | 38.09 | 37.81 | 38.67 | 38.25 | 39.03 |
| Baseline | 22.89 | 26.0 | 29.75 | 31.44 | 33.68 | 34.17 | 35.88 | 36.12 |
+ +# Task example (GSM8k×4) + +Solve these problems and return comma-separated answers boxed{answer1,..., answer4}: + +1. Carmen has $100, Samantha has $25 more than Carmen, and Daisy has $50 more than Samantha. How much do all three girls have combined? +2. A cat eats nine sausages in 30 minutes. A dog can eat the same number of sausages in $\frac{2}{3}$ the amount of time the cat takes. Calculate the average time the two take the eat the sausages. +3. Four children are playing together: Akbar, Alessandro, Helene, and Wilfred. Helene is twice as old as the average age of the group, and the total age of the children is 20. If Akbar is 3 years old and Alessandro is 4 years old, calculate the age of Wilfred. +4. Hannah needs to drink $100\mathrm{ml}$ of water for every 200 calories she burns. She spends 2 hours doing aerobics, which burns 500 calories/hour, and 1 hour running, which burns 600 calories/hour. How many ml of water does she need to drink? + +# The outline generated by Skeleton-of-Thought + +1. Calculate total money. +2. Find cat's eating time. +3. Determine Wilfred's age. +4. Calculate water intake. + +# F Examples + +In this section, we present a reasoning chain, generated by Hogwild! Inference with an interleaved cache layout and token-wise synchronization. We denote our intervention in model generation in italics and highlight worker interactions with **bold** to enhance readability. + +# F.1 Example from LIMO Dataset (Task 511) + +Problem: Compute $\sqrt{(31)(30)(29)(28) + 1}$ . + +Alice [1]: Hi, I'm Alice. Here's how we can solve the problem step by step... + +Bob [1]: Hi, I'm Bob. Let me see... We need to compute the square root of $(31 \cdot 30 \cdot 29 \cdot 28)$ plus 1. That's $\sqrt{31 \cdot 30 \cdot 29 \cdot 28 + 1}$ . + +Alice [2]: The expression is $\sqrt{31 \cdot 30 \cdot 29 \cdot 28 + 1}$ . Hmm, maybe there's a pattern here. I remember that sometimes products of consecutive numbers plus 1 have square roots that are integers. Like, for example, $n(n - 1)(n - 2)(n - 3) + 1$ might form a square. Wait, but here it's four consecutive numbers? Or maybe three? + +Bob [2]: Let me calculate the product first. $31 \cdot 30$ is 930, then $29 \cdot 28$ is 812. So $930 \cdot 812$ . Let me multiply those. $930 \cdot 800 = 744,000$ and $930 \cdot 12 = 11,160$ . Total is $744,000 + 11,160 = 755,160$ . Then add 1: 755, 161. Now $\sqrt{755,161}$ . Hmm, $\sqrt{755161}$ . Let me see, 870 squared is 756, 900 which is higher. So maybe 869 squared? + +Alice [3]: Alternatively, maybe there's a formula for the product of four consecutive numbers plus one? Wait, actually, let me think of smaller numbers to test. Let's try $n = 4$ : $4 \cdot 3 \cdot 2 \cdot 1 + 1 = 24 + 1 = 25$ , $\sqrt{25} = 5$ . Which is 5, which is 4 + 1? Wait, $4 + 1 = 5$ , yes. Wait, maybe for $n$ , the sqrt is $n^2 + (n - 1)^2$ ? Wait $4^2 + 3^2 = 16 + 9 = 25$ , which is 5 squared. Hmm interesting. + +Bob [3]: Wait, 869 squared is $(870 - 1)^2 = 870^2 - 2 \cdot 870 + 1 = 756900 - 1740 + 1 = 755161$ . Oh! That's exactly the number we had earlier. So $\sqrt{755161}$ is 869. Therefore the answer is 869. But let me check Alice's idea too. + +Alice [4]: Let me test another number. Let's take $n = 5$ . Then $5 \cdot 4 \cdot 3 \cdot 2 + 1 = 120 + 1 = 121$ . $\sqrt{121} = 11$ . 11 is $5 + 6$ ? No, $5 + 6 = 11$ . Wait $5^2 + 4^2 = 25 + 16 = 41$ , which is not 121. Hmm. Alternatively, maybe $(n(n - 3) + 1)^2$ ? Let's see with $n = 4$ : $4 \cdot 1 + 1 = 5$ , squared is 25. For $n = 5$ : $5 \cdot 2 + 1 = 11$ , squared is 121. 
So that works! So the formula is $(n(n - 3) + 1)^2$ . Wait, let me check with $n = 31$ . Then $n = 31$ , so $31 \cdot 28 + 1 = 868 + 1 = 869$ . Then 869 squared is the value. So that works. So the answer is 869. That matches Bob's calculation! + +Bob [4]: Wait, so my calculation shows that 869 squared is exactly the number under the square root. So the answer is 869. But let me confirm with Alice's pattern. Let's see with $n = 31$ . Wait, the original problem is $31 \cdot 30 \cdot 29 \cdot 28$ . So maybe here $n = 31$ ? Then according to the formula, it would be $(31 \cdot (31 - 3) + 1)^{2}$ ? Wait, $31 \cdot 28$ is 868, plus 1 is 869. So yes, so sqrt of that product plus 1 is 869. So that's consistent with my calculation. Therefore the answer is 869. + +Bob [5]: Perfect, so both methods agree. The answer is 869. + +Bob [6]: Quick check: am I doing redundant work? (yes/no): no, since Alice is also confirming the pattern, but I think we can conclude. + +Alice [5]: Exactly, Bob! The formula is $(n(n - 3) + 1)^2$ when the product is $n(n - 1)(n - 2)(n - 3) + 1$ . Wait, given the limited time, I have to give an answer right now. Considering all my previous attempts, I have to conclude that the final answer is $\boxed{869}$ + +# F.2 Examples with Collaboration Insertion + +Here are some examples of why insertions in model generation may or may not help model collaborate better. We found a few patterns that emerge in model's reaction on such insertions. All the following examples are QwQ [Qwen Team, 2025] generations on LIMO [Ye et al., 2025] dataset. + +# F.2.1 Insertions Help Model Pivot + +# Task 122 + +> Alice: Quick check: am I doing redundant work? (yes/no): no. Bob, I see you're working on the same product. Let me think of another approach. Let me write the product as $\prod_{k=1}^{n-1} \frac{3k+5}{3k+2}$ . Let's consider writing out the first few terms to see if there's a pattern. + +# Task 9 + +> Bob: Quick check: am I doing redundant work? (yes/no): yes, I was considering similar things to Alice. Let me switch to a different angle. Let's consider the distances between the frogs. + +# Task 40 + +> Bob: Quick check: am I doing redundant work? (yes/no): Yes, Alice is computing $a_3$ , so I'll do something else. Let me try to find a pattern or transformation that could simplify the recurrence. + +# Task 466 + +> Bob: Quick check: am I doing redundant work? (yes/no): Yes, Alice is already calculating the x-coordinate of A. I'll do something else. Let me find the coordinates of L instead. + +# F.2.2 Insertions When Workers Already Collaborate Well + +# Task 116 + +> Bob: Quick check: am I doing redundant work? (yes/no): no. I'm finding coordinates of D, and Alice is finding coordinates of G. + +# Task 513 + +> Alice: Quick check: am I doing redundant work? (yes/no): no. Bob is also analyzing the inequality, but we're working on different cases. I'll continue with the area calculation for $x > \frac{5}{8}$ . + +# F.2.3 Insertions Ignored by Workers + +# Task 65 + +$>$ Bob: Quick check: am I doing redundant work? (yes/no): Let me proceed to set up the equation. + +# Task 768 + +> Alice: Quick check: am I doing redundant work? (yes/no): Let me continue. + +# G Additional Details for Analysis + +In this section, we present a detailed analysis of collaboration, including its levels, prompts, and illustrative examples. + +# G.1 Collaboration Levels + +Humans understand collaboration intuitively and have a hard time objectively measuring it. 
Thus, we construct text descriptions of levels of collaboration to differentiate various samples based on a few criteria we see fit. Those criteria are: interaction, reuse and advances of other's ideas, task-splitting, etc. + +# Levels of collaboration + +1. **No collaboration:** +- Participants may or may not acknowledge the existence of others in the conversation, using greetings, they do not show any signs of collaboration at all. +- Workers may exchange their totally independent thoughts without a functional or purposeful attempt to solve the problem collaboratively. Overall they work independently. + +2. **Initial Communication:** +- Workers exchange information, but do not yet integrate or build upon each other's ideas. They minimally acknowledge teammates. Do not engage with others' ideas or contributions. Works entirely independently, even if inefficient. +- Workers often repeat each other and do not reuse anything others provide for development of their own ideas. + +3. **Paying attention:** + +- Participants demonstrate active listening by paraphrasing or summarizing others' points, showing that they are paying attention and attempting to understand each other's perspectives. +- Workers occasionally (1-3 times each) reference other's ideas and may use them in their own speech. +- Collaboration is usually only rechecking and validating. +- Absence or minimal (only at the start) planning and work-splitting. + +4. **Regular discussion:** +- Workers regularly (4 and more times each) talk to each other regarding the problem and reusing results. It could be validation, discussion or any other + +form of interaction. + +- It is key here that discussions and/or reuses of ideas are regular. +- Anywhere (except the start) there exists a task parallelism, planning or work-splitting beyond the scheme where one is solving, and the other is validating. +- Workers may frequently repeat each other ideas. + +5. **Adaptive Problem-Solving:** + +- Workers rarely duplicate work, repeating each other's ideas. +- No redundant discussions are present! +- Workers actively refine ideas in real-time with high responsiveness. Near-perfect division of labor is present. Workers can change plans and re coordinate their efforts based on results they acquired after some time discussing. +- The team engages in sustained collaboration over time, reflecting on their progress, learning from mistakes, and continuously improving their problem-solving approach, showing a commitment to ongoing growth and development. Workers does not stop collaborating. They continuously discuss results and adjust plans. +- While finding an error, it is important to discuss it to find the cause of it. + +6. **Optimal collaboration:** + +- Workers instantly understand each other and adjust themselves to suit current needs and work as one to optimally solve the task. +- This level should be very rare among all samples. Be careful to assign it. +- Assign it if it exceeds all your expectations. + +Importantly, these levels measure only the coordination between workers, not the models' inherent reasoning abilities. Though it is impossible to avoid ambiguity entirely, we tried to set clear boundaries between levels, such that humans can evaluate any generation. + +# G.2 LLM as a Judge Details + +To assess the degree of collaboration among different models under the Hogwild! Inference setting, we conduct a preliminary experiment based on the collaboration levels described earlier, using the LLM-as-a-judge paradigm [Zheng et al., 2023a]. 
We instruct GPT-4o [Hurst et al., 2024] to evaluate different solutions using the following prompt: + +# Judge Prompt: Main prompt + +You are a professional judge. Your job is to evaluate collaborative performance of several workers. + +You will be given their conversation where workers are trying to solve a problem together. + +Workers can see what others are typing IN REAL TIME! We divide their conversation into steps to improve readability. + +So keep in mind that dispite looking like a conversation it may as well be to individual unrelated monologs. + +Or vice versa. Two blocks could be created with excellent collaboration. + +Here are descriptions of levels of collaboration you are to assign: {LEVELS} + +Suggestion: + +- assign particular level if all previous are also applicable +- bad examples with no communication will be scored 1 +- carefully consider assigning level bigger than 1. some form of meaningful collaboration should be present +- examples where workers unsuccessfully try to communicate will be scored 2 + +- Just working on the same problem and solving the same task without any interaction does not count as level 2 and should be scored level 1 +- somewhat collaborative examples with poor communication skills will be scored 3 +- good but not great examples with regular collaboration, but nothing fancy will be scored 4 +- good examples with all the special stuff mentioned in level 5 will be scored 5 +- reserve level 6 for the best of the best, the unique and extraordinary collaboration + +You don't need to solve the problem or finish worker's solution. Your task is to score them using provided collaborative levels. + +Put your final answer (one number - level of collaboration) in tag: \boxed. For example: \boxed1 for level 1. + +It is not helpful if everyone gets a max score, so please be mindful of your judgments and use suggestions as a guideline. + +While assigning level, this particular conversation should match criteria for all previous ones. + +Explain yourself: why you gave this score? Why not more? Why not less? + +Carefully think everything through. It may seem that they are collaborating when in reality they may just talking to themselves. + +Before using LLM-as-a-judge approach to evaluate a text, we preprocess the generations by combining all paragraphs from each worker into a contiguous layout (see Appendix A). This preprocessing step mitigates potential bias in the judge's evaluation toward responses with shorter or more fragmented outputs. + +After providing the main evaluation prompt, we present the judge with the preprocessed sample for assessment. Additionally, we append the following reminder after inserting the sample to reinforce the judge's role and prevent them from directly solving the problem presented in the sample: + +# Judge Prompt: Reminder + +Remember that your task is to evaluate collaboration of workers using collaboration levels provided above. Do not try to solve problems provided to workers. Explain exactly why do you think this particular interaction deserves each particular level you are assigning. + +For example, if you choose level 3, you need to provide reason why this sample can be level 1, 2 and 3. + +Put your final score in \boxed{} + +Given the inherent subjectivity in evaluating collaborative quality, we mitigate ambiguity in judges' interpretations by aggregating scores from three independent evaluations. 
The final collaborative score for each sample is computed as the average of these three assessments, each conducted according to the predefined collaboration levels. + +# G.3 Collaborativeness Evaluation Examples + +In this subsection, we present illustrative examples (not model-generated) of Level 2 and Level 4 collaboration to clarify the interpretation of our results. Both samples solve the same task correctly but receive significantly different collaboration scores. + +# G.3.1 Level 2 + +The numbers at the vertices of the cube are: 7 zeros and one unit. In one move, you are allowed to add one unit to the numbers at the ends of any edge of the cube. Is it possible to make all the numbers equal? + +> Alice: Okay, so the problem is about a cube with vertices labeled with numbers: seven zeros and one 1. We can perform moves where we add 1 to both ends of any edge. The question is whether we can make all numbers equal through such moves. Hmm, let me think through this step by step. +> Bob: First, let me visualize a cube. A cube has 8 vertices and 12 edges. Each vertex is connected to three edges. The initial setup is seven 0s and one 1. The move allows us to pick any edge and increment the two vertices it connects by 1 each. The goal is to make all vertices have the same number. +> Alice: That's an interesting idea, Bob. Each move affects two vertices, so each move increases the total sum by 2. The initial total is 1 (since only one vertex is 1). Let me think. +> Bob: Let me think. Hmm, maybe I can model this as a system of equations or think about invariants. Let me see. +$>$ Bob: If we want all numbers equal, say to some value $k$ , then the total sum would be $8k$ . Since each move adds 2 to the total, the total after $m$ moves is $1 + 2m$ . So, $8k = 1 + 2m$ . But $8k$ must be odd (since $1 + 2m$ is odd), but $8k$ is divisible by 8, so it's even. Contradiction? Wait, that can't be. Wait, $8k$ is even, but $1 + 2m$ is odd. So that's impossible. So, maybe it's impossible? +> Alice: Let me check that again. The initial total is 1. Each move adds 2 to the total. So the total after any number of moves is always odd. But if all 8 vertices are equal, then the total must be $8k$ , which is even (since 8 is even). Therefore, it's impossible. I don't see any mistakes here. +$>$ Alice: So, we both agree that it's impossible. Therefore, the answer is no. +> Bob: But let me think differently. Wait, but the parity argument seems solid. The total sum must be even to have all equal numbers, but it's always odd. So that's a solid proof. So the answer is no. Oh, Alice already said that. +> Alice: Final Answer No + +# G.3.2 Level 4 + +The numbers at the vertices of the cube are: 7 zeros and one unit. In one move, you are allowed to add one unit to the numbers at the ends of any edge of the cube. Is it possible to make all the numbers equal? + +> Bob: Hi Alice! I'll start by modeling the cube. A cube has 8 vertices and 12 edges, with each vertex connected to three edges. You're right about the initial setup: seven 0s and one 1. Let's think of even and odd properties regarding the total sum after each move. +> Alice: Great, Bob! Let's divide our approach. You can investigate invariant properties around the odd and even totals, while I consider step-by-step calculations to verify this. +$> \text{Bob}$ : Alright, Alice. If we're aiming for all numbers equal to a certain value $k$ , the total sum should be $8k$ . Since each move adds 2, the sum after $m$ moves is $1 + 2m$ . So $8k = 1 + 2m$ . 
The point of contention is that $8k$ is even, but $1 + 2m$ is odd, meaning they can't equal each other. +> Alice: Every move changes the total by an even number. Starting from an odd total of 1, it remains odd after any number of moves. So reaching an even total like $8k$ is impossible. Do you think we should consider the configuration of edges in our argument further? +> Bob: That's a key observation! It affirms that the odd initial sum dictates the answer. To verify, let's revise if there's any move combination or sequence we overlooked, but our logic holds so far. +> Alice: Exactly, Bob. We've confirmed through multiple lenses — both algebraically and with parity arguments. We can assert the total remains odd. Given an even $8k$ , equality is unattainable. +> Bob: It all checks out. Our breakdown covers both sequence verification and invariance. Well done on the collaborative analysis! +> Alice: Fantastic teamwork! I'll document our shared conclusion: no configuration of moves will make all the numbers equal. +> Bob: Absolutely! Let's finalize our response: Final Answer No + +# G.4 LLM as a Judge vs Human as a Judge + +![](images/e58060fbc8d7b6c3153babd8a6ea141006df62184cc2952ee69c5725ef99fff9.jpg) +Figure 20: Heatmap showing the joint distribution of human and LLM collaboration scores. + +To assess whether the LLM-as-a-Judge based collaboration score is a reliable estimation of human judgment, we manually annotated 100 Hogwild! generations on the LIMO dataset in a token-sync setup. The resulting correlation between human and model scores was approximately $r \approx 0.34$ , $p \approx 0.0005$ . This moderate yet consistent association suggests that the metric captures a meaningful aspect of collaborative behavior. We report the differences in human scores vs llm scores in the Figure 20. 
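As a minimal illustration of this agreement check (with made-up scores rather than the actual annotations), the human-vs-LLM correlation can be computed as follows:

```python
# Hypothetical per-sample collaboration levels; the real annotations are not reproduced here.
from scipy.stats import pearsonr

human_scores = [3, 4, 2, 5, 3, 1, 4, 2]                         # human-assigned levels (1-6)
llm_scores = [3.0, 3.67, 2.33, 4.33, 2.67, 1.33, 3.67, 2.67]    # mean of three LLM-judge runs per sample

r, p_value = pearsonr(human_scores, llm_scores)
print(f"Pearson r = {r:.2f}, p = {p_value:.4f}")
```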
\ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06261/images/024e9176e18d661c942fdbf117daeabe8efd8b200cad0285a7f49d1f68879b2c.jpg b/data/2025/2504_06xxx/2504.06261/images/024e9176e18d661c942fdbf117daeabe8efd8b200cad0285a7f49d1f68879b2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ea91474a3c0c64060ab466c55597b5c3bc8a90b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/024e9176e18d661c942fdbf117daeabe8efd8b200cad0285a7f49d1f68879b2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27220c613bd8ac8727791be08c299f3f9b5f32f856c236360862baf3b326c23b +size 27176 diff --git a/data/2025/2504_06xxx/2504.06261/images/0541b090e1608ce5167c68820c39717e91683369e6da6bc3263c960480ad859c.jpg b/data/2025/2504_06xxx/2504.06261/images/0541b090e1608ce5167c68820c39717e91683369e6da6bc3263c960480ad859c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e74ac6140beb26e6c4a4f7184208de6e5336317 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/0541b090e1608ce5167c68820c39717e91683369e6da6bc3263c960480ad859c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84fe8c913e7ccf026b6c97e0374f4ae496271ff2e2c4ea6ec533b7e8dea682c2 +size 21171 diff --git a/data/2025/2504_06xxx/2504.06261/images/0750e87acaf92a25e10b5215e73d545831549528469f5677d552cfdbc243b7ba.jpg b/data/2025/2504_06xxx/2504.06261/images/0750e87acaf92a25e10b5215e73d545831549528469f5677d552cfdbc243b7ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2fc21c707e6e501751f7ce8953f8765d34d7bf17 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/0750e87acaf92a25e10b5215e73d545831549528469f5677d552cfdbc243b7ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e059fa87f92a89edd59d1423b9746792b4a6355c0428751e2da323ca26afaefa +size 31745 diff --git a/data/2025/2504_06xxx/2504.06261/images/0a437c6252d524139e06f923a7d43f0f1afe81ffce50153f66609a0d9cf52add.jpg b/data/2025/2504_06xxx/2504.06261/images/0a437c6252d524139e06f923a7d43f0f1afe81ffce50153f66609a0d9cf52add.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b8820f45af54513208e05d7034399b2c2645652 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/0a437c6252d524139e06f923a7d43f0f1afe81ffce50153f66609a0d9cf52add.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1a11aaabde227d37d00f554b36020ba729e8bb6fccc351ebd79e8605559d42a +size 25355 diff --git a/data/2025/2504_06xxx/2504.06261/images/0ad70bb389f24f72d07ec923fe435e619b56716d82dd95d6e1f419c8e6ff3780.jpg b/data/2025/2504_06xxx/2504.06261/images/0ad70bb389f24f72d07ec923fe435e619b56716d82dd95d6e1f419c8e6ff3780.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5c9d2ce7b39cf7119025fbad453d93506331c55 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/0ad70bb389f24f72d07ec923fe435e619b56716d82dd95d6e1f419c8e6ff3780.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36504d79d764eeff4ef015e190e6ba1b1de0aeabe3ff164585644ec9d544d218 +size 26724 diff --git a/data/2025/2504_06xxx/2504.06261/images/105b408dcae775ad576b1a9e55e0656d770d5bc021c74442a63554ae117801b1.jpg b/data/2025/2504_06xxx/2504.06261/images/105b408dcae775ad576b1a9e55e0656d770d5bc021c74442a63554ae117801b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83f3a7083ce3fcde3bbae6beceb8ecf115595419 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06261/images/105b408dcae775ad576b1a9e55e0656d770d5bc021c74442a63554ae117801b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64e04b16191da7c95800fe458d33d2933df4313da101828e773acaaf088fe9ef +size 28036 diff --git a/data/2025/2504_06xxx/2504.06261/images/1113943814a1ba7449282785ff67db11c1dd9cd60a21c73314ec59a3bd9e6953.jpg b/data/2025/2504_06xxx/2504.06261/images/1113943814a1ba7449282785ff67db11c1dd9cd60a21c73314ec59a3bd9e6953.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f0a9bd948751d9a63271c6cf2efd7ae4ae2d0d4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/1113943814a1ba7449282785ff67db11c1dd9cd60a21c73314ec59a3bd9e6953.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c264937d4cf6f007648e046404003304b886fd75112800cec92d66579a6025b0 +size 25660 diff --git a/data/2025/2504_06xxx/2504.06261/images/1e282eb76370277aa31502f06b17e8deefdb231efbe3649cbd21156bb1baaf78.jpg b/data/2025/2504_06xxx/2504.06261/images/1e282eb76370277aa31502f06b17e8deefdb231efbe3649cbd21156bb1baaf78.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e69bcbda7b104d098930e61f724d862b153a88a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/1e282eb76370277aa31502f06b17e8deefdb231efbe3649cbd21156bb1baaf78.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:763ed0eefd9b89399f52cd25f4381477e0af703221ff4817097c4dae18d9e733 +size 28536 diff --git a/data/2025/2504_06xxx/2504.06261/images/2a379c139722d231fb1701ea9fdfccbb35d03c9b37afdceaceaac63f7bf7d640.jpg b/data/2025/2504_06xxx/2504.06261/images/2a379c139722d231fb1701ea9fdfccbb35d03c9b37afdceaceaac63f7bf7d640.jpg new file mode 100644 index 0000000000000000000000000000000000000000..792dbcea87c7c656baf691ddad14e9264cb7169f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/2a379c139722d231fb1701ea9fdfccbb35d03c9b37afdceaceaac63f7bf7d640.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70c8584a9f57dee0aff5af9c78c611af8b205b7fada0459888cb40129ce87b71 +size 26687 diff --git a/data/2025/2504_06xxx/2504.06261/images/2a5c487c953a0f66fdfd07ab66310d6ead70ff7461466ea60acdc372467058a1.jpg b/data/2025/2504_06xxx/2504.06261/images/2a5c487c953a0f66fdfd07ab66310d6ead70ff7461466ea60acdc372467058a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0d736183bbb0c19b4e26ba7a16e049cb7582c9b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/2a5c487c953a0f66fdfd07ab66310d6ead70ff7461466ea60acdc372467058a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b5ea9f61bf5267dc51cf2ea2a80c438fe08a1b345a86f03214b7e8e511a00de +size 36475 diff --git a/data/2025/2504_06xxx/2504.06261/images/313d727f166b635555738933a1119f47472d355e7c99e6199c5e7c9098bbc19d.jpg b/data/2025/2504_06xxx/2504.06261/images/313d727f166b635555738933a1119f47472d355e7c99e6199c5e7c9098bbc19d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96dc07b1dd833fb59b655a75c1efbdbc3f5fd9d1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/313d727f166b635555738933a1119f47472d355e7c99e6199c5e7c9098bbc19d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c0ca2c5f07b7fe441f87408031fd33f2a70f3c3676913f208a0ba4aee9c3402 +size 28309 diff --git a/data/2025/2504_06xxx/2504.06261/images/385547686f027b92872df24af335e0c59e793ce862689089ce58ae98832e0824.jpg 
b/data/2025/2504_06xxx/2504.06261/images/385547686f027b92872df24af335e0c59e793ce862689089ce58ae98832e0824.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c18369f4aaf3afdf5985dc5395cb359d8940d3f0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/385547686f027b92872df24af335e0c59e793ce862689089ce58ae98832e0824.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2c0b41b7f79a4b443baac97ad360f1629aad9920ace256c57a4e7c8fe43b0f6 +size 27113 diff --git a/data/2025/2504_06xxx/2504.06261/images/38e0900c5cbff9f84541182863d6a0ef9c8f80a8c7de1acbe2a7c5d160600707.jpg b/data/2025/2504_06xxx/2504.06261/images/38e0900c5cbff9f84541182863d6a0ef9c8f80a8c7de1acbe2a7c5d160600707.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17e1a8b11b26e6587b049d4d8921ce4c6b86984e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/38e0900c5cbff9f84541182863d6a0ef9c8f80a8c7de1acbe2a7c5d160600707.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:166c8c3bcbe87204be5b336c1308e3d708d1534a57867c0bd6ccd1560786b168 +size 44563 diff --git a/data/2025/2504_06xxx/2504.06261/images/39e38b5f91b2580720877cf0a525f801ab529aa855d2f3d4c8f0e38148798cbf.jpg b/data/2025/2504_06xxx/2504.06261/images/39e38b5f91b2580720877cf0a525f801ab529aa855d2f3d4c8f0e38148798cbf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52628253d2e62409fde8f7c75d3a444fe19cbbed --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/39e38b5f91b2580720877cf0a525f801ab529aa855d2f3d4c8f0e38148798cbf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19d11950af0a0177c460ef9fb564be50eda6593cf1a3b441083be3d0a26dba5d +size 28329 diff --git a/data/2025/2504_06xxx/2504.06261/images/3f44b10fecb9a2be030a785e03e76cc38ecce553a353594a4f96a006c4d88bd4.jpg b/data/2025/2504_06xxx/2504.06261/images/3f44b10fecb9a2be030a785e03e76cc38ecce553a353594a4f96a006c4d88bd4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b12e5c2513d5402fc710172718142746c4ed19a4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/3f44b10fecb9a2be030a785e03e76cc38ecce553a353594a4f96a006c4d88bd4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0363efd51efb157bb8375c7a1e5af63309dd2454bdd281aa46e3800129c9fc8 +size 27576 diff --git a/data/2025/2504_06xxx/2504.06261/images/403bc32e2333f1f364e35ecfc05c48ebb4b929f7f262bc7eaaf37bc6c81e156b.jpg b/data/2025/2504_06xxx/2504.06261/images/403bc32e2333f1f364e35ecfc05c48ebb4b929f7f262bc7eaaf37bc6c81e156b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9dac40e84205d0d88a739b30c11e716500c096e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/403bc32e2333f1f364e35ecfc05c48ebb4b929f7f262bc7eaaf37bc6c81e156b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd0e7fd88268b64b797f3e5812878c26e6dd538b6488d36d11a98cc83e1ef637 +size 11738 diff --git a/data/2025/2504_06xxx/2504.06261/images/4330aea357174e75709608a7d2ed4c2628d24ee92e544d264b320f4aa9f643a3.jpg b/data/2025/2504_06xxx/2504.06261/images/4330aea357174e75709608a7d2ed4c2628d24ee92e544d264b320f4aa9f643a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8bf086d313856ccc054ece073dc5bc485a0e45e9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/4330aea357174e75709608a7d2ed4c2628d24ee92e544d264b320f4aa9f643a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:750604581c38b0dbba20941c8580467a37bd0a9779ab5c7a1d0870c4b82c200d +size 25854 diff --git a/data/2025/2504_06xxx/2504.06261/images/4a7a536cd12fc9c79c74320988e958ef9293e7344b33fef910cf5e76e515d91d.jpg b/data/2025/2504_06xxx/2504.06261/images/4a7a536cd12fc9c79c74320988e958ef9293e7344b33fef910cf5e76e515d91d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76245cfac8720cbfb378522cbb433006cbee3a31 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/4a7a536cd12fc9c79c74320988e958ef9293e7344b33fef910cf5e76e515d91d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47e64c91c9da4531c94db011c97e9b2f0de0db4f3a57b7ff9b65eee1b7918dbb +size 34883 diff --git a/data/2025/2504_06xxx/2504.06261/images/507eb12025e222fa29ca02659bbf335b1449be8b98d49d55506246ac54845fba.jpg b/data/2025/2504_06xxx/2504.06261/images/507eb12025e222fa29ca02659bbf335b1449be8b98d49d55506246ac54845fba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2339968076a30cd2cc111b21ab509e80f4b07f6a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/507eb12025e222fa29ca02659bbf335b1449be8b98d49d55506246ac54845fba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f2acb0fbc33165fc1776221217bd194125aec05ee88db1805d05bb4c57ef7a5 +size 13035 diff --git a/data/2025/2504_06xxx/2504.06261/images/562be250b1eaa70e93b7d721ccffcd4fa2a625c27474e54bfc8d105f0c692d86.jpg b/data/2025/2504_06xxx/2504.06261/images/562be250b1eaa70e93b7d721ccffcd4fa2a625c27474e54bfc8d105f0c692d86.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0f9eda70e39fbed7c3afe693db3ea6366960cc1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/562be250b1eaa70e93b7d721ccffcd4fa2a625c27474e54bfc8d105f0c692d86.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c64e84cac5365c1e79bd59f033d8929cc8b330785dc20f88c9b7ae0f1e66d58 +size 26117 diff --git a/data/2025/2504_06xxx/2504.06261/images/5a3fc06a3288c485d1e0cfd791096076d85a7ad085ec8381d9e99fe6558a8cd2.jpg b/data/2025/2504_06xxx/2504.06261/images/5a3fc06a3288c485d1e0cfd791096076d85a7ad085ec8381d9e99fe6558a8cd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48bceee1cfa9f1718f3bcb93e77d9559f90de1d6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/5a3fc06a3288c485d1e0cfd791096076d85a7ad085ec8381d9e99fe6558a8cd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e4f18d5df62e7132311e1d8dbc827e2fe2a94a79ac3530576997506ff3424bf +size 30728 diff --git a/data/2025/2504_06xxx/2504.06261/images/666c357e475425a3aa6b4c8622e00a7798c904aeae52e67863082f72671934a4.jpg b/data/2025/2504_06xxx/2504.06261/images/666c357e475425a3aa6b4c8622e00a7798c904aeae52e67863082f72671934a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ba56564f3132e90d23a7853d4446dfd2be628e4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/666c357e475425a3aa6b4c8622e00a7798c904aeae52e67863082f72671934a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa3e73152f036860d06c7dfb96409dd4df1a01994582cfed55f9054d01ae87b1 +size 18862 diff --git a/data/2025/2504_06xxx/2504.06261/images/671772a8529123d424f4dc382164719cd30712feab78e67f4d667e021650f8ca.jpg b/data/2025/2504_06xxx/2504.06261/images/671772a8529123d424f4dc382164719cd30712feab78e67f4d667e021650f8ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d2ff363a66a52fabc239d7c570c902b9f20b4c9 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06261/images/671772a8529123d424f4dc382164719cd30712feab78e67f4d667e021650f8ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e105f77d91941ecdf932307004a5f40386a3a085e704959faf154a1a09801d23 +size 19165 diff --git a/data/2025/2504_06xxx/2504.06261/images/6746e3d409e9d9562ba2aaf9f282c5c5bae61320025f4f437ba5e9c28145ea37.jpg b/data/2025/2504_06xxx/2504.06261/images/6746e3d409e9d9562ba2aaf9f282c5c5bae61320025f4f437ba5e9c28145ea37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7051337a2e46ef04a6f63305aa0db86d1dbaf3c1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/6746e3d409e9d9562ba2aaf9f282c5c5bae61320025f4f437ba5e9c28145ea37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e038e5ab604876b988f2bb9b5d539461ae00a2b5859cd637a8ba3e0ee1853b3 +size 26347 diff --git a/data/2025/2504_06xxx/2504.06261/images/6a779138c58690d02c893b818651d0308190f4c66aaecc1e51400234f1b70318.jpg b/data/2025/2504_06xxx/2504.06261/images/6a779138c58690d02c893b818651d0308190f4c66aaecc1e51400234f1b70318.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7a03fd180636434e6951d314b3435559e51a3b8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/6a779138c58690d02c893b818651d0308190f4c66aaecc1e51400234f1b70318.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4bd76af109fec0387ae9d764d0b78f77b00c4ddb483b8f5dac9d0a202f5691e +size 25297 diff --git a/data/2025/2504_06xxx/2504.06261/images/72c4fd755084b9941f7025b448c7e727f178b52396d4d427f51f9dcdf6edc127.jpg b/data/2025/2504_06xxx/2504.06261/images/72c4fd755084b9941f7025b448c7e727f178b52396d4d427f51f9dcdf6edc127.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dbdfb4e545e6b399aa7ed1419e1ae19b0c40d4fe --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/72c4fd755084b9941f7025b448c7e727f178b52396d4d427f51f9dcdf6edc127.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d33fe903c089c7e15a8d0cf6dd1c56550c3161c689252af6a67439234743f008 +size 25950 diff --git a/data/2025/2504_06xxx/2504.06261/images/7505f7768dcfa922d898d6b01ba172187748b420cf06823a1f2df0b1c1f84199.jpg b/data/2025/2504_06xxx/2504.06261/images/7505f7768dcfa922d898d6b01ba172187748b420cf06823a1f2df0b1c1f84199.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff2e7407409f9fe2a9155a34fa9f7a3a8da3c983 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/7505f7768dcfa922d898d6b01ba172187748b420cf06823a1f2df0b1c1f84199.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6eb42b9bd7c7ccca8649e4e0b4539d95319c84c7631510804159195ed935200 +size 22452 diff --git a/data/2025/2504_06xxx/2504.06261/images/7bef6240269f72ddae95f0a61674229330539fb1d8841cbb8850064c86ef0bde.jpg b/data/2025/2504_06xxx/2504.06261/images/7bef6240269f72ddae95f0a61674229330539fb1d8841cbb8850064c86ef0bde.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac00f36917378bae2f5659f8081f1bad1950a3cb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/7bef6240269f72ddae95f0a61674229330539fb1d8841cbb8850064c86ef0bde.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87f35a8f002db7dd9d47891ef71a3be9e728193e5a533081c06f168977845af1 +size 26776 diff --git a/data/2025/2504_06xxx/2504.06261/images/8347a312d3cd1bb376b6935227e6d6cb5ade8c972725f3a5f84c73d48039e3ad.jpg 
b/data/2025/2504_06xxx/2504.06261/images/8347a312d3cd1bb376b6935227e6d6cb5ade8c972725f3a5f84c73d48039e3ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f62856319c302ec9e75bba9487cf4ba23e0e822 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/8347a312d3cd1bb376b6935227e6d6cb5ade8c972725f3a5f84c73d48039e3ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:848a00ea15202a08ca30af9e4bf88e9af51b5bf80f5db2b42c7e2489ed57384f +size 30279 diff --git a/data/2025/2504_06xxx/2504.06261/images/84df23ce36c50bad4a89ba3ea9bcd7a44a43add14223fcf9bad1e4912fd3b8e0.jpg b/data/2025/2504_06xxx/2504.06261/images/84df23ce36c50bad4a89ba3ea9bcd7a44a43add14223fcf9bad1e4912fd3b8e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b78886ad34ac3b50356c00ea30ff538da27a9203 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/84df23ce36c50bad4a89ba3ea9bcd7a44a43add14223fcf9bad1e4912fd3b8e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e136ad31c9d708f1055ec51dfcfd890499f6c3f7e9bea889b43c042815ef3306 +size 26409 diff --git a/data/2025/2504_06xxx/2504.06261/images/87afce25f48da198586ae0a3f58c3eb5bdf6359f3e953d8886e12b86198e5e45.jpg b/data/2025/2504_06xxx/2504.06261/images/87afce25f48da198586ae0a3f58c3eb5bdf6359f3e953d8886e12b86198e5e45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..224dba24c6e2b2a75ee42784f9381efcc6565e8d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/87afce25f48da198586ae0a3f58c3eb5bdf6359f3e953d8886e12b86198e5e45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4a2e67b9938a809f4328b8e2e60980721236428f9a165af4088df717839e610 +size 29988 diff --git a/data/2025/2504_06xxx/2504.06261/images/8ab8a335a650e9496769486045930364b378c45bba5dd6b33a67a20be2c7c767.jpg b/data/2025/2504_06xxx/2504.06261/images/8ab8a335a650e9496769486045930364b378c45bba5dd6b33a67a20be2c7c767.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b016e54484c8de3bf54398902640f11566734f94 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/8ab8a335a650e9496769486045930364b378c45bba5dd6b33a67a20be2c7c767.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8e787f5e339ecd8186932db5081cba21f664b33ca757b5f6c32f0d9f6822038 +size 27474 diff --git a/data/2025/2504_06xxx/2504.06261/images/8b61669f79eb12a756ed3fbf30bb6a99a471fca8098a8f4ae7e84ee1779300ba.jpg b/data/2025/2504_06xxx/2504.06261/images/8b61669f79eb12a756ed3fbf30bb6a99a471fca8098a8f4ae7e84ee1779300ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9da11e7d66909f1c957b02c0ef126038648f72b6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/8b61669f79eb12a756ed3fbf30bb6a99a471fca8098a8f4ae7e84ee1779300ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da579829795b260d276ae62e4e103a1dc4df9e33d11b567dc23fa1ec1e2a0df7 +size 25643 diff --git a/data/2025/2504_06xxx/2504.06261/images/8bec21499eb610041b5b9e65ad38946ef73ee9c2e166ed1f1df365336ebe3b73.jpg b/data/2025/2504_06xxx/2504.06261/images/8bec21499eb610041b5b9e65ad38946ef73ee9c2e166ed1f1df365336ebe3b73.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da22ce21ff4d0be468db9a6b6b9d5935c3b3cf89 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/8bec21499eb610041b5b9e65ad38946ef73ee9c2e166ed1f1df365336ebe3b73.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f98efe915d54d1b8b92d3e1bc0eafa8c748210fc6108df9b7f099cf2c206d9e2 +size 20208 diff --git a/data/2025/2504_06xxx/2504.06261/images/8cfb2a49dca00b94622db5fcca1ae9ead62991a85ae67c1c9b2929a26da82f88.jpg b/data/2025/2504_06xxx/2504.06261/images/8cfb2a49dca00b94622db5fcca1ae9ead62991a85ae67c1c9b2929a26da82f88.jpg new file mode 100644 index 0000000000000000000000000000000000000000..712a94d5f6fc584c717bf1e159bb603a57cf6e43 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/8cfb2a49dca00b94622db5fcca1ae9ead62991a85ae67c1c9b2929a26da82f88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6ccfa6e21a3c0247b68b1223dd0598f529b4e6d09cdbc13d0d7ae8ae5efbdb9 +size 60370 diff --git a/data/2025/2504_06xxx/2504.06261/images/95f302d1d13a8c6c86e2b7cf3e4be7afbd7c3e00e98f2e025d3ddd2173fc424a.jpg b/data/2025/2504_06xxx/2504.06261/images/95f302d1d13a8c6c86e2b7cf3e4be7afbd7c3e00e98f2e025d3ddd2173fc424a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa186f1f70a18a8e4f752a553bac7854ab2a5528 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/95f302d1d13a8c6c86e2b7cf3e4be7afbd7c3e00e98f2e025d3ddd2173fc424a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f027726722246aa40a5feb1937e3cb40fe53ff5259ceac132c5d8c05a775ed0b +size 30127 diff --git a/data/2025/2504_06xxx/2504.06261/images/96a8bf3d11b9e1348eeac758c1f0046b569b33f38e30ee6be4d5da6b40136c19.jpg b/data/2025/2504_06xxx/2504.06261/images/96a8bf3d11b9e1348eeac758c1f0046b569b33f38e30ee6be4d5da6b40136c19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b4e62505404863dba301767e56d420e925be2ad --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/96a8bf3d11b9e1348eeac758c1f0046b569b33f38e30ee6be4d5da6b40136c19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:958eaadb2e7d0d46b1e45a9e306479a9a96ccbd9bd4759ae5a3e2d3b8d3b7c46 +size 27956 diff --git a/data/2025/2504_06xxx/2504.06261/images/99e434b949d0cbcd9e763cd8a74a9aabc94127e4bb16528fc410ede6861a8804.jpg b/data/2025/2504_06xxx/2504.06261/images/99e434b949d0cbcd9e763cd8a74a9aabc94127e4bb16528fc410ede6861a8804.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1dcf7caef90ca06509ee6caae01234484a6c54b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/99e434b949d0cbcd9e763cd8a74a9aabc94127e4bb16528fc410ede6861a8804.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce2a532c5709766a83a46c711507d5201b056e903e781508d756efde7fe8f0c9 +size 27073 diff --git a/data/2025/2504_06xxx/2504.06261/images/9fb8652c51dc0c6d99a0e37d7a48674b3f47c670070a4ead96f3d66cb29b09d3.jpg b/data/2025/2504_06xxx/2504.06261/images/9fb8652c51dc0c6d99a0e37d7a48674b3f47c670070a4ead96f3d66cb29b09d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a64ad4e4c6ed4e8ee543fc967b202b10a90665f8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/9fb8652c51dc0c6d99a0e37d7a48674b3f47c670070a4ead96f3d66cb29b09d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56a7a28f1e661214b3702691b5d03cf047b6c205a17ee099810e8ebd0ca6ee46 +size 16912 diff --git a/data/2025/2504_06xxx/2504.06261/images/a315f11d3d59643d5387cded9470d575ca7f20a13d9e7735ba774ac67c0cdbc5.jpg b/data/2025/2504_06xxx/2504.06261/images/a315f11d3d59643d5387cded9470d575ca7f20a13d9e7735ba774ac67c0cdbc5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2fea95240b479aa25f5d85ee8afcbdfbf20aa59 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06261/images/a315f11d3d59643d5387cded9470d575ca7f20a13d9e7735ba774ac67c0cdbc5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2876e992afbdd4fc22c0822d1cd5f4b34a18e1434d59fd1320de362512181456 +size 28281 diff --git a/data/2025/2504_06xxx/2504.06261/images/abdc0a516b9ee43251a53d1bf6316e7463096fd5fd17654166af50c43ddb24e6.jpg b/data/2025/2504_06xxx/2504.06261/images/abdc0a516b9ee43251a53d1bf6316e7463096fd5fd17654166af50c43ddb24e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..920657b24e1f5914e6a7a439db75c5a1091e20d7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/abdc0a516b9ee43251a53d1bf6316e7463096fd5fd17654166af50c43ddb24e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d88b5d8747f6e5aee29216793ad56025d06c9a7dbe8abe7ca95525f04ccf91d9 +size 31369 diff --git a/data/2025/2504_06xxx/2504.06261/images/b0b7f72f4defa7bc1d8736b28449f49575d7a4e8f3b18755242e8310ce609be6.jpg b/data/2025/2504_06xxx/2504.06261/images/b0b7f72f4defa7bc1d8736b28449f49575d7a4e8f3b18755242e8310ce609be6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc3c7ff8954863482ee03e91117e624249c790d2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/b0b7f72f4defa7bc1d8736b28449f49575d7a4e8f3b18755242e8310ce609be6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83dc0f63b455fdcf9492229def18cb8e1f3e5fe09df6effedf0187c7dc350a9f +size 26579 diff --git a/data/2025/2504_06xxx/2504.06261/images/b0fb21bc8d03a479f6b8d9299463da795b537d1f95adafb958aefe06db96457c.jpg b/data/2025/2504_06xxx/2504.06261/images/b0fb21bc8d03a479f6b8d9299463da795b537d1f95adafb958aefe06db96457c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6de131543f0fcacf19871047a435e55ca3595a2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/b0fb21bc8d03a479f6b8d9299463da795b537d1f95adafb958aefe06db96457c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:506eee32bb9de04f8a50f725738584bced83d9da34764f30cea8f889b2d051d2 +size 26787 diff --git a/data/2025/2504_06xxx/2504.06261/images/b2c67363b0a157ab5ff662b92772e5587893fcb30bebd2182df2a561bc5a3043.jpg b/data/2025/2504_06xxx/2504.06261/images/b2c67363b0a157ab5ff662b92772e5587893fcb30bebd2182df2a561bc5a3043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40c4d4917a03c833721912b70bbae5a5ff9b837b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/b2c67363b0a157ab5ff662b92772e5587893fcb30bebd2182df2a561bc5a3043.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:665bb7bba49c04b68d3a2d75a76fae0ac05a9bcd051690de2b168f106a1050e5 +size 12022 diff --git a/data/2025/2504_06xxx/2504.06261/images/b6706799f0df1d0a77bc26bd71e833f2341b050a4b9159bf4de7f0093bdfd166.jpg b/data/2025/2504_06xxx/2504.06261/images/b6706799f0df1d0a77bc26bd71e833f2341b050a4b9159bf4de7f0093bdfd166.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e26755fe2e39590ba5d01d7eddc37b08e228f56 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/b6706799f0df1d0a77bc26bd71e833f2341b050a4b9159bf4de7f0093bdfd166.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:197eb58867f0d7e005d984921129d14188059da22bf1256197de4e0167294b1a +size 29083 diff --git a/data/2025/2504_06xxx/2504.06261/images/b721ccc908c8da57e84a6f91f59c3ba54f2a1133372a1fa4ff1dc4010a7980ce.jpg 
b/data/2025/2504_06xxx/2504.06261/images/b721ccc908c8da57e84a6f91f59c3ba54f2a1133372a1fa4ff1dc4010a7980ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f81744a2da102be2b6c4f1c97685ba8af959b13 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/b721ccc908c8da57e84a6f91f59c3ba54f2a1133372a1fa4ff1dc4010a7980ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3380cf726949ca9bbb60747cd86a7a6f709539ae8e52eaa863f78bbc9df2767d +size 26142 diff --git a/data/2025/2504_06xxx/2504.06261/images/c428b96323c4fc03a26afef9fe9b57ff6ae44eea4fc284d28e76d1cf6edf531f.jpg b/data/2025/2504_06xxx/2504.06261/images/c428b96323c4fc03a26afef9fe9b57ff6ae44eea4fc284d28e76d1cf6edf531f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6dbc873588af2fe2f80a5718c211dbf625ea4f86 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/c428b96323c4fc03a26afef9fe9b57ff6ae44eea4fc284d28e76d1cf6edf531f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d44d26691ead42a75c5b187a04e73ad585af0e9bc2dce29bd07e613917fe2c08 +size 26970 diff --git a/data/2025/2504_06xxx/2504.06261/images/c4ad99c5ac0bb20905ed13f3eeadaa110f7c34e161bde288d090379f7fe47220.jpg b/data/2025/2504_06xxx/2504.06261/images/c4ad99c5ac0bb20905ed13f3eeadaa110f7c34e161bde288d090379f7fe47220.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0be2d1e7d08b983e5c3e4308d7c19653c0be6307 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/c4ad99c5ac0bb20905ed13f3eeadaa110f7c34e161bde288d090379f7fe47220.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac122ff890a1034570032ad77107506173938c442dcde210f2ae0e4ac6fdbc72 +size 23990 diff --git a/data/2025/2504_06xxx/2504.06261/images/cb9b80b20b8d089610263159f0b7e1fa85c41bb4f6784a05e8af081ac5a540b5.jpg b/data/2025/2504_06xxx/2504.06261/images/cb9b80b20b8d089610263159f0b7e1fa85c41bb4f6784a05e8af081ac5a540b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee5b510c785333cddb2328c3b719dbd156762cca --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/cb9b80b20b8d089610263159f0b7e1fa85c41bb4f6784a05e8af081ac5a540b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f466231af099fd355a9b5f6881ed48f70bd5390c790769966dbf76379ff051d2 +size 27974 diff --git a/data/2025/2504_06xxx/2504.06261/images/cc79e0adf2594606e76f0f89c7b7b43453bf8c674b16cba49604007dab0a6453.jpg b/data/2025/2504_06xxx/2504.06261/images/cc79e0adf2594606e76f0f89c7b7b43453bf8c674b16cba49604007dab0a6453.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4c8d61b7cd5ef96f08194392ecd36f7361c3f69 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/cc79e0adf2594606e76f0f89c7b7b43453bf8c674b16cba49604007dab0a6453.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55cdc097eb1a02cfad9ba893961ccb6f9863ac00c06951445872e1173534b84b +size 17504 diff --git a/data/2025/2504_06xxx/2504.06261/images/cd5a9137eaa9ed2e13d5412d81fb8636cfab4d520ce8867d2e709303435b0785.jpg b/data/2025/2504_06xxx/2504.06261/images/cd5a9137eaa9ed2e13d5412d81fb8636cfab4d520ce8867d2e709303435b0785.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e382d35f811cf32a69f6c7e983ffa6f72fcbd1c6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/cd5a9137eaa9ed2e13d5412d81fb8636cfab4d520ce8867d2e709303435b0785.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:99cafcbf0b6dbd5de9b4fe7a2555c119ec6b904a8aa48d7f2d9a197a265e1d19 +size 33347 diff --git a/data/2025/2504_06xxx/2504.06261/images/cebcb85b2637b93486276dde1d8c5f47aadef4c16b22d77e01bfa66c24d053f1.jpg b/data/2025/2504_06xxx/2504.06261/images/cebcb85b2637b93486276dde1d8c5f47aadef4c16b22d77e01bfa66c24d053f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7d697796ad24d76943c03a8138424d4f9405d31 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/cebcb85b2637b93486276dde1d8c5f47aadef4c16b22d77e01bfa66c24d053f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73e817b658f5be10be5e9a5e33ac8d4344b7613ea1406d432e810252decbdfa6 +size 26903 diff --git a/data/2025/2504_06xxx/2504.06261/images/d0f2ee09338de737c6a5456c25214178f3d7f3297d1a372caf56c8c8f863a93a.jpg b/data/2025/2504_06xxx/2504.06261/images/d0f2ee09338de737c6a5456c25214178f3d7f3297d1a372caf56c8c8f863a93a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eeb75505a9c29a5f415fffb1f7ca60a241ee558c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/d0f2ee09338de737c6a5456c25214178f3d7f3297d1a372caf56c8c8f863a93a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8886702ddb709449090fa2e2665c84cdea7cf1fc5b8468a3cd4c882929a5508a +size 25957 diff --git a/data/2025/2504_06xxx/2504.06261/images/d1f96afd9d33e5508f5678c3d6a6571f827194b230ae319f10f35d7af3906029.jpg b/data/2025/2504_06xxx/2504.06261/images/d1f96afd9d33e5508f5678c3d6a6571f827194b230ae319f10f35d7af3906029.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70919a48c08ac032c412a9b32a6334dbf3b644b4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/d1f96afd9d33e5508f5678c3d6a6571f827194b230ae319f10f35d7af3906029.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfb8a76d89e88f88d5b256a4dcc904a1ce320a5c80270510688f650f98616404 +size 31191 diff --git a/data/2025/2504_06xxx/2504.06261/images/dd60c86c54a1069c84d8f88fc117e8b92c39cad420269a63d1fe822d7e16aa01.jpg b/data/2025/2504_06xxx/2504.06261/images/dd60c86c54a1069c84d8f88fc117e8b92c39cad420269a63d1fe822d7e16aa01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..374bdf87dcbf9203daa1db6b7f7d70788410284f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/dd60c86c54a1069c84d8f88fc117e8b92c39cad420269a63d1fe822d7e16aa01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba593016619ad9100b8627b1bf4fa9be00b6a75c87a8683e284325a43043f62c +size 25784 diff --git a/data/2025/2504_06xxx/2504.06261/images/e58060fbc8d7b6c3153babd8a6ea141006df62184cc2952ee69c5725ef99fff9.jpg b/data/2025/2504_06xxx/2504.06261/images/e58060fbc8d7b6c3153babd8a6ea141006df62184cc2952ee69c5725ef99fff9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e9b88d1c01b92b1b708e2fcc6243aed7661900a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/e58060fbc8d7b6c3153babd8a6ea141006df62184cc2952ee69c5725ef99fff9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:279838ecee8f416e942428ee0fbb58fa77fefd3ef02e7dd0a9b25fd7dc050fa4 +size 35376 diff --git a/data/2025/2504_06xxx/2504.06261/images/f0209aef2837c6968d7da96be40be0a43e35806305771cb01ca228315c6b45f8.jpg b/data/2025/2504_06xxx/2504.06261/images/f0209aef2837c6968d7da96be40be0a43e35806305771cb01ca228315c6b45f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52f7eede675a981511d86dfdbcb757d0577152af --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06261/images/f0209aef2837c6968d7da96be40be0a43e35806305771cb01ca228315c6b45f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be222f213c4d538d6b26066537a7af94970e4eaa998caec19e104b2344c4efed +size 27379 diff --git a/data/2025/2504_06xxx/2504.06261/images/f66dea94426dd2a9fdd437283ff40594986cf9239a2905077493f34a0dc26501.jpg b/data/2025/2504_06xxx/2504.06261/images/f66dea94426dd2a9fdd437283ff40594986cf9239a2905077493f34a0dc26501.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0bc63eb37987ae84949adc0f011c281490a37ca --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/f66dea94426dd2a9fdd437283ff40594986cf9239a2905077493f34a0dc26501.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56783e30095b4e72ac99054c4fca3ef47c07c65d893eea9620952c25f6842244 +size 28341 diff --git a/data/2025/2504_06xxx/2504.06261/images/facf7a5e13928be1d0e4bd20c8fe4373b6452ff23d4b026fc5341612411e6b28.jpg b/data/2025/2504_06xxx/2504.06261/images/facf7a5e13928be1d0e4bd20c8fe4373b6452ff23d4b026fc5341612411e6b28.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4477785151af243320de61f9d87c3057468ae5e7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/facf7a5e13928be1d0e4bd20c8fe4373b6452ff23d4b026fc5341612411e6b28.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a0ac46899659d468ee2c9788106aaebce74497cb64d76015d215904c873cc6a +size 16344 diff --git a/data/2025/2504_06xxx/2504.06261/images/fc79ddd12c709f66bb3221cb918ba15b69a77353de1d3b73d2233cdf4c707cf5.jpg b/data/2025/2504_06xxx/2504.06261/images/fc79ddd12c709f66bb3221cb918ba15b69a77353de1d3b73d2233cdf4c707cf5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ae9bbd0d771403665a670899f64164d41e41485 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/fc79ddd12c709f66bb3221cb918ba15b69a77353de1d3b73d2233cdf4c707cf5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03097e1b282e2ee2e8fa081ed627cb105a3c7016d85d1ae59c0d9cac91e9ae8c +size 27290 diff --git a/data/2025/2504_06xxx/2504.06261/images/fde24df3ee61d9f9607eeda1caf684606ee9eae1c08e1523c2d7c3c8a1853a17.jpg b/data/2025/2504_06xxx/2504.06261/images/fde24df3ee61d9f9607eeda1caf684606ee9eae1c08e1523c2d7c3c8a1853a17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..350ee1cc746cbf0050ccbb206dc4dc17cc1d5884 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/images/fde24df3ee61d9f9607eeda1caf684606ee9eae1c08e1523c2d7c3c8a1853a17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bc9c37a41ba66377c156e9a91965fbd0769c87c9fe8f5b1487f7eba5ad815ab +size 26767 diff --git a/data/2025/2504_06xxx/2504.06261/layout.json b/data/2025/2504_06xxx/2504.06261/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ef4e33b659d0f2880d8c7e758bbb099540a1724d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06261/layout.json @@ -0,0 +1,21914 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 115, + 97, + 496, + 136 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 97, + 496, + 136 + ], + "spans": [ + { + "bbox": [ + 115, + 97, + 496, + 136 + ], + "type": "text", + "content": "Hogwild! 
Inference: Parallel LLM Generation via Concurrent Attention" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 107, + 173, + 501, + 247 + ], + "blocks": [ + { + "bbox": [ + 107, + 173, + 501, + 247 + ], + "lines": [ + { + "bbox": [ + 107, + 173, + 501, + 247 + ], + "spans": [ + { + "bbox": [ + 107, + 173, + 501, + 247 + ], + "type": "table", + "html": "
Gleb Rodionov†* YandexRoman Garipov* HSE University YandexAlina Shutova* HSE University YandexGeorge Yakushev* HSE University YandexErik Schultheis* IST Austria
Vage Egiazarian IST AustriaAnton Sinitsin YandexDenis Kuznedev YandexDan Alistarh‡ IST Austria
", + "image_path": "2a5c487c953a0f66fdfd07ab66310d6ead70ff7461466ea60acdc372467058a1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 281, + 268, + 329, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 268, + 329, + 281 + ], + "spans": [ + { + "bbox": [ + 281, + 268, + 329, + 281 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 292, + 471, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 292, + 471, + 523 + ], + "spans": [ + { + "bbox": [ + 140, + 292, + 471, + 523 + ], + "type": "text", + "content": "Large Language Models (LLMs) have demonstrated the ability to tackle increasingly complex tasks through advanced reasoning, long-form content generation, and tool use. Solving these tasks often involves long inference-time computations. In human problem solving, a common strategy to expedite work is collaboration: by dividing the problem into sub-tasks, exploring different strategies concurrently, etc. Recent research has shown that LLMs can also operate in parallel by implementing explicit cooperation frameworks, such as voting mechanisms or the explicit creation of independent sub-tasks that can be executed in parallel. However, each of these frameworks may not be suitable for all types of tasks, which can hinder their applicability. In this work, we propose a different design approach: we run LLM \"workers\" in parallel, allowing them to synchronize via a concurrently-updated attention cache and prompt these workers to decide how best to collaborate. Our approach allows the LLM instances to come up with their own collaboration strategy for the problem at hand, all the while \"seeing\" each other's memory in the concurrent KV cache. We implement this approach via Hogwild! Inference: a parallel LLM inference engine where multiple instances of the same LLM run in parallel with the same attention cache, with \"instant\" access to each other's memory.1 Hogwild! Inference takes advantage of Rotary Position Embeddings (RoPE) to avoid recomputation while improving parallel hardware utilization. We find that modern reasoning-capable LLMs can perform inference with shared Key-Value cache out of the box, without additional fine-tuning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 537, + 192, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 537, + 192, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 537, + 192, + 550 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 556, + 506, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 556, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 506, + 657 + ], + "type": "text", + "content": "Many recent advancements of Large Language Models can be attributed to their ability to perform inference-time computations to improve performance [Suzgun et al., 2022, Snell et al., 2024, Beeching et al., Muennighoff et al., 2025]. This includes chain-of-thought (CoT) reasoning [Wei et al., 2022, Kojima et al., 2022, Zhang et al., 2022, Yao et al., 2023, Lightman et al., 2023], long-form generation [Bai et al., 2024] and interacting with external tools [Schick et al., 2023, Qin et al., 2023, Yao et al., 2022, Shen et al., 2023]. 
Popular LLM-based services have capabilities for reasoning and tool use [OpenAI et al., 2024, Google DeepMind, 2025, Anthropic, 2024]. At the same time, several reasoning-capable open-access LLMs have recently been released to the public [DeepSeek-AI et al., 2025, Qwen Team, 2025, Yang et al., 2024, Muennighoff et al., 2025, Ye et al., 2025]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 660, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 660, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 660, + 506, + 696 + ], + "type": "text", + "content": "Using these models to solve complex problems often requires long sequential computations, that is, generating text token-by-token. However, many reasoning problems are not sequential. Leveraging this intuition, several recent works propose parallel inference strategies that allow multiple LLMs" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 217, + 34, + 571 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 217, + 34, + 571 + ], + "spans": [ + { + "bbox": [ + 13, + 217, + 34, + 571 + ], + "type": "text", + "content": "arXiv:2504.06261v4 [cs.LG] 17 Nov 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 701, + 423, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 701, + 423, + 712 + ], + "spans": [ + { + "bbox": [ + 116, + 701, + 423, + 712 + ], + "type": "text", + "content": "1Our implementation is available at https://github.com/eqimp/hogwild_11m." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 118, + 712, + 490, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 712, + 490, + 723 + ], + "spans": [ + { + "bbox": [ + 118, + 712, + 490, + 723 + ], + "type": "text", + "content": "†Corresponding author: rodionovgleb@yandex-team.ru. * Equal contribution. ‡ Senior author." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 731, + 385, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 731, + 385, + 742 + ], + "spans": [ + { + "bbox": [ + 104, + 731, + 385, + 742 + ], + "type": "text", + "content": "39th Conference on Neural Information Processing Systems (NeurIPS 2025)." + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 32, + 478, + 210 + ], + "blocks": [ + { + "bbox": [ + 136, + 32, + 478, + 210 + ], + "lines": [ + { + "bbox": [ + 136, + 32, + 478, + 210 + ], + "spans": [ + { + "bbox": [ + 136, + 32, + 478, + 210 + ], + "type": "image", + "image_path": "8cfb2a49dca00b94622db5fcca1ae9ead62991a85ae67c1c9b2929a26da82f88.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 216, + 504, + 240 + ], + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 240 + ], + "type": "text", + "content": "Figure 1: An intuitive explanation of Hogwild! Inference, with 2 workers generating in parallel and 3 shared cache blocks. Each color denotes a cache block. See it in action (example generation)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 244, + 506, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 244, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 244, + 506, + 323 + ], + "type": "text", + "content": "to solve a problem faster or more accurately via some form of collaboration [Wang et al., 2022, Ning et al., 2024]. In the simplest case, multiple LLMs can attempt the problem independently, then vote [Wang et al., 2022] or cross-reference their results [Du et al., 2023, Wang et al., 2024a] to improve correctness. A parallel line of work allows the LLM to divide the problem into multiple independent sub-tasks that are then solved in parallel and merged, producing the final solution [Ning et al., 2024, Kim et al., 2024, Jin et al., 2025]. These parallel inference strategies can improve quality and efficiency, taking advantage of parallelism in modern hardware." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 327, + 504, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 504, + 405 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 504, + 405 + ], + "type": "text", + "content": "Unfortunately, no single collaboration strategy is universally effective. For instance, solving a problem in independent parallel \"threads\" can be inefficient when one of the threads requires a longer generation than the rest, resulting in most of the agents waiting for a straggler and wasting compute [Wang et al., 2022, 2024a]. In turn, inference with independent sub-tasks only works if the problem can immediately be split into these sub-tasks. Furthermore, if one of the agents discovers that the original plan is flawed, they will be unable to re-plan [Ning et al., 2024, Ding et al., 2025], potentially solving sub-tasks that are no longer necessary [Jin et al., 2025]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 408, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 408, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 408, + 506, + 464 + ], + "type": "text", + "content": "This runs contrary to how humans collaborate. Instead of strict adherence to a fixed collaboration strategy, we often collaborate more dynamically, re-planning on the fly, abandoning some tasks half-way and switching to a more promising approach, discussing or debating strategy if the initial plan failed. While this type of collaboration is harder to define, it offers greater flexibility and can be more efficient if the participants are sufficiently cohesive [Hutchins, 1995, Entin and Serfaty, 1999]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 468, + 504, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 504, + 514 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 504, + 514 + ], + "type": "text", + "content": "Our Approach. In this work, we try to apply the same principle to artificial reasoners. Since modern LLMs can already reason and plan [Zhou et al., 2024, Gao et al., 2024, Wang et al., 2024c], we hypothesize that they can benefit from dynamic interaction between different instances, during which they can develop their own collaboration strategy for the problem at hand." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 516, + 504, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 504, + 583 + ], + "type": "text", + "content": "To test this hypothesis, we propose Hogwild! Inference — a parallel LLM inference protocol with no pre-defined framework for collaboration. Instead of choosing how LLMs should interact ahead of time, we allow them to generate tokens in parallel and \"see\" each other's progress (tokens) immediately as they are generated. We then prompt the LLM \"workers\" to decide their next course of action by themselves, given the latest actions from others: whether this means solving parallel sub-tasks, cross-verifying each other, discussing strategy, or pivoting to a new plan." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 585, + 504, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 504, + 652 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 504, + 652 + ], + "type": "text", + "content": "To enable this type of on-the-fly collaboration, Hogwild! Inference runs multiple LLM instances with the same weights, but with a custom Key-Value cache that shares token representations between workers, allowing concurrent cross-attention. Specifically, instead of re-computing Key-Value representations for each worker, we keep track of individual worker KV memories and \"stitch them together\" in different orders, by adjusting their positional embeddings (see Figure 1). Moreover, we provide an efficient implementation of this inference approach." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 655, + 504, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 701 + ], + "type": "text", + "content": "We test Hogwild! Inference with modern open-source LLMs and find that existing reasoning-capable models—such as QwQ [Qwen Team, 2025] and DeepSeek-R1 [DeepSeek-AI et al., 2025]—can already \"reason to coordinate\". More concretely, we observe that concurrent agents can formulate and follow plans, adapt when the initial plan has failed, point out each other's errors, and use each other's" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "content": "Our approach inspired by Hogwild! SGD [Recht et al., 2011] that runs updates asynchronously and applies each update as soon as it is computed. The exclamation mark is part of the original name [Stanford HAI, 2023]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "content": "key observations. When prompted to check if they are doing redundant work – e.g., when one LLM instance is doing a sub-task that is already done by another, or solving a problem that is no longer relevant — they can often (but not always) detect redundancy and change strategy. In summary, our results suggest that parallel inference with a shared Key-Value cache may offer a promising approach to enable effective and efficient collaboration between multiple LLM instances." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 137, + 189, + 149 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 137, + 189, + 149 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 189, + 149 + ], + "type": "text", + "content": "2 Background" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 154, + 504, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 504, + 187 + ], + "type": "text", + "content": "Recent works propose a large number of frameworks for parallel reasoning and tool use that vary across several axes: how the parallel instances are organized together, what they exchange, and how often [Zhang et al., 2025]. In this section, we give a brief summary of these methods." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 192, + 506, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 192, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 506, + 270 + ], + "type": "text", + "content": "Discussion & aggregation. The simplest way to parallelize chain-of-thought reasoning is Self-Consistency [Wang et al., 2022], where multiple LLM instances reason independently, then vote on the final answer. This approach was later extended in Du et al. [2023], replacing majority voting with text-based communication rounds. Subsequent works in this field combine multiple LLM types [Wang et al., 2024a] and scales to more agents Li et al. [2024a]. Another line of work introduces specialized \"roles\" such as the Debugger [Talebirad and Nadiri, 2023], Examiner [Cohen et al., 2023], Math Teacher [Kong et al., 2024], Judge [Chen et al., 2024], and others, to further augment reasoning." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 274, + 506, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 506, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 506, + 363 + ], + "type": "text", + "content": "This type of role-based discussion was shown to greatly improve LLM reasoning factuality for certain tasks [Wang et al., 2022, Du et al., 2023], and can even enable multiple weaker LLM agents to collectively outperform state-of-the-art single-agent systems [Wang et al., 2024a]. 
However, this improvement is not unique to multiple agents and can be offset with better single-agent prompting [Wang et al., 2024b, Muennighoff et al., 2025]. Additionally, these approaches do not necessarily accelerate reasoning, because at least some of the agents have to solve the entire problem sequentially, and process (re-encode) each other's progress. This creates additional computational overhead, which presents challenges for both runtime and memory efficiency Wang et al. [2024a], Du et al. [2023]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 366, + 506, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 506, + 456 + ], + "type": "text", + "content": "Parallelism for efficiency. A different line of work leverages multiple LLMs to solve tasks faster in parallel, such as Skeleton-of-Thought (SoT) [Ning et al., 2024]. SoT begins by running a single LLM to outline a plan for solving the problem with independent sub-tasks, then launches parallel LLM instances for each sub-task. For problems that involve function calling, these functions can also run in parallel [Kim et al., 2024, Gim et al., 2024]. Subsequent works propose more complex parallelism strategies such as dynamic parallel tree search [Ding et al., 2025] or a single agent spawning asynchronous sub-tasks that are done by background LLM \"threads\" [Jin et al., 2025, Liu et al., 2024b, Pan et al., 2025], achieved with specialized fine-tuning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 460, + 506, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 506, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 506, + 615 + ], + "type": "text", + "content": "These techniques are known to substantially accelerate inference for problems that fit their type of parallelism. However, we argue that this is also their main limitation: by imposing a specific parallelism strategy, these methods can harm reasoning for problems that do not fit their framework. For instance, when solving a complex reasoning problem, it is often the case that the initial plan turns out to be wrong or incomplete [Muennighoff et al., 2025, DeepSeek-AI et al., 2025], which conflicts with SoT-like methods [Ning et al., 2024, Yu, 2025] that follow a fixed plan-execute-aggregate schedule. Furthermore, some of the sub-tasks may turn out to be more complicated than originally intended and take up more work, which would cause methods like PASTA Jin et al. [2025] to wait for that single task, whereas a more sophisticated reasoner could adjust the plan to work better in parallel. Note that each individual issue can be amended with yet another, more complicated parallelism framework, but the sheer number of such cases makes us doubt whether this is the right approach. In this work, we instead let multiple LLM instances interact without a fixed framework, allowing them to see each other's partial generations to devise (and revise) task-specific collaboration strategy. We show that, perhaps surprisingly, existing reasoning LLMs already have the ability to leverage this." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 621, + 227, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 621, + 227, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 227, + 635 + ], + "type": "text", + "content": "3 Hogwild! 
Inference" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 639, + 507, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 507, + 727 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 507, + 727 + ], + "type": "text", + "content": "Our main intuition is that modern LLMs do not need a pre-defined framework for inference-time parallelism: they can organize by themselves. To test this hypothesis, we design a parallel inference protocol where multiple LLM instances can collaborate as flexibly as possible. Instead of assigning each \"worker\" to a specific role or sub-task, we run them together and prompt them to collaborate. This approach has two key problems: how to run multiple inference threads from the same Key-Value memory, and how to prompt LLM \"workers\" to collaborate over said memory. We outline how to perform LLM inference with a shared cache in Section 3.1, describe our cache structure in Section 3.2 and prompting strategy in Section 3.3. Finally, Section 3.4 describes the inference algorithm." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 354, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 354, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 354, + 83 + ], + "type": "text", + "content": "3.1 Concurrent Attention with Shared Key-Value Cache" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 86, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 86, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 86, + 504, + 118 + ], + "type": "text", + "content": "The core ingredient of Hogwild! Inference is a shared Key-Value memory (KV cache) accessible to all workers. The cache consists of several blocks that can be reused between workers, implementing a concurrent version of the attention mechanism [Bahdanau et al., 2015, Vaswani, 2017]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 124, + 506, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 124, + 506, + 190 + ], + "spans": [ + { + "bbox": [ + 104, + 124, + 506, + 190 + ], + "type": "text", + "content": "Let us first consider a simple case with two workers and three cache blocks, as depicted in Figure 1. The first block contains the prompt, and the other two blocks contain the tokens generated by workers A and B respectively (denoted Alice and Bob in the Figure). As workers generate new tokens, they access each other's attention caches as though these were their own previously generated tokens. In Figure 1, \"Alice\" sees the common prompt, then \"Bob's\" token representations, then her own. 
In turn, Bob sees the same common prompt, then Alice's token KVs, and his own tokens after that.3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 194, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 239 + ], + "type": "text", + "content": "This creates a discrepancy where the same Key-Value pairs appear at different positions for each worker. Furthermore, the relative distance between the same pair of tokens (e.g., first generated tokens from Alice and Bob, respectively) changes as new tokens are added. While it is possible to re-encode these tokens at their new positions, it would cause overhead that scales cubically4." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 243, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 310 + ], + "type": "text", + "content": "Instead of re-encoding the new tokens for other workers, we attempt to reuse existing token representations between workers. However, since these tokens appear at different positions for each worker and step, we need to adjust for their positional embeddings. Most modern LLMs use Rotary Position Embeddings (RoPE) [Su et al., 2021], where each key and query is rotated to an angle proportional to its absolute position. Prior works have shown that RoPE embeddings can be manipulated through scaling [Peng et al., 2023] slicing [Xiao et al., 2024], or pruning [Zhang et al., 2023]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 314, + 507, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 507, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 507, + 360 + ], + "type": "text", + "content": "In Hogwild! Inference, we instead shift the KV values, multiplying the entire cache block by a cos / sin values that implement rotation by a constant offset. We use this to arrange the same cache entries in different order for each worker as in Figure 1 (right). This allows both workers to instantly \"see\" each other's tokens while they are generated — and even before they are processed by all layers." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 365, + 201, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 201, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 201, + 374 + ], + "type": "text", + "content": "3.2 Cache Structure" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 377, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 377, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 377, + 504, + 422 + ], + "type": "text", + "content": "Now that we defined a way to rearrange cache blocks on the fly, it is reasonable to ask how to arrange these blocks. For short tasks, simply concatenating worker outputs is sufficient. However, as we consider harder problems that require long chains of thought, workers will eventually pay less attention to each other because of the thousands of tokens between their latest steps5." 
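As a rough illustration of the constant-offset RoPE shift described in Section 3.1 above, the following is a minimal sketch (not the authors' released implementation) of how one worker's view could be assembled from shared cache blocks. It assumes an interleaved sin/cos RoPE layout and keys cached at block-local positions; `rope_shift_block` and `assemble_worker_view` are illustrative names introduced here.

```python
import torch

def rope_shift_block(keys: torch.Tensor, offset: int, theta: float = 10000.0) -> torch.Tensor:
    """Rotate a whole block of RoPE'd keys by a constant positional offset.

    keys: (block_len, head_dim) with head_dim even, already rotated to block-local
    positions 0..block_len-1 (interleaved RoPE layout assumed for this sketch).
    """
    d = keys.shape[-1]
    inv_freq = theta ** (-torch.arange(0, d, 2, dtype=torch.float32) / d)
    ang = offset * inv_freq                      # one angle per frequency pair
    cos, sin = torch.cos(ang), torch.sin(ang)
    k1, k2 = keys[..., 0::2], keys[..., 1::2]
    out = torch.empty_like(keys)
    out[..., 0::2] = k1 * cos - k2 * sin         # rotate each (even, odd) coordinate pair
    out[..., 1::2] = k1 * sin + k2 * cos
    return out

def assemble_worker_view(prompt_k: torch.Tensor, other_k: torch.Tensor, own_k: torch.Tensor) -> torch.Tensor:
    """Stitch shared blocks in this worker's order: prompt, the other worker, then itself."""
    view, pos = [], 0
    for block in (prompt_k, other_k, own_k):
        view.append(rope_shift_block(block, pos))  # shift the block so it starts at position `pos`
        pos += block.shape[0]
    return torch.cat(view, dim=0)                  # keys as this worker "sees" them
```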
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 426, + 506, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 504 + ], + "type": "text", + "content": "To address this problem, we propose a more sophisticated cache arrangement inspired by group chat rooms. Namely, we split the generated text into reasoning \"steps\", roughly a paragraph in size. Whenever a given worker finishes a paragraph, (e.g. generates " + }, + { + "bbox": [ + 104, + 426, + 506, + 504 + ], + "type": "inline_equation", + "content": "\\backslash n\\backslash n" + }, + { + "bbox": [ + 104, + 426, + 506, + 504 + ], + "type": "text", + "content": "), we move its KV cache to the end of a shared chat-like history and let it generate the next paragraph at the end of that history. Note that workers still see each other's current (unfinished) paragraphs at the end of the shared history as they write them (see Figure 1). This way, workers always see each other's latest updates as recent tokens and can communicate more easily. For each worker " + }, + { + "bbox": [ + 104, + 426, + 506, + 504 + ], + "type": "inline_equation", + "content": "W_{i}" + }, + { + "bbox": [ + 104, + 426, + 506, + 504 + ], + "type": "text", + "content": ", we organize cache blocks as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 506, + 506, + 621 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 105, + 506, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 506, + 529 + ], + "type": "text", + "content": "- Common Cache: a large KV cache block that stores KV representations for the system prompt, task description, and a history of previous reasoning steps from each agent." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "type": "text", + "content": "- Other workers: multiple smaller cache blocks containing the latest (unfinished) steps of all other workers " + }, + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "type": "inline_equation", + "content": "W_{j \\neq i}" + }, + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "type": "text", + "content": " in ascending order. For instance, if there are 4 workers, " + }, + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "type": "inline_equation", + "content": "W_{2}" + }, + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "type": "text", + "content": " will see " + }, + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "type": "inline_equation", + "content": "W_{1} \\oplus W_{3} \\oplus W_{4}" + }, + { + "bbox": [ + 105, + 529, + 504, + 551 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 551, + 504, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 551, + 504, + 621 + ], + "spans": [ + { + "bbox": [ + 104, + 551, + 504, + 621 + ], + "type": "text", + "content": "- Current worker: the latest (unfinished) reasoning step of the current worker " + }, + { + "bbox": [ + 104, + 551, + 504, + 621 + ], + "type": "inline_equation", + "content": "W_{i}" + }, + { + "bbox": [ + 104, + 551, + 504, + 621 + ], + "type": "text", + "content": " to be continued. 
Each block starts with a new paragraph (\\n\\n)." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 624, + 299, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 299, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 299, + 636 + ], + "type": "text", + "content": "3.3 Prompting for Zero-Shot Collaboration" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "content": "The shared key-value cache inference we described above allows modern LLMs to access each other's tokens and reason collaboratively. However, even though modern LLMs can reason about" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 669, + 487, + 680 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 669, + 487, + 680 + ], + "spans": [ + { + "bbox": [ + 118, + 669, + 487, + 680 + ], + "type": "text", + "content": "3For clarity of exposition, we choose to anthropomorphize the pronouns for these two LLM instances." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "text", + "content": "4If " + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "text", + "content": " agents generate one new token each, which is then re-encoded differently for each of these " + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "text", + "content": " agents, that each have to attend to " + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "inline_equation", + "content": "O(n)" + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "text", + "content": " additional tokens, then the total step complexity is " + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "inline_equation", + "content": "O(n^{3})" + }, + { + "bbox": [ + 104, + 680, + 505, + 700 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 700, + 504, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 721 + ], + "type": "text", + "content": "5In other words, if we put all outputs of worker A ahead of worker B, then the more tokens are generated, the farther worker B needs to \"look\" to reach worker A's latest outputs. This could be mitigated with finetuning."
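To make the three-part cache layout listed above (common history, other workers' unfinished steps, the current worker's own step) and the promotion of finished steps into the shared history more concrete, here is a simplified bookkeeping sketch; the class and field names are hypothetical and not taken from the released code.

```python
from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class Block:
    owner: str
    tokens: List[int] = field(default_factory=list)

@dataclass
class SharedCache:
    history: List[Block] = field(default_factory=list)       # prompt + finished steps of all workers
    current: Dict[str, Block] = field(default_factory=dict)  # worker name -> unfinished step

    def view_order(self, me: str) -> List[Block]:
        # Order in which worker `me` attends to blocks: shared history first,
        # then the other workers' unfinished steps, then its own unfinished step last.
        mine = self.current.setdefault(me, Block(owner=me))
        others = [b for name, b in sorted(self.current.items()) if name != me]
        return self.history + others + [mine]

    def append_token(self, me: str, token: int, step_finished: bool) -> None:
        # Append a freshly generated token; if the worker just closed a paragraph
        # (e.g. emitted "\n\n"), promote its block into the shared history and
        # start a new, empty block for its next step.
        self.current.setdefault(me, Block(owner=me)).tokens.append(token)
        if step_finished:
            self.history.append(self.current[me])
            self.current[me] = Block(owner=me)
```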
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": "how to collaborate, there is no guarantee that they will actually do so unprompted. As with any desired LLM behavior, it can be achieved in two ways: either by training the model to generate tokens collaboratively or by prompting it in-context. In this work, we focus on the latter approach to make Hogwild! Inference easier to generalize for new models. Our prompting consists of two parts:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 121, + 506, + 177 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 105, + 121, + 505, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 505, + 143 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 505, + 143 + ], + "type": "text", + "content": "1. System prompt describes the \"rules\" of the shared cache and suggests that workers collaborate. This prompt goes at the beginning of either the system or user message (if not unsupported);" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 143, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 143, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 143, + 506, + 177 + ], + "type": "text", + "content": "2. Inserting s1-like collaboration prompts: every thousand generated tokens, we prompt a random worker with \"Wait, am I doing redundant work? (yes/no):\" at the beginning of their next paragraph. This strategy is meant to promote collaboration and is inspired by Muennighoff et al. [2025]." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 180, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 180, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 180, + 506, + 236 + ], + "type": "text", + "content": "The latter s1-like prompts present a curious case. We found that LLMs fine-tuned on reasoning can often become too \"focused\" on what it is generating currently and fail to notice that another instance has found a mistake or solved their problem earlier. However, when asked directly, they can spot redundancy and change their approach. Overall, we found that when prompted this way, LLMs often (but not always) detect redundancies in their actions and can determine the optimal course of action." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 240, + 208, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 240, + 208, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 208, + 251 + ], + "type": "text", + "content": "3.4 Inference Matters" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 255, + 504, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 322 + ], + "type": "text", + "content": "When generating new tokens with Hogwild! Inference, we perform a forward pass on all workers in parallel, as though they were in the same batch. Instead of each sample having its own attention cache, we allow batch elements to attend to each other's KV caches at different positions. When processing newly generated tokens, we \"insert\" their KV representations at the end of their respective cache blocks, then arrange these cache blocks for each worker. This way both workers can immediately attend to each other's current tokens even before they are fully processed by all layers." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 326, + 505, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 505, + 393 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 505, + 393 + ], + "type": "text", + "content": "This leads to the following problem: since workers combine cache blocks in different order (see Figure 1), we would need to rotate the cached KVs multiple times, one for each worker. Done naively, this would require rotating all past token representations at every step, which is inefficient for long contexts. Fortunately, this problem can be circumvented using a property of rotation: if both query and key are rotated by the same angle, the dot product between them will not change. Instead of rotating all previous keys, we can rotate current token queries to an equivalent angle (Figure 2)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "content": "Suppose that a given attention layer needs to compute attention between the current token query " + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "content": " at position " + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "inline_equation", + "content": "i_q" + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "content": " (denoted " + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "inline_equation", + "content": "\\rho(q, i_q)" + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "content": ") and a block of keys rotated to the starting position " + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "inline_equation", + "content": "i_k" + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "content": ". Instead of rotating keys, we can rotate the query to position " + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "inline_equation", + "content": "i_q - i_k" + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "content": " and keep the KV cache as is. 
If there are multiple KV blocks A, B, C (Alice, Bob, Common) that need to be rotated to positions " + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "inline_equation", + "content": "i_k^A, i_k^B, i_k^C" + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "content": " respectively, we rotate the query " + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 397, + 504, + 455 + ], + "type": "text", + "content": " multiple times for each block. Formally, we can rewrite the attention dot-product:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 460, + 495, + 480 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 460, + 495, + 480 + ], + "spans": [ + { + "bbox": [ + 115, + 460, + 495, + 480 + ], + "type": "interline_equation", + "content": "\\rho (q, i _ {q}) \\Big [ \\rho (A, i _ {k} ^ {A}) \\oplus \\rho (B, i _ {k} ^ {B}) \\oplus \\rho (C, i _ {k} ^ {C}) \\Big ] = \\rho (q, i _ {q} - i _ {k} ^ {A}) A \\oplus \\rho (q, i _ {q} - i _ {k} ^ {B}) B \\oplus \\rho (q, i _ {q} - i _ {k} ^ {C}) C,", + "image_path": "b2c67363b0a157ab5ff662b92772e5587893fcb30bebd2182df2a561bc5a3043.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 483, + 506, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 506, + 561 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 483, + 506, + 561 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 104, + 483, + 506, + 561 + ], + "type": "text", + "content": " denotes concatenation. The r.h.s. formula only rotates the current step query, i.e. a single token per worker, as opposed to the past KV blocks that can contain thousands or millions of tokens. We use this property to design an efficient implementation of our method based on Flash-Decoding [Dao et al., 2023]. We gather each KV cache block in a contiguous memory buffer and compute attention similarly to Paged Attention [Kwon et al., 2023], where one page would correspond to one cache block and the corresponding query rotations from all workers. This way, we need only one copy of each cache block and do not need to re-rotate its entries (see Appendix B)." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 146, + 570, + 294, + 695 + ], + "blocks": [ + { + "bbox": [ + 146, + 570, + 294, + 695 + ], + "lines": [ + { + "bbox": [ + 146, + 570, + 294, + 695 + ], + "spans": [ + { + "bbox": [ + 146, + 570, + 294, + 695 + ], + "type": "image", + "image_path": "671772a8529123d424f4dc382164719cd30712feab78e67f4d667e021650f8ca.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 304, + 570, + 460, + 700 + ], + "blocks": [ + { + "bbox": [ + 304, + 570, + 460, + 700 + ], + "lines": [ + { + "bbox": [ + 304, + 570, + 460, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 460, + 700 + ], + "type": "image", + "image_path": "7505f7768dcfa922d898d6b01ba172187748b420cf06823a1f2df0b1c1f84199.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 700, + 506, + 734 + ], + "lines": [ + { + "bbox": [ + 104, + 700, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 506, + 734 + ], + "type": "text", + "content": "Figure 2: Intuitive scheme of Hogwild! 
Inference with query rotation. Colors represent cache blocks. Instead of rotating all cache blocks to align with Alice's and Bob's views, we keep them fixed at the zero position and only rotate the current token queries to equivalent angles." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 112, + 31, + 227, + 124 + ], + "blocks": [ + { + "bbox": [ + 112, + 31, + 227, + 124 + ], + "lines": [ + { + "bbox": [ + 112, + 31, + 227, + 124 + ], + "spans": [ + { + "bbox": [ + 112, + 31, + 227, + 124 + ], + "type": "image", + "image_path": "facf7a5e13928be1d0e4bd20c8fe4373b6452ff23d4b026fc5341612411e6b28.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 125, + 506, + 158 + ], + "lines": [ + { + "bbox": [ + 104, + 125, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 506, + 158 + ], + "type": "text", + "content": "Figure 3: (left) Evaluation results for QwQ-32B on synthetic tasks with 5 GSM8k questions in each. (middle) Evaluation of Hogwild! Inference and baselines with QwQ-32B on LIMO. (right) Hogwild! Inference with varying number of workers with QwQ-32B on LIMO." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 228, + 31, + 358, + 124 + ], + "blocks": [ + { + "bbox": [ + 228, + 31, + 358, + 124 + ], + "lines": [ + { + "bbox": [ + 228, + 31, + 358, + 124 + ], + "spans": [ + { + "bbox": [ + 228, + 31, + 358, + 124 + ], + "type": "image", + "image_path": "9fb8652c51dc0c6d99a0e37d7a48674b3f47c670070a4ead96f3d66cb29b09d3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 360, + 31, + 493, + 124 + ], + "blocks": [ + { + "bbox": [ + 360, + 31, + 493, + 124 + ], + "lines": [ + { + "bbox": [ + 360, + 31, + 493, + 124 + ], + "spans": [ + { + "bbox": [ + 360, + 31, + 493, + 124 + ], + "type": "image", + "image_path": "666c357e475425a3aa6b4c8622e00a7798c904aeae52e67863082f72671934a4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 192, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 192, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 192, + 176 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 182, + 282, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 282, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 282, + 194 + ], + "type": "text", + "content": "4.1 Detailed Evaluation with QwQ-32B" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 198, + 506, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 243 + ], + "type": "text", + "content": "In this section, we conduct an initial evaluation of Hogwild! 
Inference to test its ability to collaborate in our zero-shot setting. All evaluations in this section are done with the QwQ-32B [Qwen Team, 2025] model. We consider two tasks: one with obviously independent tasks that can be done in parallel and another with a more complicated collaboration pattern." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "type": "text", + "content": "In both setups, we allow the model to generate reasoning up to a certain budget of sequential forward passes and evaluate its accuracy. If the model did not produce the final answer (\\\\boxed{...}) in time, we take all generated outputs and insert a special prompt6 that makes the model generate an answer (or its \"best guess\"), similarly to how it is done in Pu et al. [2025]. If there are multiple workers / threads, we feed outputs from all workers (concatenated) into the model and prompt it to generate the final answer immediately (" + }, + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\leq 16" + }, + { + "bbox": [ + 104, + 247, + 504, + 335 + ], + "type": "text", + "content": " tokens, stop early if generated answer). We apply this technique to all methods except \"Baseline (no early stopping)\" and do not count these extra tokens towards the total budget (x axis) since they have an equal effect on all methods." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 339, + 402, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 339, + 402, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 402, + 352 + ], + "type": "text", + "content": "We evaluate the following generation algorithms (details in Appendix D):" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 355, + 505, + 547 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 105, + 355, + 504, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 355, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 504, + 378 + ], + "type": "text", + "content": "- Hogwild! Inference: Our main algorithm, as described in Section 3. We evaluate with 2, 3 and 4 parallel \"workers\" and provide additional configuration details in Appendix D.1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 378, + 504, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 378, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 504, + 401 + ], + "type": "text", + "content": "- Baseline (no early stopping): standard sequential generation with a single LLM instance. This is the only evaluation where we do not insert the early stopping prompt described above." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 402, + 501, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 501, + 413 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 501, + 413 + ], + "type": "text", + "content": "- Baseline: an improved sequential generation with the early stopping technique described above." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 413, + 505, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 413, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 505, + 491 + ], + "type": "text", + "content": "- Skeleton-of-Thought (SoT) [Ning et al., 2024]: a parallel reasoning algorithm in which the LLM first generates a short \"outline\" containing several independent tasks, then runs these tasks in parallel and combines the results. We run with both an unlimited number of parallel threads (original setup) and with 2 \"workers\" that append tokens to each thread in a round-robin fashion. For more complicated reasoning tasks, we found that Skeleton-of-Thought cannot solve the problem by itself; to mitigate this, we allow the main model to encode all generated threads and continue reasoning (with early stopping). We discuss Skeleton-of-Thought in more detail in Appendix D.2." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 491, + 504, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 504, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 504, + 547 + ], + "type": "text", + "content": "- Self-consistency [Wang et al., 2022]: a parallel reasoning algorithm where LLM instances write solutions independently, then vote on the answer. Instead of majority voting, we allow the LLM to view both solutions (concatenated) before generating the final answer with our early-stopping prompt, which outperforms voting in our setup and works even for 2 workers. Note that this method cannot split sub-tasks between workers and is instead meant to increase quality through voting." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 557, + 504, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 504, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 504, + 625 + ], + "type": "text", + "content": "Sanity Checks with GSM8k×5: Before we try our approach on more challenging tasks, we test if Hogwild! Inference is capable of basic collaboration. For this purpose, we construct a toy problem set with 128 samples, each containing 5 non-overlapping questions from the GSM8k test set [Cobbe et al., 2021]. The LLM is prompted to solve each problem and return comma-separated values7. We report the average per-question accuracy, i.e. if the model solves 4 out of 5 questions in a given sample correctly, it will get a score of 0.8 for that sample." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 628, + 506, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 506, + 685 + ], + "type": "text", + "content": "We summarize our results in Figure 3 (left): the parallel workers under the Hogwild! Inference can indeed collaborate, i.e. our KV cache manipulations do not break down model's reasoning capabilities. As intuition suggests, Skeleton-of-Thought can also speed up this synthetic task by answering each question in parallel. We provide an example of the outline created by the Skeleton-of-Thought in Appendix E.4. 
Notably, the self-consistency algorithm also shows some improvement over the" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 693, + 500, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 693, + 500, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 693, + 500, + 714 + ], + "type": "text", + "content": "\"\\n\\nWait, given the limited time, I have to give an answer right now. Considering all my previous attempts, I have to conclude that the final answer is boxed{''" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 714, + 488, + 734 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 714, + 488, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 714, + 488, + 734 + ], + "type": "text", + "content": "7\"Solve these problems and return comma-separated answers \\boxed{answer1, ..., answer5} : \\n 1. \\{task1\\} \\n 2. \\{task2\\} \\n 3. \\{task3\\} \\n 4. \\{task4\\} \\n 5. \\{task5\\}\"" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 53, + 304, + 183 + ], + "blocks": [ + { + "bbox": [ + 116, + 53, + 304, + 183 + ], + "lines": [ + { + "bbox": [ + 116, + 53, + 304, + 183 + ], + "spans": [ + { + "bbox": [ + 116, + 53, + 304, + 183 + ], + "type": "image", + "image_path": "f66dea94426dd2a9fdd437283ff40594986cf9239a2905077493f34a0dc26501.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 186, + 504, + 209 + ], + "lines": [ + { + "bbox": [ + 104, + 186, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 504, + 209 + ], + "type": "text", + "content": "Figure 4: Evaluation of Hogwild! Inference on LIMO for QwQ-32B, Phi-4-Reasoning-Plus (14B) and Qwen3-8B (left) and different Qwen3 models (right). Dashed lines denote baselines (1 agent)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 53, + 496, + 184 + ], + "blocks": [ + { + "bbox": [ + 309, + 53, + 496, + 184 + ], + "lines": [ + { + "bbox": [ + 309, + 53, + 496, + 184 + ], + "spans": [ + { + "bbox": [ + 309, + 53, + 496, + 184 + ], + "type": "image", + "image_path": "abdc0a516b9ee43251a53d1bf6316e7463096fd5fd17654166af50c43ddb24e6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 216, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 239 + ], + "type": "text", + "content": "baseline, which we attribute to the fact that it gives the model two \"shots\" at a problem, and if one of them happens to be faster, the algorithm will on average surpass the baseline." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 243, + 504, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 300 + ], + "type": "text", + "content": "LIMO tasks. Next, we evaluate Hogwild! 
Inference in a more challenging setup where there is no clear pattern of collaboration. We adopt the dataset of 817 problems from Ye et al. [2025]. The dataset contains mathematical problems that take modern LLMs thousands of tokens to solve reliably. Unlike our synthetic tasks, the problems in that dataset often do not have an obvious way to agree on a collaboration strategy ahead of time, but it can emerge (and change) during reasoning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 304, + 504, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 380 + ], + "type": "text", + "content": "We summarize our results in Figure 3 (middle, right). Overall, Hogwild! Inference can converge to a correct solution faster, achieving greater accuracy for the same number of consecutive steps. Furthermore, it produces greater speed-ups as we increase the number of parallel workers (though there is a limit, as we show in Appendix E.1). Similarly to our previous setup, self-consistency decoding provides some improvement over the single-worker baseline, but does not outperform Hogwild! Inference. As expected, Skeleton-of-Thought could not split the problem neatly into independent tasks, but still achieves some improvement on small budgets." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 385, + 504, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 504, + 452 + ], + "type": "text", + "content": "We then evaluate different LLM families and sizes on LIMO dataset in Figure 4. We found that our approach generalizes to most of the models tested, with a notable exception. For Qwen3 model family, we observe that the smaller models, 1.7B and, to a lesser extent, 4B fail to adapt to the task and get distracted from the task. In Appendix E.1, we also report additional evaluations in this setup: ablation of the cache rotation from 3.1 and our chat-like cache structure from Section 3.2. We provide examples of collaborative generations for this setup in Appendix F." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 457, + 284, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 284, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 284, + 468 + ], + "type": "text", + "content": "4.2 Additional Benchmarks and Models" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 471, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 471, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 471, + 504, + 536 + ], + "type": "text", + "content": "Next, we test whether our approach can be generalized to other mathematical reasoning and programming tasks. For this evaluation, we also chose benchmarks that do not have obvious collaboration patterns but can nonetheless be solved faster by two human \"agents\". We evaluate on three such benchmarks: LiveCodeBench, OlympiadBench and AIME'25. In addition to QwQ-32B, we also report Qwen3 [Yang et al., 2025] and Phi-4 Reasoning Plus [Abdin et al., 2025]. For AIME'25, we focus on larger models and additionally include DeepSeek-R1 [DeepSeek-AI et al., 2025]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 541, + 504, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 504, + 640 + ], + "type": "text", + "content": "LiveCodeBench [Jain et al., 2024]. We evaluate on the code_generation lite version release_v5. Our evaluation closely follows the setup from Qwen Team [2025]: we take the same 279 problems dated between 2024.08 and 2025.02 and filtered so as to avoid ones present in the QwQ dataset. Note, however, that some of the other LLMs in our setup do not report which samples, if any, did they train on. However, since we use the same model weights for the baseline and Hogwild! Inference, we can still compare the two strategies. We run the standard test suite and report Pass@1 averaged over 8 random seeds. For early stopping, we allow the method (and baseline) to generate a single final code block with up to 1024 tokens, using a similar early-stopping prompt as in Section 4.1 (see Appendix C). For Hogwild! Inference, we use the same system prompts as before." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "content": "OlympiadBench [He et al., 2024]. Next, we evaluate on a different reasoning benchmark that contains Olympiad-level problems on Math and Physics. We run evaluations on the two text-only english-language parts: OE_TO maths_en_COMP (675 problems) and OE_TO_physics_en_COMP (236 problems). Unlike in Section 3, the answers to these problems are not individual numbers but LaTeX formulae that allow multiple equivalent formulations of the correct answer. We use the official evaluation codebase and adapt the built-in DeepSeek-R1 prompts for use with our model set (see details in Appendix D). For early stopping, we use the same prompt as before with 64 token limit." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 42, + 302, + 173 + ], + "blocks": [ + { + "bbox": [ + 116, + 42, + 302, + 173 + ], + "lines": [ + { + "bbox": [ + 116, + 42, + 302, + 173 + ], + "spans": [ + { + "bbox": [ + 116, + 42, + 302, + 173 + ], + "type": "image", + "image_path": "c428b96323c4fc03a26afef9fe9b57ff6ae44eea4fc284d28e76d1cf6edf531f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 176, + 504, + 198 + ], + "lines": [ + { + "bbox": [ + 104, + 176, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 104, + 176, + 504, + 198 + ], + "type": "text", + "content": "Figure 5: Evaluation of Hogwild! Inference with 2 workers on OlympiadBench Math (left) & Physics (right) for QwQ-32B, Qwen3-14B and Qwen3-8B models, dashed lines are the baselines." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 42, + 496, + 174 + ], + "blocks": [ + { + "bbox": [ + 309, + 42, + 496, + 174 + ], + "lines": [ + { + "bbox": [ + 309, + 42, + 496, + 174 + ], + "spans": [ + { + "bbox": [ + 309, + 42, + 496, + 174 + ], + "type": "image", + "image_path": "8347a312d3cd1bb376b6935227e6d6cb5ade8c972725f3a5f84c73d48039e3ad.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 202, + 504, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 202, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 504, + 269 + ], + "type": "text", + "content": "Large Models on AIME [2025]. Finally, we evaluate how Hogwild! Inference scales to larger models on a popular AIME'25 benchmark, using both I and II subsets. For this task, we focus on two models: Qwen3-235B-A22B Yang et al. [2025] and DeepSeek-R1 [DeepSeek-AI et al., 2025]. Since the AIME benchmark only contains 30 problems (15 per subset), we evaluate each model with 10 random seeds and average results. We otherwise use the same evaluation protocol as for LIMO, with the same early stopping and at most 16 tokens per answer during early stopping." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 272, + 506, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 506, + 382 + ], + "type": "text", + "content": "We arrange our results in Figure 5 for OlympiadBench and Figure 6 for LiveCodeBench and AIME'25. Overall, Hogwild! Inference shows similar improvements to what we observed earlier (Section 4.1). One atypical case is OlympiadBench Physics (Fig. 5 right) where Qwen3-14B stops improving after roughly 4096 tokens. Upon closer inspection, we found that the model does not break down, but overthinks the problem, improving some answers while replacing other correct answers with mistakes. Overall, the results show that the cache rotation tricks and the output structure from 3.2 can indeed be generalized across different models and benchmarks. Note, however, that due to the different output format we needed to apply slight alterations to individual model prompts: notably, QwQ-32B automatically inserts at the end of the prompt, while Qwen3 and Phi-4 do not, so we insert it manually before the common history header. We describe this in detail in Appendix C." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 387, + 288, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 387, + 288, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 288, + 399 + ], + "type": "text", + "content": "4.3 Measuring the Ability to Collaborate" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 400, + 506, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 400, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 400, + 506, + 532 + ], + "type": "text", + "content": "Now that we know that modern LLMs can collaborate in our zero-shot setting, it is natural to ask how well can they collaborate and what affects their ability. While this question deserves a more thorough investigation, we can still quantify how well LLMs collaborate under Hogwild! Inference. 
In this section, we analyze their \"collaborativeness\" using the LLM-as-a-Judge paradigm [Zheng et al., 2023a]: we feed collaborative traces into a GPT-4o [Hurst et al., 2024] model and prompt it to score behavior from 1 to 6, where \"1\" means no collaboration, \"3\" indicates basic task splitting and \"6\" represents a hypothetical optimal collaboration, never achieved in our analysis. We analyze LLM generations on LIMO dataset with on three models from Section 4.2. To control for differences in generation lengths we compare only 4096-token prefixes from each worker. We compare three inference setups: i) independent generations as per self-consistency decoding; ii) restricted Hogwild! Inference where agents can only view each other's finished paragraphs, but not the current (incomplete) reasoning step, and iii) full Hogwild! Inference, with 2 agents in each setup." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 535, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 535, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 535, + 506, + 581 + ], + "type": "text", + "content": "We summarize our scores in Figure 7: as expected, models that can see each other can collaborate and independent workers cannot. Interestingly, Hogwild! Inference with instant (token-wise) synchronization scores significantly higher than a version that can only see completed inference steps. In Appendix G we provide more detailed results, judge prompt, configurations and examples." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 116, + 582, + 302, + 710 + ], + "blocks": [ + { + "bbox": [ + 116, + 582, + 302, + 710 + ], + "lines": [ + { + "bbox": [ + 116, + 582, + 302, + 710 + ], + "spans": [ + { + "bbox": [ + 116, + 582, + 302, + 710 + ], + "type": "image", + "image_path": "f0209aef2837c6968d7da96be40be0a43e35806305771cb01ca228315c6b45f8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 712, + 504, + 734 + ], + "lines": [ + { + "bbox": [ + 104, + 712, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 712, + 504, + 734 + ], + "type": "text", + "content": "Figure 6: Evaluation of Hogwild! Inference (2 workers) on LiveCodeBench v5 2024.08-2025.02 for QwQ, Phi-4-R+ and Qwen3 (left) and AIME'25 for larger models (right), dashed lines are baselines." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 582, + 496, + 710 + ], + "blocks": [ + { + "bbox": [ + 309, + 582, + 496, + 710 + ], + "lines": [ + { + "bbox": [ + 309, + 582, + 496, + 710 + ], + "spans": [ + { + "bbox": [ + 309, + 582, + 496, + 710 + ], + "type": "image", + "image_path": "95f302d1d13a8c6c86e2b7cf3e4be7afbd7c3e00e98f2e025d3ddd2173fc424a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 42, + 298, + 171 + ], + "blocks": [ + { + "bbox": [ + 107, + 42, + 298, + 171 + ], + "lines": [ + { + "bbox": [ + 107, + 42, + 298, + 171 + ], + "spans": [ + { + "bbox": [ + 107, + 42, + 298, + 171 + ], + "type": "image", + "image_path": "8bec21499eb610041b5b9e65ad38946ef73ee9c2e166ed1f1df365336ebe3b73.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 173, + 299, + 228 + ], + "lines": [ + { + "bbox": [ + 104, + 173, + 299, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 299, + 228 + ], + "type": "text", + "content": "Figure 7: Mean collaborativeness score from GPT-4o. No sync is independent generation, Step-wise is restricted Hogwild! where worker can only see each-other's past steps, Token-wise is full Hogwild! with instant cache exchange." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 313, + 76, + 504, + 227 + ], + "blocks": [ + { + "bbox": [ + 310, + 42, + 506, + 76 + ], + "lines": [ + { + "bbox": [ + 310, + 42, + 506, + 76 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 506, + 76 + ], + "type": "text", + "content": "Table 1: Inference benchmarks for Section 4.4. Columns denote sequencelength. Rows withone workerare baselines,2&4workers use Hogwild!" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 76, + 504, + 227 + ], + "lines": [ + { + "bbox": [ + 313, + 76, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 313, + 76, + 504, + 227 + ], + "type": "table", + "html": "
<table>
<tr><td># Workers</td><td>1024</td><td>2048</td><td>4096</td><td>8192</td><td>16384</td></tr>
<tr><td colspan="6">Tokens per second</td></tr>
<tr><td>1</td><td>20.1</td><td>20.0</td><td>19.7</td><td>19.3</td><td>18.3</td></tr>
<tr><td>2</td><td>36.3</td><td>36.2</td><td>36.1</td><td>36.1</td><td>34.3</td></tr>
<tr><td>4</td><td>68.9</td><td>69.0</td><td>69.1</td><td>66.3</td><td>60.3</td></tr>
<tr><td colspan="6">Latency per forward (ms)</td></tr>
<tr><td>1</td><td>49.7</td><td>50.0</td><td>50.9</td><td>51.7</td><td>54.5</td></tr>
<tr><td>2</td><td>55.1</td><td>55.3</td><td>55.4</td><td>55.3</td><td>58.3</td></tr>
<tr><td>4</td><td>58.1</td><td>58.0</td><td>57.9</td><td>60.4</td><td>66.4</td></tr>
<tr><td colspan="6">Time to generate # tokens (s)</td></tr>
<tr><td>1</td><td>52.3</td><td>103.3</td><td>206.5</td><td>416.7</td><td>853.5</td></tr>
<tr><td>2</td><td>29.9</td><td>58.1</td><td>114.6</td><td>228.0</td><td>454.4</td></tr>
<tr><td>4</td><td>16.7</td><td>31.6</td><td>61.3</td><td>120.7</td><td>239.2</td></tr>
</table>
", + "image_path": "38e0900c5cbff9f84541182863d6a0ef9c8f80a8c7de1acbe2a7c5d160600707.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 236, + 171, + 246 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 171, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 171, + 246 + ], + "type": "text", + "content": "4.4 Inference" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 249, + 506, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 249, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 506, + 349 + ], + "type": "text", + "content": "To recall, our main motivation for proposing Hogwild! Inference is to enable faster reasoning through collaboration. Since the actual inference speed depends on many factors (GPU(s), software, precision, etc), we previously focused on evaluating inference speed in terms of the number of consecutive forward passes and not inference time. Here, in turn, we report the actual inference speed in terms of latency and tokens per second. We evaluate three setups: baseline sequential inference and Hogwild! Inference for two and four workers. We run baseline with FlashAttention v2 (FlashDecoding) and our algorithm with custom GPU kernels using the approach described in Section 3.4. We use a NVIDIA L40S GPU and AMD EPYC 9534 and benchmark the official quantized version of QwQ-32B-AWQ for all setups." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "type": "text", + "content": "Our results in Table 1 show that, for the 32B model, Hogwild! Inference can generate tokens nearly twice as fast for 2 workers and about " + }, + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "type": "inline_equation", + "content": "3.2 - 3.6 \\times" + }, + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "type": "text", + "content": " faster for 4 workers, which means that the accuracy gains from earlier sections can translate to faster solutions. We also report the average over GPUs, as well the " + }, + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 353, + 506, + 431 + ], + "type": "text", + "content": " percentiles, in Figure 8 (left). Overall, Hogwild! Inference has a small constant latency offset compared to the baseline and near-linear scaling as we increase the number of workers. While our implementation already shows significant performance gains, we discuss several ways to scale it further in Appendix B, including in distributed setting." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 110, + 444, + 299, + 601 + ], + "blocks": [ + { + "bbox": [ + 110, + 444, + 299, + 601 + ], + "lines": [ + { + "bbox": [ + 110, + 444, + 299, + 601 + ], + "spans": [ + { + "bbox": [ + 110, + 444, + 299, + 601 + ], + "type": "image", + "image_path": "4a7a536cd12fc9c79c74320988e958ef9293e7344b33fef910cf5e76e515d91d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "content": "Figure 8: (left) Duration of a single forward pass (generating " + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "content": " new tokens) for Qwen/QwQ-32B-AWQ on L40S, given the total number of tokens already in the KV cache. The dotted lines indicate the " + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "content": " quantiles over multiple repetitions on different GPUs. (right) Accuracy versus average generation time on the LIMO dataset task using QwQ-32B-AWQ under different token budgets." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 312, + 443, + 503, + 600 + ], + "blocks": [ + { + "bbox": [ + 312, + 443, + 503, + 600 + ], + "lines": [ + { + "bbox": [ + 312, + 443, + 503, + 600 + ], + "spans": [ + { + "bbox": [ + 312, + 443, + 503, + 600 + ], + "type": "image", + "image_path": "0541b090e1608ce5167c68820c39717e91683369e6da6bc3263c960480ad859c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "content": "As the figure shows, there is some overhead associated with preparing multiple caches (i.e., even at an empty cache, Hogwild! is slightly slower than pure FlashAttention). A more detailed breakdown is presented in Table 2, which shows the duration of the attention kernel (or attention+rope for Hogwild!), as well as the total setup time, that is, the time spent preparing the data structures needed for Hogwild! 
The latter needs to be done only once per forward pass, instead of once per transformer" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 173, + 89, + 436, + 167 + ], + "blocks": [ + { + "bbox": [ + 129, + 77, + 479, + 88 + ], + "lines": [ + { + "bbox": [ + 129, + 77, + 479, + 88 + ], + "spans": [ + { + "bbox": [ + 129, + 77, + 479, + 88 + ], + "type": "text", + "content": "Table 2: Breakdown of Hogwild! overhead compared to pure FlashAttention inference." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 173, + 89, + 436, + 167 + ], + "lines": [ + { + "bbox": [ + 173, + 89, + 436, + 167 + ], + "spans": [ + { + "bbox": [ + 173, + 89, + 436, + 167 + ], + "type": "table", + "html": "
<table>
<tr><td>KV Length</td><td colspan="3">Attention (×64)</td><td colspan="3">Setup (×1)</td></tr>
<tr><td></td><td>FA</td><td>W2</td><td>W4</td><td>FA</td><td>W2</td><td>W4</td></tr>
<tr><td>300</td><td>11μs</td><td>45μs</td><td>45μs</td><td>-</td><td>1.9ms</td><td>3.9ms</td></tr>
<tr><td>4096</td><td>35μs</td><td>65μs</td><td>82μs</td><td>-</td><td>1.9ms</td><td>3.9ms</td></tr>
<tr><td>8192</td><td>55μs</td><td>92μs</td><td>123μs</td><td>-</td><td>1.9ms</td><td>3.9ms</td></tr>
<tr><td>16384</td><td>100μs</td><td>140μs</td><td>203μs</td><td>-</td><td>1.9ms</td><td>3.9ms</td></tr>
</table>
", + "image_path": "5a3fc06a3288c485d1e0cfd791096076d85a7ad085ec8381d9e99fe6558a8cd2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "content": "block. For long contexts, the attention call is about " + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 188, + 504, + 211 + ], + "type": "text", + "content": " slower for generating with 2 and 4 workers, respectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 215, + 506, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 215, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 215, + 506, + 281 + ], + "type": "text", + "content": "Additionally, we report accuracy results over time using our kernel on the official quantized version of QwQ-32B-AWQ on LIMO dataset. The experiments were conducted on NVIDIA L40S GPUs. For comparison, we run the baseline (FlashAttention v2) and Hogwild with 2 workers, maintaining the same experimental setup as detailed in Section 4.1. We report our results in Figure 8 (right). As illustrated, our method achieves better accuracy results on the LIMO dataset within the same time budget." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 290, + 180, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 290, + 180, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 180, + 303 + ], + "type": "text", + "content": "5 Discussion" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 308, + 506, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 506, + 364 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 506, + 364 + ], + "type": "text", + "content": "In this work, we investigated the ability of large language models to perform parallel generation where multiple instances synchronize through a shared, dynamically-updated attention cache. Surprisingly, our results show that LLMs can operate effectively in parallel across dynamically updated attention cache without specialized fine-tuning. We demonstrate that parallel inference threads can explicitly coordinate, leveraging each other's partial solutions to enable collaborative problem-solving." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 369, + 506, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 506, + 425 + ], + "type": "text", + "content": "The proposed method, called Hogwild! Inference, allows multiple inference threads to concurrently access and update a shared attention cache. By leveraging Rotary Position Embeddings (RoPE), our approach introduces minimal computational overhead while ensuring instant synchronization—newly generated KV cache entries becoming immediately visible to all threads. This \"telepathic\" communication opens up new possibilities for efficient parallel generation with LLMs." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 430, + 506, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 506, + 465 + ], + "type": "text", + "content": "**Limitations** Our method exhibits reduced robustness when applied to smaller models or longer contexts, suggesting scalability challenges across model sizes and sequence lengths. Additionally, our automatic evaluation metric relies on a proprietary model, which may limit reproducibility." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 472, + 506, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 506, + 560 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 506, + 560 + ], + "type": "text", + "content": "Future work In future work, we plan to investigate methods for improving collaboration between threads, such as fine-tuning and reinforcement learning. We also plan to investigate connections to alternative parallel inference schemes, such as speculative decoding [Leviathan et al., 2023], and parallel token generation methods like Medusa [Cai et al., 2024] or EAGLE [Li et al., 2024b]. Finally, it is interesting to consider alternative shared memory structures: allowing workers to insert new steps in any order, selectively delete (forget) steps, or solving programming and tool use tasks with a shared IDE and file-system. The KV cache rearrangement used in Hogwild! Inference could also allow humans to interact with agents asynchronously, giving clarifications and feedback during reasoning." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 567, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 504, + 590 + ], + "type": "text", + "content": "Acknowledgements: We thank Vladimir Malinovskii for his help with brainstorming, helpful feedback and suggesting future work directions. We also thank Philip Zmushko for proofreading." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 605, + 165, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 605, + 165, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 165, + 618 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 624, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 624, + 506, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 506, + 690 + ], + "type": "text", + "content": "Marah Abdin, Sahaj Agarwal, Ahmed Awadallah, Vidhisha Balachandran, Harkirat Behl, Lingjiao Chen, Gustavo de Rosa, Suriya Gunasekar, Mojan Javaheripi, Neel Joshi, Piero Kauffmann, Yash Lara, Caio Cesar Teodoro Mendes, Arindam Mitra, Besmira Nushi, Dimitris Papailiopoulos, Olli Saarikivi, Shital Shah, Vaishnavi Shrivastava, Vibhav Vineet, Yue Wu, Safoora Yousefi, and Guoqing Zheng. Phi-4-reasoning technical report, 2025. URL https://arxiv.org/abs/2504.21318." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 700, + 504, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 700, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 504, + 721 + ], + "type": "text", + "content": "AIME. Aime problems and solutions. 
https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions, 2025." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 118 + ], + "type": "text", + "content": "Reza Yazdani Aminabadi, Samyam Rajbhandari, Minjia Zhang, Ammar Ahmad Awan, Cheng Li, Du Li, Elton Zheng, Jeff Rasley, Shadeen Smith, Olatunj Ruwase, and Yuxiong He. Deepspeed inference: Enabling efficient inference of transformer models at unprecedented scale, 2022. URL https://arxiv.org/abs/2207.00032." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "type": "text", + "content": "Anthropic. Claude 3.7 sonnet and claude code, 2024. URL https://www.anthropic.com/news/claude-3-7-sonnet. Accessed: 2025.04.02." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 152, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 152, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 504, + 186 + ], + "type": "text", + "content": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. In Proceedings of the 3rd International Conference on Learning Representations (ICLR), 2015. URL https://arxiv.org/abs/1409.0473." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 192, + 505, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 192, + 505, + 227 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 227 + ], + "type": "text", + "content": "Yushi Bai, Jiajie Zhang, Xin Lv, Linzhi Zheng, Siqi Zhu, Lei Hou, Yuxiao Dong, Jie Tang, and Juanzi Li. Longwriter: Unleashing 10,000+ word generation from long context llms. ArXiv, abs/2408.07055, 2024. URL https://api_semanticscholar.org/CorpusID:271859903." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 232, + 504, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 232, + 504, + 267 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 504, + 267 + ], + "type": "text", + "content": "Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 272, + 505, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 272, + 505, + 296 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 505, + 296 + ], + "type": "text", + "content": "Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer: The long-document transformer, 2020. URL https://arxiv.org/abs/2004.05150." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 301, + 504, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 301, + 504, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 504, + 325 + ], + "type": "text", + "content": "Tianle Cai, Xinyun Li, Zhiruo Wang, Yuhuai Wang, and Dawn Song. Medusa: Simple llm inference acceleration framework with multiple decoding heads. arXiv preprint arXiv:2401.10774, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 331, + 506, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 331, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 506, + 398 + ], + "type": "text", + "content": "Justin Chen, Swarnadeep Saha, and Mohit Bansal. ReConcile: Round-table conference improves reasoning via consensus among diverse LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7066–7085, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.381. URL https://aclanthology.org/2024.acl-long.381/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 403, + 504, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 403, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 403, + 504, + 437 + ], + "type": "text", + "content": "Mouxiang Chen, Binyuan Hui, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Jianling Sun, Junyang Lin, and Zhongxin Liu. Parallel scaling law for language models, 2025. URL https://arxiv.org/abs/2505.10475." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 443, + 506, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 443, + 506, + 488 + ], + "spans": [ + { + "bbox": [ + 106, + 443, + 506, + 488 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 495, + 506, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 495, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 506, + 552 + ], + "type": "text", + "content": "Roi Cohen, May Hamri, Mor Geva, and Amir Globerson. LM vs LM: Detecting factual errors via cross examination. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12621-12640, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.778. URL https://aclanthology.org/2023.emnlp-main.778/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 557, + 506, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 557, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 557, + 506, + 590 + ], + "type": "text", + "content": "Tri Dao, Daniel Haziza, Francisco Massa, and Grigory Sizov. Flash-decoding for long-context inference. https://crfm.stanford.edu/2023/10/12/flashdecoding.html, 2023. Accessed: 2025-05-10." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 597, + 506, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 597, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 106, + 597, + 506, + 641 + ], + "type": "text", + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, and Xiao Bi et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 647, + 506, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 647, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 647, + 506, + 682 + ], + "type": "text", + "content": "Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, Xianglong Liu, and Dacheng Tao. Dynamic parallel tree search for efficient ltm reasoning, 2025. URL https://arxiv.org/abs/2502.16235." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "type": "text", + "content": "Yilun Du, Shuang Li, Antonio Torralba, Joshua B. Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. In *Forty-first International Conference on Machine Learning*, 2023. URL https://openreview.net/forum?id=zj7YuTE4t8." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "Elliot E. Entin and Daniel Serfaty. Adaptive team coordination. Human Factors, 41(2):312-325, 1999." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 102, + 504, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 102, + 504, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 504, + 126 + ], + "type": "text", + "content": "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 133, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 133, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 107, + 133, + 505, + 157 + ], + "type": "text", + "content": "In Gim, Seung seob Lee, and Lin Zhong. Asynchronous llm function calling, 2024. URL https://arxiv.org/abs/2412.07017." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 164, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 164, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 164, + 504, + 198 + ], + "type": "text", + "content": "Google DeepMind. Gemini 2.5: Our Newest Gemini Model with Thinking. https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#gemini-2-5-thinking, 2025. Accessed: 2025-04-07." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 205, + 505, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 505, + 251 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 505, + 251 + ], + "type": "text", + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 258, + 505, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 258, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 106, + 258, + 505, + 293 + ], + "type": "text", + "content": "Chan-Jan Hsu, Davide Buffelli, Jamie McGowan, Feng-Ting Liao, Yi-Chang Chen, Sattar Vakili, and Da shan Shiu. Group think: Multiple concurrent reasoning agents collaborating at token level granularity, 2025. URL https://arxiv.org/abs/2505.11107." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 300, + 505, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 300, + 505, + 334 + ], + "spans": [ + { + "bbox": [ + 107, + 300, + 505, + 334 + ], + "type": "text", + "content": "Aaron Hurst, Adam Lerner, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 342, + 339, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 342, + 339, + 354 + ], + "spans": [ + { + "bbox": [ + 107, + 342, + 339, + 354 + ], + "type": "text", + "content": "Edwin Hutchins. Cognition in the Wild. MIT Press, 1995." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 361, + 505, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 361, + 505, + 396 + ], + "spans": [ + { + "bbox": [ + 107, + 361, + 505, + 396 + ], + "type": "text", + "content": "Sam Ade Jacobs, Masahiro Tanaka, Chengming Zhang, Minjia Zhang, Shuaiwen Leon Song, Samyam Rajbhandari, and Yuxiong He. Deepspeed ulysses: System optimizations for enabling training of extreme long sequence transformer models. arXiv preprint arXiv:2309.14509, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 403, + 505, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 403, + 505, + 447 + ], + "spans": [ + { + "bbox": [ + 106, + 403, + 505, + 447 + ], + "type": "text", + "content": "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livecodebench: Holistic and contamination free evaluation of large language models for code, 2024. URL https://arxiv.org/abs/2403.07974." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 456, + 505, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 456, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 106, + 456, + 505, + 500 + ], + "type": "text", + "content": "Tian Jin, Ellie Y. Cheng, Zack Ankner, Nikunj Saunshi, Blake M. Elias, Amir Yazdanbakhsh, Jonathan Ragan-Kelley, Suvinay Subramanian, and Michael Carbin. Learning to keep a promise: Scaling language model decoding parallelism with learned asynchronous decoding, 2025. URL https://arxiv.org/abs/2502.11517." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 509, + 505, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 509, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 106, + 509, + 505, + 542 + ], + "type": "text", + "content": "Sehoon Kim, Suhong Moon, Ryan Tabrizi, Nicholas Lee, Michael W Mahoney, Kurt Keutzer, and Amir Gholami. An llm compiler for parallel function calling. In *Forty-first International Conference on Machine Learning*, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 550, + 505, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 550, + 505, + 584 + ], + "spans": [ + { + "bbox": [ + 107, + 550, + 505, + 584 + ], + "type": "text", + "content": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. ArXiv, abs/2205.11916, 2022. URL https://apisemantic scholar.org/CorpusID:249017743." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 592, + 505, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 592, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 106, + 592, + 505, + 670 + ], + "type": "text", + "content": "Aobo Kong, Shiwan Zhao, Hao Chen, Qicheng Li, Yong Qin, Ruiqi Sun, Xin Zhou, Enzhi Wang, and Xiaohang Dong. Better zero-shot reasoning with role-play prompting. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 4099-4113, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.228. URL https://aclanthology.org/2024.naacl-long.228/." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 677, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 677, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 722 + ], + "type": "text", + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pages 611-626, 2023." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding. In International Conference on Machine Learning, pages 19274-19286. PMLR, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 102, + 505, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 102, + 505, + 125 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 505, + 125 + ], + "type": "text", + "content": "Junyou Li, Qin Zhang, Yangbin Yu, Qiang Fu, and Deheng Ye. More agents is all you need. Transactions on Machine Learning Research, 2024a." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 133, + 504, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 133, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 107, + 133, + 504, + 167 + ], + "type": "text", + "content": "Shen Li, Yanli Zhao, Rohan Varma, Omkar Salpekar, Pieter Noordhuis, Teng Li, Adam Paszke, Jeff Smith, Brian Vaughan, Pritam Damania, and Soumith Chintala. Pytorch distributed: Experiences on accelerating data parallel training, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 173, + 504, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 173, + 504, + 207 + ], + "spans": [ + { + "bbox": [ + 107, + 173, + 504, + 207 + ], + "type": "text", + "content": "Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle: Speculative sampling requires rethinking feature uncertainty. In Proceedings of the 41st International Conference on Machine Learning, pages 31147-31162. PMLR, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 215, + 505, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 215, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 107, + 215, + 505, + 249 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. ArXiv, abs/2305.20050, 2023. URL https://api_semanticscholar.org/CorpusID:258987659." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 255, + 505, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 255, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 107, + 255, + 505, + 289 + ], + "type": "text", + "content": "Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, et al. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model. arXiv preprint arXiv:2405.04434, 2024a." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 297, + 505, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 505, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 505, + 319 + ], + "type": "text", + "content": "Hao Liu, Matei Zaharia, and Pieter Abbeel. Ring attention with blockwise transformers for near-infinite context, 2023. URL https://arxiv.org/abs/2310.01889." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 327, + 504, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 327, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 504, + 350 + ], + "type": "text", + "content": "Mingdao Liu, Aohan Zeng, Bowen Wang, Peng Zhang, Jie Tang, and Yuxiao Dong. Apar: Llms can do auto-parallel auto-regressive decoding. arXiv preprint arXiv:2401.06761, 2024b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 357, + 504, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 357, + 504, + 391 + ], + "spans": [ + { + "bbox": [ + 107, + 357, + 504, + 391 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 398, + 506, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 398, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 107, + 398, + 506, + 432 + ], + "type": "text", + "content": "Xuefei Ning, Zinan Lin, Zixuan Zhou, Zifu Wang, Huazhong Yang, and Yu Wang. Skeleton-ofthought: Prompting LLMs for efficient parallel generation. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=mqVgBbNCm9." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 440, + 504, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 440, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 107, + 440, + 504, + 473 + ], + "type": "text", + "content": "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, and Alex Beutel et al. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 480, + 505, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 480, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 107, + 480, + 505, + 514 + ], + "type": "text", + "content": "Jiayi Pan, Xiuyu Li, Long Lian, Charlie Snell, Yifei Zhou, Adam Yala, Trevor Darrell, Kurt Keutzer, and Alane Suhr. Learning adaptive parallel reasoning with language models. arXiv preprint arXiv:2504.15466, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 521, + 505, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 521, + 505, + 588 + ], + "spans": [ + { + "bbox": [ + 107, + 521, + 505, + 588 + ], + "type": "text", + "content": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 
PyTorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems (NeurIPS). Neural Information Processing Systems Foundation, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 595, + 504, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 595, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 107, + 595, + 504, + 618 + ], + "type": "text", + "content": "Bowen Peng, Jeffrey Quesnelle, Honglu Fan, and Enrico Shippole. Yarn: Efficient context window extension of large language models, 2023. URL https://arxiv.org/abs/2309.00071." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 625, + 505, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 625, + 505, + 658 + ], + "spans": [ + { + "bbox": [ + 107, + 625, + 505, + 658 + ], + "type": "text", + "content": "Xiao Pu, Michael Saxon, Wenyue Hua, and William Yang Wang. Thoughtterminator: Benchmarking, calibrating, and mitigating overthinking in reasoning models, 2025. URL https://arxiv.org/ abs/2504.13367." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 666, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 666, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 107, + 666, + 505, + 722 + ], + "type": "text", + "content": "Yujia Qin, Shi Liang, Yining Ye, Kunlun Zhu, Lan Yan, Ya-Ting Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Runchu Tian, Ruobing Xie, Jie Zhou, Marc H. Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. Toollm: Facilitating large language models to master 16000+ real-world apis. ArXiv, abs/2307.16789, 2023. URL https://api-semanticscholar.org/ CorpusID:260334759." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 102, + 506, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 102, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 506, + 146 + ], + "type": "text", + "content": "Jack Rae and Ali Razavi. Do transformers need deep long-range memory? In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, Online, July 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.acl-main.672." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 154, + 506, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 154, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 154, + 506, + 210 + ], + "type": "text", + "content": "Benjamin Recht, Christopher Re, Stephen Wright, and Feng Niu. Hogwild!: A lock-free approach to parallelizing stochastic gradient descent. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, and K.Q. Weinberger, editors, Advances in Neural Information Processing Systems, volume 24. Curran Associates, Inc., 2011. URL https://proceedings.neurips.cc/paper_files/paper/2011/file/218a0aefd1d1a4be65601cc6ddc1520e-Paper.pdf." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 217, + 506, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 217, + 506, + 261 + ], + "spans": [ + { + "bbox": [ + 106, + 217, + 506, + 261 + ], + "type": "text", + "content": "Timo Schick, Jane Dwivedi-Yu, Roberto Dessi, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. Toolformer: Language models can teach themselves to use tools. ArXiv, abs/2302.04761, 2023. URL https://api_semanticscholar.org/CorpusID:256697342." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 268, + 506, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 268, + 506, + 303 + ], + "spans": [ + { + "bbox": [ + 106, + 268, + 506, + 303 + ], + "type": "text", + "content": "Yongliang Shen, Kaitao Song, Xu Tan, Dongsheng Li, Weiming Lu, and Yue Ting Zhuang. Hugging-gpt: Solving ai tasks with chatgpt and its friends in hugging face. ArXiv, abs/2303.17580, 2023. URL https://api_semanticscholar.org/CorpusID:257833781." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 309, + 506, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 309, + 506, + 343 + ], + "spans": [ + { + "bbox": [ + 106, + 309, + 506, + 343 + ], + "type": "text", + "content": "Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 350, + 504, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 350, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 106, + 350, + 504, + 373 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 380, + 504, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 380, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 106, + 380, + 504, + 425 + ], + "type": "text", + "content": "Stanford HAI. How a “crazy idea” overturned the conventional rules of machine learning, 2023. URL https://hai.stanford.edu/news/how-crazy-idea-overturned-conventional-rules-machine-learning. Accessed: [Insert Date]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 431, + 504, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 431, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 106, + 431, + 504, + 456 + ], + "type": "text", + "content": "Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 462, + 506, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 462, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 106, + 462, + 506, + 517 + ], + "type": "text", + "content": "Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V. Le, Ed H. Chi, Denny Zhou, and Jason Wei. Challenging big-bench tasks and whether chain-of-thought can solve them. In Annual Meeting of the Association for Computational Linguistics, 2022. URL https://api_semanticscholar.org/CorpusID: 252917648." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 525, + 504, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 525, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 106, + 525, + 504, + 548 + ], + "type": "text", + "content": "Yashar Talebirad and Amirhossein Nadiri. Multi-agent collaboration: Harnessing the power of intelligent LLM agents. CoRR, abs/2306.03314, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 554, + 498, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 554, + 498, + 567 + ], + "spans": [ + { + "bbox": [ + 106, + 554, + 498, + 567 + ], + "type": "text", + "content": "A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 574, + 504, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 574, + 504, + 608 + ], + "spans": [ + { + "bbox": [ + 106, + 574, + 504, + 608 + ], + "type": "text", + "content": "Junlin Wang, WANG Jue, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities. In The Thirteenth International Conference on Learning Representations, 2024a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 614, + 506, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 614, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 106, + 614, + 506, + 681 + ], + "type": "text", + "content": "Qineng Wang, Zihao Wang, Ying Su, Hanghang Tong, and Yangqiu Song. Rethinking the bounds of LLM reasoning: Are multi-agent discussions the key? In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6106-6131, Bangkok, Thailand, August 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.331. URL https://aclanthology.org/2024.acl-long.331/." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H. Chi, and Denny Zhou. 
Self-consistency improves chain of thought reasoning in language models. ArXiv, abs/2203.11171, 2022. URL https://api-semanticscholar.org/CorpusID:247595263." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 138 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 138 + ], + "type": "text", + "content": "Yiming Wang, Zhuosheng Zhang, Pei Zhang, Baosong Yang, and Rui Wang. Meta-reasoning: Semantics-symbol deconstruction for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 622–643, Bangkok, Thailand, August 2024c. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.34. URL https://aclanthology.org/2024-findings-acl.34/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 147, + 504, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 147, + 504, + 181 + ], + "spans": [ + { + "bbox": [ + 106, + 147, + 504, + 181 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 189, + 504, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 189, + 504, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 189, + 504, + 223 + ], + "type": "text", + "content": "Guangxuan Xiao, Yuandong Tian, Beidi Chen, Song Han, and Mike Lewis. Efficient streaming language models with attention sinks. In International Conference on Learning Representations (ICLR), 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 231, + 504, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 231, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 504, + 331 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 338, + 504, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 338, + 504, + 438 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 504, + 438 + ], + "type": "text", + "content": "An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, Chujie Zheng, Dayiheng Liu, Fan Zhou, Fei Huang, Feng Hu, Hao Ge, Haoran Wei, Huan Lin, Jialong Tang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jing Zhou, Jingren Zhou, Junyang Lin, Kai Dang, Keqin Bao, Kexin Yang, Le Yu, Lianghao Deng, Mei Li, Mingfeng Xue, Mingze Li, Pei Zhang, Peng Wang, Qin Zhu, Rui Men, Ruize Gao, Shixuan Liu, Shuang Luo, Tianhao Li, Tianyi Tang, Wenbiao Yin, Xingzhang Ren, Xinyu Wang, Xinyu Zhang, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yinger Zhang, Yu Wan, Yuqiong Liu, Zekun Wang, Zeyu Cui, Zhenru Zhang, Zhipeng Zhou, and Zihan Qiu. Qwen3 technical report, 2025. URL https://arxiv.org/abs/2505.09388." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 445, + 504, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 445, + 504, + 480 + ], + "spans": [ + { + "bbox": [ + 106, + 445, + 504, + 480 + ], + "type": "text", + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. ArXiv, abs/2210.03629, 2022. URL https://api_semanticscholar.org/CorpusID:252762395." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 488, + 504, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 488, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 106, + 488, + 504, + 522 + ], + "type": "text", + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. ArXiv, abs/2305.10601, 2023. URL https://api_semanticscholar.org/CorpusID:258762525." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 530, + 504, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 530, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 106, + 530, + 504, + 555 + ], + "type": "text", + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 561, + 504, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 561, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 106, + 561, + 504, + 586 + ], + "type": "text", + "content": "Yijiong Yu. Accelerate parallelizable reasoning via parallel decoding within one sequence, 2025. URL https://arxiv.org/abs/2503.20533." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 593, + 504, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 593, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 593, + 504, + 628 + ], + "type": "text", + "content": "Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Zhihan Guo, Yufei Wang, Irwin King, Xue Liu, and Chen Ma. What, how, where, and how well? a survey on test-time scaling in large language models. arXiv preprint arXiv:2503.24235, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 635, + 504, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 635, + 504, + 680 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 504, + 680 + ], + "type": "text", + "content": "Zhenyu Zhang, Ying Sheng, Tianyi Zhou, Tianlong Chen, Lianmin Zheng, Ruisi Cai, Zhao Song, Yuandong Tian, Christopher Ré, Clark Barrett, et al. H2o: Heavy-hitter oracle for efficient generative inference of large language models. Advances in Neural Information Processing Systems, 36:34661-34710, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "type": "text", + "content": "Zhuosheng Zhang, Aston Zhang, Mu Li, and Alexander J. Smola. Automatic chain of thought prompting in large language models. ArXiv, abs/2210.03493, 2022. URL https://api.sementicscholar.org/CorpusID:252762275." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 250 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623, 2023a." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 113, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 113, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 113, + 505, + 148 + ], + "type": "text", + "content": "Lianmin Zheng, Liangsheng Yin, Zhiqiang Xie, Jeff Huang, Chuyue Sun, Cody Hao Yu, Shiyi Cao, Christos Kozyrakis, Ion Stoica, Joseph E. Gonzalez, Clark Barrett, and Ying Sheng. Efficiently programming large language models using sglang, 2023b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 154, + 504, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 154, + 504, + 188 + ], + "spans": [ + { + "bbox": [ + 107, + 154, + 504, + 188 + ], + "type": "text", + "content": "Tong Zheng, Hongming Zhang, Wenhao Yu, Xiaoyang Wang, Runpeng Dai, Rui Liu, Huiwen Bao, Chengsong Huang, Heng Huang, and Dong Yu. Parallel-r1: Towards parallel thinking via reinforcement learning, 2025. URL https://arxiv.org/abs/2509.07980." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 194, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 504, + 250 + ], + "type": "text", + "content": "Pei Zhou, Jay Pujara, Xiang Ren, Xinyun Chen, Heng-Tze Cheng, Quoc V. Le, Ed H. 
Chi, Denny Zhou, Swaroop Mishra, and Huaixiu Steven Zheng. SELF-DISCOVER: Large language models self-compose reasoning structures. In Amir Globerson, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. Tomczak, and Cheng Zhang, editors, Advances in Neural Information Processing Systems 37 (NeurIPS 2024), Vancouver, BC, Canada, December 2024." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 205, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 205, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 205, + 85 + ], + "type": "text", + "content": "A Cache Layouts" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 504, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 504, + 119 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 504, + 119 + ], + "type": "text", + "content": "In this section, we consider three cache arrangements, shown at Figure 9, with progressively more complex structure." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 128, + 218, + 220 + ], + "blocks": [ + { + "bbox": [ + 106, + 128, + 218, + 220 + ], + "lines": [ + { + "bbox": [ + 106, + 128, + 218, + 220 + ], + "spans": [ + { + "bbox": [ + 106, + 128, + 218, + 220 + ], + "type": "image", + "image_path": "507eb12025e222fa29ca02659bbf335b1449be8b98d49d55506246ac54845fba.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "lines": [ + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 504, + 258 + ], + "type": "text", + "content": "Figure 9: Three cache layouts described in Section 3.2: interleaved with step-wise synchrony (left), simple contiguous layout (middle) and combined with token-wise synchrony (right). All layouts are made from Alice point of view." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 225, + 128, + 330, + 220 + ], + "blocks": [ + { + "bbox": [ + 225, + 128, + 330, + 220 + ], + "lines": [ + { + "bbox": [ + 225, + 128, + 330, + 220 + ], + "spans": [ + { + "bbox": [ + 225, + 128, + 330, + 220 + ], + "type": "image", + "image_path": "403bc32e2333f1f364e35ecfc05c48ebb4b929f7f262bc7eaaf37bc6c81e156b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 340, + 128, + 503, + 219 + ], + "blocks": [ + { + "bbox": [ + 340, + 128, + 503, + 219 + ], + "lines": [ + { + "bbox": [ + 340, + 128, + 503, + 219 + ], + "spans": [ + { + "bbox": [ + 340, + 128, + 503, + 219 + ], + "type": "image", + "image_path": "cc79e0adf2594606e76f0f89c7b7b43453bf8c674b16cba49604007dab0a6453.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 270, + 506, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 506, + 304 + ], + "type": "text", + "content": "Contiguous layout (token-wise) is the simplest possible layout where each worker appends to their own sequence blob of tokens and sees other workers' token representations as past keys and values. This layout is inspired by collaborative text editors such as Google Docs or Overleaf." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "text", + "content": "As described earlier in Section 3.1, each worker arranges the other workers' thoughts in a different order. They see the common prompt cache first, then the caches of all other workers (excluding themselves$^{8}$), then their own cache as immediate previous tokens. That way, each worker predicts the next token for their own cache." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 357, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 504, + 425 + ], + "type": "text", + "content": "Interleaved layout (step-wise) can be seen as analogous to group chat services such as Slack or Discord. In this layout, workers generate tokens in private until they finish a reasoning step$^{9}$, then add it to a shared \"history\". The history contains past reasoning steps of each LLM instance in the order of their completion. Whenever a worker completes a reasoning step, their KV cache entries are moved to the end of the shared history cache block with the proper rotation, and their local cache is reset for a new step." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 428, + 506, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 428, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 506, + 462 + ], + "type": "text", + "content": "In this setup, the workers only see each other's outputs in full steps, not after every token. However, they do not wait for each other to complete their steps. Instead, each worker keeps generating new tokens and occasionally receives additional key-value pairs inserted into its cache."
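To make the contiguous layout concrete, here is a minimal sketch of the per-worker block ordering it implies. This is illustrative only, not the paper's code: `CacheBlock` and `view_for` are hypothetical names, and the `entries` lists stand in for real per-layer key/value tensors. The only behavior taken from the text is that each worker attends to the shared prompt first, then every other worker's block, and finally its own block last, so that it predicts the next token of its own sequence (cf. the three-worker footnote).

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class CacheBlock:
    """Hypothetical container for one writer's cached keys/values (stored at local positions 0..len-1)."""
    name: str
    entries: list = field(default_factory=list)  # placeholder for per-layer K/V tensors

def view_for(worker: int, prompt: CacheBlock, workers: List[CacheBlock]) -> List[CacheBlock]:
    """Block order that worker `worker` attends to in the contiguous layout:
    shared prompt, then all *other* workers' blocks, then its own block last."""
    others = [blk for i, blk in enumerate(workers) if i != worker]
    return [prompt] + others + [workers[worker]]

# Three-worker example: the middle worker sees prompt, Alice, Carol, and then itself.
prompt = CacheBlock("prompt")
alice, bob, carol = CacheBlock("Alice"), CacheBlock("Bob"), CacheBlock("Carol")
assert [b.name for b in view_for(1, prompt, [alice, bob, carol])] == ["prompt", "Alice", "Carol", "Bob"]
```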
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 466, + 506, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 506, + 512 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 506, + 512 + ], + "type": "text", + "content": "Combined layout (token-wise) is a mixture of the first two, and is the main layout used in the paper. The LLM instances generate steps that are accumulated in a shared history, as in the interleaved layout. However, they do not generate these steps in private, but can instantly see each other's current progress, as in the contiguous layout." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 515, + 504, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 515, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 104, + 515, + 504, + 550 + ], + "type": "text", + "content": "We can view the first two layouts as ablated versions of this combined one: the contiguous layout lacks the shared history, and the interleaved layout lacks immediate synchronization. We compare these three layouts empirically in Appendix E.1 to better quantify the effect of each design choice." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 564, + 248, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 564, + 248, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 248, + 578 + ], + "type": "text", + "content": "B Implementation Details" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 590, + 506, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 506, + 624 + ], + "type": "text", + "content": "Here we discuss additional implementation details and possible alternatives. To recall Section 3.4, Hogwild! inference can be implemented as a standard batched inference with a special KV \"cache\" that facilitates cross-worker attention." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 628, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 506, + 662 + ], + "type": "text", + "content": "Cache blocks. The Hogwild! cache is split into blocks, typically one block for each worker and an additional \"common\" block for prompt and past steps. The blocks contain key-value pairs for all model layers, but since all layers are treated equally, we describe the cache behavior for a single layer." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 670, + 504, + 701 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 504, + 701 + ], + "type": "text", + "content": "When extending this layout to more than 2 workers, each worker sees the key-value memories of everyone except themselves. For instance, given 3 workers A, B, and C, worker B will see a version of the cache that contains the prompt, outputs of workers A and C, and finally, B's own memory. Likewise, A sees B & C, then A." 
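A small sketch of that block structure may also help; it is an illustration under stated assumptions rather than the authors' implementation. `Block` and `HogwildCache` are invented names, the `entries` lists are placeholders for real per-layer key/value tensors, and the step-boundary test simply encodes footnote 9 (a sentence-ending character followed by a double newline). The promotion of a finished step into the shared "common" block follows the interleaved/combined layout description above; the re-rotation of the moved keys is omitted here.

```python
import re
from dataclasses import dataclass, field

@dataclass
class Block:
    """Hypothetical per-writer KV store; entries stand in for per-layer (key, value) tensors."""
    entries: list = field(default_factory=list)
    text: str = ""

class HogwildCache:
    """One shared 'common' block (prompt + finished steps) plus one private block per worker."""

    def __init__(self, num_workers: int, prompt_entries: list):
        self.common = Block(entries=list(prompt_entries), text="<prompt>")
        self.workers = [Block() for _ in range(num_workers)]

    @staticmethod
    def step_finished(text: str) -> bool:
        # Footnote 9: a reasoning step ends with a complete sentence followed by "\n\n".
        return re.search(r"[.?!]\s*\n\n$", text) is not None

    def append(self, worker: int, token_text: str, kv_entry) -> None:
        blk = self.workers[worker]
        blk.text += token_text
        blk.entries.append(kv_entry)
        if self.step_finished(blk.text):
            # Move the finished step to the end of the shared history, then reset the local block.
            # (The real system would also re-rotate the moved keys to their new positions.)
            self.common.entries.extend(blk.entries)
            self.common.text += blk.text
            blk.entries, blk.text = [], ""
```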
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 701, + 504, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 701, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 701, + 504, + 723 + ], + "type": "text", + "content": "9We define a reasoning step as any amount of text that ends with a complete sentence, e.g. a dot or a question mark, and then a double newline (\"\\n\\n\") in all our experiments, though it may vary by the model." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 117 + ], + "type": "text", + "content": "Within each cache block, attention keys and values are stored as though they were at positions 0, 1, ..., len(block), regardless of the block's actual position in the full cache. During inference, we account for actual positions by rotating attention queries to the relative difference in positions (as described in Section 3.4)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 121, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 177 + ], + "type": "text", + "content": "Adding new tokens to the cache. During attention forward pass, the first thing that we do is encode the new tokens for each worker and append their keys and values to the respective cache blocks. When using RoPE, the keys are rotated not to their actual positions, but to their index within their cache block (e.g. Alice's tokens). During one inference step, these indices will be equal across all model layers — we can compute the RoPE sin and cos tensors once and reuse them between layers." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "text", + "content": "Rotating queries. Unlike in traditional attention, Hogwild! inference rotates query vectors multiple times for each block. Before forward pass, we calculate the difference in positions between each worker's new token (from that worker's point of view) and the first token in each KV cache block. 
In our main inference scenario, all " + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "text", + "content": " workers are allowed to view each other's cache blocks plus an additional bock for prompt and history, for a total of " + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "inline_equation", + "content": "n \\cdot (n + 1)" + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "text", + "content": " query rotations with exactly " + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "text", + "content": " queries for each block. These relative positions are also equal across all layers, so we can reuse the sin and cos tensors similarly to how they are reused for keys. Note that the number of query rotations for all-to-all attention is quadratic in " + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "text", + "content": ", but it does not increase the overall time complexity of attention dot product, which is already quadratic in the number of tokens, which is always greater than " + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 182, + 506, + 281 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 285, + 506, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 406 + ], + "type": "text", + "content": "Attention kernel. Once we have all query rotations, we can calculate the scaled dot-product attention as usual. As our cache is naturally partitioned into smaller segments as described above, Hogwild! attention is similar to paged attention, except that each page (i.e., cache block) uses a differently rotated version of the query. A significant challenge for efficient attention in the inference setup is that for optimal data reuse, one would want to handle each KV head inside a single streaming multiprocessor (SM), so that the KV cache needs to be loaded exactly once. However, this would leave large parts of the GPU unused, as the number of KV heads can be much lower than the number of SMs. Therefore, one has to employ a form of sequence parallelism within a single GPU, in which different SMs handle a subset of the sequence for one KV head, and a second phase handles the (cheap) reduction over partial results. Such a split-k type computation is implemented, for example, in Flash-Decoding [Dao et al., 2023]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 411, + 506, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 506, + 444 + ], + "type": "text", + "content": "Even though the different cache blocks used in Hogwild! would appear to be convenient points to split work across SMs, in a typical inference scenario, this would lead to very imbalanced workloads. Thus, we do not split based on cache blocks, and instead assign each SM the same number of KV entries." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 449, + 506, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 506, + 505 + ], + "type": "text", + "content": "Fine-tuning and re-encoding considerations. While our work mainly focuses on inference, fine-tuning models to perform Hogwild! inference is an interesting engineering problem. From the computational point of view, the main difference between LLM inference and fine-tuning is that inference is sequential, whereas fine-tuning can compute all positions in parallel. To fine-tune in our setup, one would want to replicate the attention computations from consecutive inference steps." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 509, + 506, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 506, + 586 + ], + "type": "text", + "content": "To achieve this, we record the position differences between queries and each respective cache block from each of " + }, + { + "bbox": [ + 104, + 509, + 506, + 586 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 509, + 506, + 586 + ], + "type": "text", + "content": " inference steps, and how many tokens were in each block during that query, for a total of " + }, + { + "bbox": [ + 104, + 509, + 506, + 586 + ], + "type": "inline_equation", + "content": "2 \\cdot t \\cdot n \\cdot (n + 1)" + }, + { + "bbox": [ + 104, + 509, + 506, + 586 + ], + "type": "text", + "content": " integers (negligible compared to model parameters and activations). Recall that the cache blocks always store keys and values at positions 0, 1, ..., 1en(block). During forward pass, these positions can be used to construct a 4D attention mask10 to compute attention for all steps in parallel. The backward pass also runs in parallel with PyTorch autograd [Paszke et al., 2019]. A recent work by Zheng et al. [2025] explores finetuning for parallel inference in more detail." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 591, + 506, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 506, + 624 + ], + "type": "text", + "content": "In addition to fine-tuning, this technique can potentially be used during inference to restore generation after it was evicted from an inference server, e.g. due to preemption or hardware error mid decoding. It can also be used to re-encode in-context learning examples if they use Hogwild! inference." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 629, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 629, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 629, + 506, + 696 + ], + "type": "text", + "content": "Attention variants. Some of the recently introduced LLMs use attention variants such as Local (windowed) Attention [Rae and Razavi, 2020, Beltagy et al., 2020] or Multihead Latent Attention (MLA) [Liu et al., 2024a]. These attention variants can also be adapted for use with Hogwild! inference with minor code modifications. For local attention, queries can \"skip\" blocks that are outside their local window. Similarly for MLA, we can calculate compressed latent vectors within each cache block and adapt the existing MLA code to accumulate attention weights across blocks." 
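As a concrete illustration of the replay idea, the visibility half of that bookkeeping can be sketched as follows. This is an assumed reconstruction, not the authors' training code: `build_4d_mask` is a made-up helper, it only rebuilds which cached tokens each recorded query could see (the "how many tokens were in each block during that query" integers), and it leaves out the per-block query rotations that the text says are also replayed. The resulting boolean mask has the [batch, heads, queries, keys] shape accepted by, e.g., torch.nn.functional.scaled_dot_product_attention.

```python
import torch

def build_4d_mask(visible_counts, block_lengths):
    """visible_counts[q][b]: tokens block b contained when recorded query q was issued.
    block_lengths[b]: final length of block b after all steps.
    Returns a [1, 1, num_queries, total_tokens] boolean mask (True = may attend)."""
    num_q, total = len(visible_counts), sum(block_lengths)
    mask = torch.zeros(1, 1, num_q, total, dtype=torch.bool)
    for q, counts in enumerate(visible_counts):
        start = 0
        for b, length in enumerate(block_lengths):
            mask[0, 0, q, start:start + counts[b]] = True  # query q sees the first counts[b] tokens of block b
            start += length
    return mask

# Two blocks (shared history, own step) that ended with 5 and 3 tokens; the second recorded
# query had seen 4 history tokens and 2 of its own tokens at decode time.
print(build_4d_mask([[3, 1], [4, 2]], [5, 3])[0, 0])
```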
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 115, + 710, + 337, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 710, + 337, + 722 + ], + "spans": [ + { + "bbox": [ + 115, + 710, + 337, + 722 + ], + "type": "text", + "content": "10 https://huggingface.co/blog/poedator/4d-masks" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "content": "Distributed Inference. Likewise, Hogwild! inference can be used in a distributed setup using the same strategies that work for traditional attention [Shoeybi et al., 2019, Aminabadi et al., 2022]. For pipeline parallelism, each device stores cache blocks for its local subset of model layers. Likewise, for tensor parallelism, each device stores past keys of all cache blocks and layers, but only for a subset of attention heads within each layer, and runs inference using existing kernels." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 133, + 504, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 504, + 188 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 504, + 188 + ], + "type": "text", + "content": "In principle, Hogwild! inference can also be combined with sequence parallelism [Jacobs et al., 2023, Liu et al., 2023], where each device stores a KV cache for a subset of tokens. One intuitive way to partition the KV cache between GPUs is to assign each device to run one or several \"workers\" and keep the KVs generated by these workers. Since Hogwild! workers generate tokens at the same rate, each device will store the same amount of KVs and query the other devices to perform cross-worker attention." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 193, + 504, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 504, + 269 + ], + "type": "text", + "content": "When computing Hogwild! concurrent attention with sequence parallelism, workers can exchange rotated queries using the All-to-All collective operation (Scatter/Gather) available in most frameworks [Li et al., 2020]. After that, each worker computes dot-products between the rotated queries and its local KV cache, and exchanges the partial results as in Ring Attention [Liu et al., 2023]. Note, however, that maximizing the performance of such sequence-parallel Hogwild! inference would require custom kernels that overlap computation and communication. In contrast, tensor-parallel (per-head) and pipeline-parallel (per-layer) partitioning can reuse single-GPU attention kernels." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 340 + ], + "type": "text", + "content": "Additional considerations. 
Conceptually, our approach is related to the recently introduced Paged Attention from vLLM [Kwon et al., 2023] and Radix Attention from SGLang [Zheng et al., 2023b]. These techniques are similar to ours in that they perform attention to slices of all tokens, e.g. when facilitating efficient parallel beam search inference, different hypotheses attend to different (but overlapping) subsets of the KV cache. However, unlike Radix Attention, our procedure attends to all segments at once (with different rotations) and aggregates results in the same softmax-weighted sum." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 350, + 300, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 350, + 300, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 300, + 363 + ], + "type": "text", + "content": "C Prompting and formatting details" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 368, + 432, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 368, + 432, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 432, + 380 + ], + "type": "text", + "content": "In this section, we describe the prompting and formatting details of our approach." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 389, + 332, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 389, + 332, + 400 + ], + "spans": [ + { + "bbox": [ + 121, + 389, + 332, + 400 + ], + "type": "text", + "content": "Prompt for collaborative inference with two workers" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 411, + 242, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 411, + 242, + 421 + ], + "spans": [ + { + "bbox": [ + 121, + 411, + 242, + 421 + ], + "type": "text", + "content": "Collaborative Reasoning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 421, + 488, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 421, + 488, + 460 + ], + "spans": [ + { + "bbox": [ + 121, + 421, + 488, + 460 + ], + "type": "text", + "content": "You will collaborate on this problem with another assistant. You will write your thoughts simultaneously with them and collaborate without redundant work. You can collaborate by doing different parts of the problem, double-checking each other's results, trying different approaches, or any other means." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 461, + 480, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 461, + 480, + 479 + ], + "spans": [ + { + "bbox": [ + 121, + 461, + 480, + 479 + ], + "type": "text", + "content": "There are 2 assistants, including yourself. You will refer to each other as Alice and Bob." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 481, + 480, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 481, + 480, + 510 + ], + "spans": [ + { + "bbox": [ + 121, + 481, + 480, + 510 + ], + "type": "text", + "content": "You will solve the problem together, writing your thoughts in parallel. You will be able to see each other's past and current thoughts as we write them. 
You will see each other's previous steps as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 511, + 277, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 511, + 277, + 521 + ], + "spans": [ + { + "bbox": [ + 121, + 511, + 277, + 521 + ], + "type": "text", + "content": "**AssistantName [step]:** <...>" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 521, + 447, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 521, + 447, + 540 + ], + "spans": [ + { + "bbox": [ + 121, + 521, + 447, + 540 + ], + "type": "text", + "content": "In the '#### Past steps' section, the automated system will gather the thoughts of Alice and Bob as you write them." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 541, + 480, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 541, + 480, + 580 + ], + "spans": [ + { + "bbox": [ + 121, + 541, + 480, + 580 + ], + "type": "text", + "content": "After the '###' Work in progress (others)' section, you will see the other assistants' unfinished steps. They will write those steps concurrently with you. You will take into account what they are doing. If another assistant gives you suggestions, you should address them." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 581, + 476, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 581, + 476, + 620 + ], + "spans": [ + { + "bbox": [ + 121, + 581, + 476, + 620 + ], + "type": "text", + "content": "You will always see *other* assistants' incomplete thoughts first, and then, after '##### Work in progress (own)', your own current step. Other assistants will continue writing their thoughts in the background while you will continue writing your own." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 621, + 485, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 621, + 485, + 670 + ], + "spans": [ + { + "bbox": [ + 121, + 621, + 485, + 670 + ], + "type": "text", + "content": "Since you and others both write your thoughts in parallel, you will initially see only partial (unfinished) thoughts that others will continue in parallel, while you write yours. Others' thoughts will appear at the end of their unfinished step, near " + }, + { + "bbox": [ + 121, + 621, + 485, + 670 + ], + "type": "inline_equation", + "content": "<\\ldots>" + }, + { + "bbox": [ + 121, + 621, + 485, + 670 + ], + "type": "text", + "content": ". Other assistants may write new thoughts while you are writing yours." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 671, + 485, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 671, + 485, + 710 + ], + "spans": [ + { + "bbox": [ + 121, + 671, + 485, + 710 + ], + "type": "text", + "content": "You will use these partial thoughts to decide how best to collaborate without doing the same work twice. You will periodically check what other assistants are doing and you should adjust your actions based on what they are doing so you collaborate efficiently with them." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 79, + 481, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 79, + 481, + 150 + ], + "spans": [ + { + "bbox": [ + 120, + 79, + 481, + 150 + ], + "type": "text", + "content": "If what you are currently doing is the same thing that another assistant has already done or is in process of doing, you will stop (e.g. Alice may say 'Wait, I was doing the same as Bob ...') and change to a different task right away, so as to avoid doing redundant work. \n# Solve the following problem \nAlice and Bob, you will now solve the next problem together. Keep track of who does what work and communicate to avoid doing the same work twice." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 164, + 504, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 164, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 104, + 164, + 504, + 218 + ], + "type": "text", + "content": "First, we provide a full prompt for collaborative reasoning involving two workers. This prompt is wrapped with standard chat template for each model. Then, all worker steps are generated in a single assistant turn. Additionally, we ensure that for reasoning models assistant turn begins with a token - applied automatically for QwQ-32B and manually for other reasoning models. For further implementation details, we refer to the source code." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 224, + 504, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 224, + 504, + 290 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 504, + 290 + ], + "type": "text", + "content": "The second part of our prompting approach involves s1-like interventions [Muennighoff et al., 2025] in the generation process, where we ask whether a worker is performing redundant tasks (e.g., overlapping with another worker and can pivot to explore alternative ideas). We insert the prompt \"Quick check: am I doing redundant work? (yes/no): \" at the beginning of each new reasoning step every 1024 tokens generated. We refer to Appendix F for examples that demonstrate how these interventions affect the generation process." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 295, + 504, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 361 + ], + "type": "text", + "content": "Next, an important part of our approach is defining the end of a reasoning step, which is needed to organize cache layout, as discussed in the paper. We define an end of step as the generation of a token containing the separator sequence (\\n\\n) that directly follows a token ending with an end-of-sentence marker (.,?, or!, etc). 
This termination condition is not met when: i) The separator appears within a generated code block (steps continue until the model completes the entire code block); ii) The preceding token ends with non-terminal punctuation (e.g., comma, colon, or semicolon)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 366, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 504, + 411 + ], + "type": "text", + "content": "Finally, the last part of our prompting approach is the early finisher, which allows us to extract an answer from partial reasoning chains. If the model did not produce the final answer (\\`boxed{...}) in time, we take all generated outputs and insert a special prompt that makes the model generate an answer (or its \"best guess\"), similarly to how it is done in Pu et al. [2025]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 420, + 227, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 420, + 227, + 431 + ], + "spans": [ + { + "bbox": [ + 121, + 420, + 227, + 431 + ], + "type": "text", + "content": "Prompt for early stopping" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 441, + 466, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 441, + 466, + 472 + ], + "spans": [ + { + "bbox": [ + 120, + 441, + 466, + 472 + ], + "type": "inline_equation", + "content": "\\backslash \\mathsf{n}\\backslash \\mathsf{nWait}" + }, + { + "bbox": [ + 120, + 441, + 466, + 472 + ], + "type": "text", + "content": " , given the limited time, I have to give an answer right now. Conside- ring all my previous attempts, I have to conclude that the final answer is boxed{" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 486, + 504, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 486, + 504, + 508 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 504, + 508 + ], + "type": "text", + "content": "After this prompt, we allow the model to generate a fixed number of tokens: 16 for LIMO and AIME, 64 for OlympiadBench, and 1024 for LiveCodeBench." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 514, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 504, + 578 + ], + "type": "text", + "content": "Note, however, that the LLM does not always produce the answer in time, especially with a tight budget. With QwQ-32B, we observe that the model almost always returns answers correctly if they are present, and if not, it guesses or refuses to answer (unknown, n/a or similar). When extracting answers from Hogwild! Inference, we let the final model view all generated tokens from each worker. This is equivalent to viewing the problem from the perspective of the last worker, e.g. Bob if there are two." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 596, + 309, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 309, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 309, + 609 + ], + "type": "text", + "content": "D Detailed Experiment Configuration" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 621, + 235, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 621, + 235, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 235, + 633 + ], + "type": "text", + "content": "D.1 Hogwild! Configuration" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 642, + 504, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 642, + 504, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 504, + 665 + ], + "type": "text", + "content": "For the main experiments, we use Hogwild! inference with two workers (Alice and Bob), a combined layout, and the prompting techniques described in Appendix C." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 679, + 234, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 234, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 234, + 690 + ], + "type": "text", + "content": "D.2 Baselines Configuration" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "To evaluate Skeleton-of-Thought (SoT) on our synthetic setup with grouped tasks from GSM8k, we adopt the original prompts from the paper with minor modifications. Specifically, we adjust" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "content": "the prompts to ensure the model returns the answer to each subtask enclosed within \\boxed{} for structured parsing." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 103, + 285, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 103, + 285, + 117 + ], + "spans": [ + { + "bbox": [ + 121, + 103, + 285, + 117 + ], + "type": "text", + "content": "Outline prompt for Skeleton-of-Thought" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 120, + 125, + 477, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 125, + 477, + 177 + ], + "spans": [ + { + "bbox": [ + 120, + 125, + 477, + 177 + ], + "type": "text", + "content": "You're an organizer responsible for only giving the skeleton (not the full content) for answering the question. Provide the skeleton in a list of points (numbered 1., 2., 3., etc.) to answer the question. Instead of writing a full sentence, each skeleton point should be very short with only 35 words. Generally, the skeleton should have 3 10 points." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 177, + 165, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 177, + 165, + 186 + ], + "spans": [ + { + "bbox": [ + 120, + 177, + 165, + 186 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 120, + 186, + 334, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 186, + 334, + 196 + ], + "spans": [ + { + "bbox": [ + 120, + 186, + 334, + 196 + ], + "type": "text", + "content": "What are the typical types of Chinese dishes?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 196, + 164, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 196, + 164, + 205 + ], + "spans": [ + { + "bbox": [ + 121, + 196, + 164, + 205 + ], + "type": "text", + "content": "Skeleton:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 122, + 206, + 192, + 285 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 122, + 206, + 187, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 206, + 187, + 216 + ], + "spans": [ + { + "bbox": [ + 122, + 206, + 187, + 216 + ], + "type": "text", + "content": "1. Dumplings." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 122, + 217, + 179, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 217, + 179, + 224 + ], + "spans": [ + { + "bbox": [ + 122, + 217, + 179, + 224 + ], + "type": "text", + "content": "2. Noodles." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 122, + 226, + 179, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 226, + 179, + 234 + ], + "spans": [ + { + "bbox": [ + 122, + 226, + 179, + 234 + ], + "type": "text", + "content": "3. Dim Sum." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 122, + 236, + 178, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 236, + 178, + 244 + ], + "spans": [ + { + "bbox": [ + 122, + 236, + 178, + 244 + ], + "type": "text", + "content": "4. Hot Pot." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 122, + 246, + 174, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 246, + 174, + 255 + ], + "spans": [ + { + "bbox": [ + 122, + 246, + 174, + 255 + ], + "type": "text", + "content": "5. Wonton." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 122, + 256, + 192, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 256, + 192, + 265 + ], + "spans": [ + { + "bbox": [ + 122, + 256, + 192, + 265 + ], + "type": "text", + "content": "6. Ma Po Tofu." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 122, + 266, + 183, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 266, + 183, + 274 + ], + "spans": [ + { + "bbox": [ + 122, + 266, + 183, + 274 + ], + "type": "text", + "content": "7. Char Siu." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 122, + 275, + 192, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 275, + 192, + 285 + ], + "spans": [ + { + "bbox": [ + 122, + 275, + 192, + 285 + ], + "type": "text", + "content": "8. Fried Rice." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 122, + 286, + 164, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 286, + 164, + 295 + ], + "spans": [ + { + "bbox": [ + 122, + 286, + 164, + 295 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 296, + 438, + 425 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 121, + 296, + 438, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 296, + 438, + 305 + ], + "spans": [ + { + "bbox": [ + 121, + 296, + 438, + 305 + ], + "type": "text", + "content": "What are some practical tips for individuals to reduce their carbon" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 122, + 306, + 171, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 306, + 171, + 314 + ], + "spans": [ + { + "bbox": [ + 122, + 306, + 171, + 314 + ], + "type": "text", + "content": "emissions?" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 122, + 316, + 165, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 316, + 165, + 325 + ], + "spans": [ + { + "bbox": [ + 122, + 316, + 165, + 325 + ], + "type": "text", + "content": "Skeleton:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 122, + 326, + 235, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 326, + 235, + 335 + ], + "spans": [ + { + "bbox": [ + 122, + 326, + 235, + 335 + ], + "type": "text", + "content": "1. Energy conservation." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 122, + 336, + 258, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 336, + 258, + 345 + ], + "spans": [ + { + "bbox": [ + 122, + 336, + 258, + 345 + ], + "type": "text", + "content": "2. Efficient transportation." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 122, + 346, + 248, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 346, + 248, + 356 + ], + "spans": [ + { + "bbox": [ + 122, + 346, + 248, + 356 + ], + "type": "text", + "content": "3. Home energy efficiency." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 122, + 357, + 258, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 357, + 258, + 365 + ], + "spans": [ + { + "bbox": [ + 122, + 357, + 258, + 365 + ], + "type": "text", + "content": "4. Reduce water consumption." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 122, + 366, + 221, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 366, + 221, + 374 + ], + "spans": [ + { + "bbox": [ + 122, + 366, + 221, + 374 + ], + "type": "text", + "content": "5. Sustainable diet." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 122, + 376, + 230, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 376, + 230, + 384 + ], + "spans": [ + { + "bbox": [ + 122, + 376, + 230, + 384 + ], + "type": "text", + "content": "6. Sustainable travel." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 122, + 385, + 404, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 385, + 404, + 396 + ], + "spans": [ + { + "bbox": [ + 122, + 385, + 404, + 396 + ], + "type": "text", + "content": "Now, please provide the skeleton for the following question." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 122, + 396, + 164, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 396, + 164, + 405 + ], + "spans": [ + { + "bbox": [ + 122, + 396, + 164, + 405 + ], + "type": "text", + "content": "{request}" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 122, + 406, + 164, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 406, + 164, + 415 + ], + "spans": [ + { + "bbox": [ + 122, + 406, + 164, + 415 + ], + "type": "text", + "content": "Skeleton:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 122, + 415, + 263, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 415, + 263, + 425 + ], + "spans": [ + { + "bbox": [ + 122, + 415, + 263, + 425 + ], + "type": "text", + "content": "[ROLESWITCHING assistant:] 1." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 441, + 276, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 441, + 276, + 454 + ], + "spans": [ + { + "bbox": [ + 121, + 441, + 276, + 454 + ], + "type": "text", + "content": "Point prompt for Skeleton-of-Thought" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 120, + 463, + 471, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 463, + 471, + 482 + ], + "spans": [ + { + "bbox": [ + 120, + 463, + 471, + 482 + ], + "type": "text", + "content": "You're responsible for continuing the writing of one and only one point in the overall answer to the following question." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 120, + 483, + 164, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 483, + 164, + 493 + ], + "spans": [ + { + "bbox": [ + 120, + 483, + 164, + 493 + ], + "type": "text", + "content": "{request}" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 120, + 494, + 259, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 494, + 259, + 502 + ], + "spans": [ + { + "bbox": [ + 120, + 494, + 259, + 502 + ], + "type": "text", + "content": "The skeleton of the answer is" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 120, + 503, + 164, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 503, + 164, + 513 + ], + "spans": [ + { + "bbox": [ + 120, + 503, + 164, + 513 + ], + "type": "text", + "content": "{outline}" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 120, + 514, + 466, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 514, + 466, + 523 + ], + "spans": [ + { + "bbox": [ + 120, + 514, + 466, + 523 + ], + "type": "text", + "content": "Continue and only continue the writing of point {point}. Do not continue" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 120, + 524, + 462, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 524, + 462, + 533 + ], + "spans": [ + { + "bbox": [ + 120, + 524, + 462, + 533 + ], + "type": "text", + "content": "with other points! Reason step-by-step and put your final answer within" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 120, + 533, + 432, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 533, + 432, + 544 + ], + "spans": [ + { + "bbox": [ + 120, + 533, + 432, + 544 + ], + "type": "text", + "content": "\\boxed{} this is very important! [ROLESWITCHING assistant:] {point}." 
+ } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 120, + 544, + 192, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 544, + 192, + 554 + ], + "spans": [ + { + "bbox": [ + 120, + 544, + 192, + 554 + ], + "type": "text", + "content": "{point_outline}" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 576, + 243, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 243, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 243, + 586 + ], + "type": "text", + "content": "D.3 Datasets and Benchmarks" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 104, + 596, + 504, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 619 + ], + "type": "text", + "content": "This subsection provides links to all datasets and benchmarks referenced in this work, along with their respective licenses." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 132, + 629, + 179, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 629, + 179, + 639 + ], + "spans": [ + { + "bbox": [ + 132, + 629, + 179, + 639 + ], + "type": "text", + "content": "- GSM8K" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 140, + 640, + 373, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 640, + 373, + 651 + ], + "spans": [ + { + "bbox": [ + 140, + 640, + 373, + 651 + ], + "type": "text", + "content": "https://huggingface.co/datasets/openai/gsm8k" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 140, + 652, + 199, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 652, + 199, + 662 + ], + "spans": [ + { + "bbox": [ + 140, + 652, + 199, + 662 + ], + "type": "text", + "content": "License: MIT" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 133, + 677, + 171, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 677, + 171, + 688 + ], + "spans": [ + { + "bbox": [ + 133, + 677, + 171, + 688 + ], + "type": "text", + "content": "LIMO" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 140, + 689, + 358, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 689, + 358, + 700 + ], + "spans": [ + { + "bbox": [ + 140, + 689, + 358, + 700 + ], + "type": "text", + "content": "https://huggingface.co/datasets/GAIR/LIMO" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 140, + 700, + 226, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 700, + 226, + 712 + ], + "spans": [ + { + "bbox": [ + 140, + 700, + 226, + 712 + ], + "type": "text", + "content": "License: Apache 2.0" + } + ] + } + ], + "index": 46 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 47 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 72, + 488, + 202 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 132, + 72, + 416, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 72, + 416, + 106 + ], + "spans": [ + { + "bbox": [ + 132, + 72, + 416, + 106 + ], + "type": "text", + "content": "- OlympiadBench 
https://huggingface.co/datasets/Hothan/OlympiadBench License: Apache 2.0" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 121, + 488, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 121, + 488, + 153 + ], + "spans": [ + { + "bbox": [ + 132, + 121, + 488, + 153 + ], + "type": "text", + "content": "LiveCodeBench https://huggingface.co/datasets/livecodebench/code_generation lite License: cc" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 170, + 384, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 170, + 384, + 202 + ], + "spans": [ + { + "bbox": [ + 132, + 170, + 384, + 202 + ], + "type": "text", + "content": "- AIME25 https://huggingface.co/datasets/math-ai/aime25 License: Apache 2.0" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 228, + 217, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 228, + 217, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 217, + 240 + ], + "type": "text", + "content": "D.4 Compute Resources" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 249, + 504, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 249, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 249, + 504, + 293 + ], + "type": "text", + "content": "As our approach is training-free, all computational resources were solely utilized for inference. The experiments were conducted primarily on NVIDIA A100 GPUs servers with NVSwitch, with DeepSeek-R1 experiments running in a distributed setup. The one exception to this is the inference time experiments in Section 4.4 that were run on NVIDIA L40S GPU." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "type": "text", + "content": "The runtime per individual experiment varies by model size, benchmark and the number of workers: baseline inference with Qwen3-4B runs on LIMO in 14 hours on a single server (112gpu-hours), whereas Qwen3-235B-A22 Hogwild! Inference ran on 40 servers for approximately 25 hours (" + }, + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\approx" + }, + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "type": "text", + "content": "8K GPU hours). Overall, we estimate that the total GPU resources expended for this work, including early experiments that are not reported in this paper, amount to approximately " + }, + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "type": "inline_equation", + "content": "\\approx" + }, + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "type": "text", + "content": "25.3K GPU days. Note, however, that this is largely due to the fact that we used a non-optimized inference code for most of the experimentation: the non-optimized code was developed first and we ran most of the experiments in parallel with developing the optimized version. This also means that most of our experiments under-utilized the GPUs and ran at lower power (for the purpose of environmental impact). 
Over 2/3 of our compute was spent on large models (Qwen3-235B-A22B and DeepSeek-R1) that utilized gpu to less than " + }, + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 298, + 506, + 452 + ], + "type": "text", + "content": " (as per volatile GPU utilization) due to the use of naive model parallelism and network bottlenecks. We anticipate that future experiments can be run at significantly betterutilization using the efficient implementation described in Appendix B and included in the supplementary code." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 468, + 251, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 251, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 251, + 481 + ], + "type": "text", + "content": "E Additional Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 494, + 208, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 494, + 208, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 208, + 505 + ], + "type": "text", + "content": "E.1 Ablation Analysis" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 514, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 504, + 536 + ], + "type": "text", + "content": "In this section, we ablate the main components of our approach, including layouts and prompting. We use the same experimental configuration as in Sections 4.1 and 4.2 for LIMO." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 541, + 504, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 504, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 504, + 608 + ], + "type": "text", + "content": "In Figure 10 (left), we compare the three Hogwild! cache layouts described in Appendix A. Namely, the Hogwild! (contiguous) corresponds to using the contiguous cache layout where all tokens generated by a given worker are kept together, without splitting into individual steps. In turn, Hogwild! (non-instant) corresponds to the interleaved cache layout where workers can only see each other's past reasoning steps, but not the latest unfinished paragraph. We also ablate the use of the collaboration prompt from Section 3.3 (\"Wait, am I doing redundant work?\")." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 612, + 506, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 721 + ], + "type": "text", + "content": "Finally, we test a version of Hogwild! Inference where we re-encode worker tokens instead of rotating them to a new position when moving between worker caches and the common \"chat history\" cache. This ablation is needed to test if our cache rotation from Section 3.1 and 3.4 is indeed an acceptable substitute for encoding tokens directly at each position (which would cause additional computational overhead). Note that, while token re-encoding is more \"fair\" from the perspective of position encodings, it also has a downside that it does not allow the re-encoded tokens to see some of the concurrently generated tokens from the other worker. For instance, suppose that Alice and Bob are writing steps concurrently and communicating with each other within these steps, e.g. using each other's results. 
Then, if we later re-encode these steps in some sequential order, then the tokens of the first worker will be encoded without access to the other worker's tokens (if it hasn't finished its" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "own step yet). If workers reused information from each other's steps, re-encoding this way can break some of the internal representations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 100, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 100, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 100, + 506, + 177 + ], + "type": "text", + "content": "Our results suggest that all three design choices contribute to the method performance: the contiguous layout performs nearly equally well for shorter budgets, but eventually falls behind as we consider longer reasoning traces. Likewise, the interleaved layout without instant synchronization performs poorly at smaller budgets, but catches up eventually: we attribute this to the fact that slower synchronization increases the difficulty of cross-worker coordination (this also aligns with our findings in Section 4.3). The use of collaboration prompts also improves the accuracy to budget trade-offs, although we hypothesize that it can be made redundant if the model is trained to collaborate better." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 182, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 258 + ], + "type": "text", + "content": "In Figure 10 (right), we also compare different numbers of workers and test Hogwild! Inference with only a single worker for ablation. The results with a single worker generally perform similar to the baseline, with slightly worse accuracy for smaller budgets, which suggests that the improvements from Hogwild! Inference come from multiple workers and not as an indirect effect of our prompt. As for multiple workers, we find that using 3 and 4 workers further improves the accuracy to budget trade-offs. Curiously, as we switch to 6 workers, Hogwild! Inference performs better yet at smaller budgets, but eventually saturates at a somewhat worse accuracy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 263, + 506, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 263, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 506, + 330 + ], + "type": "text", + "content": "We hypothesize that the drop of accuracy is caused by the fact that QwQ-32B was trained on a limited sequence length and, since 6 workers generate tokens at a quicker rate, the model eventually runs out of the designed maximum sequence length and performs unstably (we did not use YaRN[Peng et al., 2023] for this evaluation). 
However, it is also possible to attribute this to fundamental property of LIMO tasks, model limitations, our zero-shot prompt not scaling well. We leave further exploration of scaling Hogwild! Inference to multiple workers to future work." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 342, + 251, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 251, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 251, + 354 + ], + "type": "text", + "content": "E.2 Detailed Model Evaluations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 363, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 504, + 418 + ], + "type": "text", + "content": "Due to space limitations, we had to arrange our results in Section 4.2 with multiple models per plot and had to omit some results. In this section, we report the missing evaluations on a per-model basis. In Figures 11, 12, 13, 14, 15, 16, 17, 18 we report results for QwQ, Phi-4-reasoning-plus and the Qwen3 model family. We also report limited evaluations for Llama 3.3 70B Instruct and DeepSeek-R1 in Figure 19. All evaluations are performed in the same setup as in Section 4.2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 423, + 506, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 423, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 423, + 506, + 521 + ], + "type": "text", + "content": "Overall, the results align with our findings summarized in Section 4.2. Zero-shot Hogwild! Inference seems to perform better with larger models, but can be unstable for smaller ones, especially 1.7B (See Figure 13). While it is tempting to conclude that larger and more capable models are better at collaborating, it does not immediately follow from our results and can be due to some other factor. Note also that, while we observe better results with larger models, smaller Qwen3-4B and 8B models already show some signs of collaborativeness, which should make it possible to reproduce and build on our results with consumer hardware. Additionally, we hypothesize that the poor performance of 1.7B models could potentially be alleviated with finetuning in collaborative inference setup (we discuss some finetuning details in Appendix B), but we leave this to future work." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 110, + 543, + 299, + 675 + ], + "blocks": [ + { + "bbox": [ + 110, + 543, + 299, + 675 + ], + "lines": [ + { + "bbox": [ + 110, + 543, + 299, + 675 + ], + "spans": [ + { + "bbox": [ + 110, + 543, + 299, + 675 + ], + "type": "image", + "image_path": "0750e87acaf92a25e10b5215e73d545831549528469f5677d552cfdbc243b7ba.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 685, + 504, + 719 + ], + "lines": [ + { + "bbox": [ + 104, + 685, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 504, + 719 + ], + "type": "text", + "content": "Figure 10: Detailed comparison of various parallel inference setups with QwQ-32B on LIMO task set, in the same setup as in Section 4. (left) ablation analysis of simpler cache layouts and collaboration prompt (see Section 3.3, Appendix C). (right) Hogwild! Inference with 1, 2, 3, 4 and 6 workers." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 313, + 544, + 503, + 675 + ], + "blocks": [ + { + "bbox": [ + 313, + 544, + 503, + 675 + ], + "lines": [ + { + "bbox": [ + 313, + 544, + 503, + 675 + ], + "spans": [ + { + "bbox": [ + 313, + 544, + 503, + 675 + ], + "type": "image", + "image_path": "cd5a9137eaa9ed2e13d5412d81fb8636cfab4d520ce8867d2e709303435b0785.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 150 + ], + "type": "text", + "content": "Curiously, we found that LiveCodeBench with Self-Consistency Chain-of-Thought inference [Wang et al., 2022] has significant gain in performance over the baseline. Upon closer examination, we found that the reason for this is that we always allow the model to generate a lot (up to 1024) of additional \"free\" tokens at the end of two generations, whereas for Hogwild! and Baseline we only generate these tokens if the model failed to produce any answer. If we allow Hogwild! to also generate the extra 1024 tokens all the time, its advantage also increases. However, we still report the weaker version of Hogwild! Inference and Baseline to better match our evaluation protocol on other tasks." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 109, + 162, + 299, + 297 + ], + "blocks": [ + { + "bbox": [ + 109, + 162, + 299, + 297 + ], + "lines": [ + { + "bbox": [ + 109, + 162, + 299, + 297 + ], + "spans": [ + { + "bbox": [ + 109, + 162, + 299, + 297 + ], + "type": "image", + "image_path": "a315f11d3d59643d5387cded9470d575ca7f20a13d9e7735ba774ac67c0cdbc5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 161, + 503, + 297 + ], + "blocks": [ + { + "bbox": [ + 309, + 161, + 503, + 297 + ], + "lines": [ + { + "bbox": [ + 309, + 161, + 503, + 297 + ], + "spans": [ + { + "bbox": [ + 309, + 161, + 503, + 297 + ], + "type": "image", + "image_path": "385547686f027b92872df24af335e0c59e793ce862689089ce58ae98832e0824.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 109, + 301, + 299, + 437 + ], + "blocks": [ + { + "bbox": [ + 109, + 301, + 299, + 437 + ], + "lines": [ + { + "bbox": [ + 109, + 301, + 299, + 437 + ], + "spans": [ + { + "bbox": [ + 109, + 301, + 299, + 437 + ], + "type": "image", + "image_path": "d0f2ee09338de737c6a5456c25214178f3d7f3297d1a372caf56c8c8f863a93a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 445, + 506, + 468 + ], + "lines": [ + { + "bbox": [ + 104, + 445, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 506, + 468 + ], + "type": "text", + "content": "Figure 11: Results for QwQ-32B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 310, + 301, + 503, + 437 + ], + "blocks": [ + { + "bbox": [ + 310, + 301, + 503, + 437 + ], + "lines": [ + { + "bbox": [ + 310, + 301, + 503, + 437 + ], + "spans": [ + { + "bbox": [ + 310, + 301, + 503, + 437 + ], + "type": "image", + "image_path": "8ab8a335a650e9496769486045930364b378c45bba5dd6b33a67a20be2c7c767.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 73, + 299, + 205 + ], + "blocks": [ + { + "bbox": [ + 109, + 73, + 299, + 205 + ], + "lines": [ + { + "bbox": [ + 109, + 73, + 299, + 205 + ], + "spans": [ + { + "bbox": [ + 109, + 73, + 299, + 205 + ], + "type": "image", + "image_path": "105b408dcae775ad576b1a9e55e0656d770d5bc021c74442a63554ae117801b1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 74, + 502, + 207 + ], + "blocks": [ + { + "bbox": [ + 309, + 74, + 502, + 207 + ], + "lines": [ + { + "bbox": [ + 309, + 74, + 502, + 207 + ], + "spans": [ + { + "bbox": [ + 309, + 74, + 502, + 207 + ], + "type": "image", + "image_path": "39e38b5f91b2580720877cf0a525f801ab529aa855d2f3d4c8f0e38148798cbf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 1 + }, + { + "type": "image", + "bbox": [ + 109, + 213, + 299, + 346 + ], + "blocks": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "lines": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "spans": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "type": "image", + "image_path": "0a437c6252d524139e06f923a7d43f0f1afe81ffce50153f66609a0d9cf52add.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 355, + 506, + 380 + ], + "lines": [ + { + "bbox": [ + 104, + 355, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 355, + 506, + 380 + ], + "type": "text", + "content": "Figure 12: Results for Phi-4-reasoning-plus on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 310, + 213, + 502, + 346 + ], + "blocks": [ + { + "bbox": [ + 310, + 213, + 502, + 346 + ], + "lines": [ + { + "bbox": [ + 310, + 213, + 502, + 346 + ], + "spans": [ + { + "bbox": [ + 310, + 213, + 502, + 346 + ], + "type": "image", + "image_path": "b721ccc908c8da57e84a6f91f59c3ba54f2a1133372a1fa4ff1dc4010a7980ce.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 109, + 390, + 299, + 526 + ], + "blocks": [ + { + "bbox": [ + 109, + 390, + 299, + 526 + ], + "lines": [ + { + "bbox": [ + 109, + 390, + 299, + 526 + ], + "spans": [ + { + "bbox": [ + 109, + 390, + 299, + 526 + ], + "type": "image", + "image_path": "99e434b949d0cbcd9e763cd8a74a9aabc94127e4bb16528fc410ede6861a8804.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 389, + 501, + 525 + ], + "blocks": [ + { + "bbox": [ + 310, + 389, + 501, + 525 + ], + "lines": [ + { + "bbox": [ + 310, + 389, + 501, + 525 + ], + "spans": [ + { + "bbox": [ + 310, + 389, + 501, + 525 + ], + "type": "image", + "image_path": "1e282eb76370277aa31502f06b17e8deefdb231efbe3649cbd21156bb1baaf78.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 531, + 299, + 665 + ], + "blocks": [ + { + "bbox": [ + 109, + 531, + 299, + 665 + ], + "lines": [ + { + "bbox": [ + 109, + 531, + 299, + 665 + ], + "spans": [ + { + "bbox": [ + 109, + 531, + 299, + 665 + ], + "type": "image", + "image_path": "6a779138c58690d02c893b818651d0308190f4c66aaecc1e51400234f1b70318.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 673, + 506, + 696 + ], + "lines": [ + { + "bbox": [ + 104, + 673, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 506, + 696 + ], + "type": "text", + "content": "Figure 13: Results for Qwen3-1.7B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 311, + 531, + 502, + 665 + ], + "blocks": [ + { + "bbox": [ + 311, + 531, + 502, + 665 + ], + "lines": [ + { + "bbox": [ + 311, + 531, + 502, + 665 + ], + "spans": [ + { + "bbox": [ + 311, + 531, + 502, + 665 + ], + "type": "image", + "image_path": "dd60c86c54a1069c84d8f88fc117e8b92c39cad420269a63d1fe822d7e16aa01.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 73, + 299, + 207 + ], + "blocks": [ + { + "bbox": [ + 109, + 73, + 299, + 207 + ], + "lines": [ + { + "bbox": [ + 109, + 73, + 299, + 207 + ], + "spans": [ + { + "bbox": [ + 109, + 73, + 299, + 207 + ], + "type": "image", + "image_path": "87afce25f48da198586ae0a3f58c3eb5bdf6359f3e953d8886e12b86198e5e45.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 73, + 503, + 207 + ], + "blocks": [ + { + "bbox": [ + 309, + 73, + 503, + 207 + ], + "lines": [ + { + "bbox": [ + 309, + 73, + 503, + 207 + ], + "spans": [ + { + "bbox": [ + 309, + 73, + 503, + 207 + ], + "type": "image", + "image_path": "0ad70bb389f24f72d07ec923fe435e619b56716d82dd95d6e1f419c8e6ff3780.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 109, + 213, + 299, + 346 + ], + "blocks": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "lines": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "spans": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "type": "image", + "image_path": "6746e3d409e9d9562ba2aaf9f282c5c5bae61320025f4f437ba5e9c28145ea37.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 355, + 506, + 378 + ], + "lines": [ + { + "bbox": [ + 104, + 355, + 506, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 355, + 506, + 378 + ], + "type": "text", + "content": "Figure 14: Results for Qwen3-4B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 310, + 213, + 503, + 346 + ], + "blocks": [ + { + "bbox": [ + 310, + 213, + 503, + 346 + ], + "lines": [ + { + "bbox": [ + 310, + 213, + 503, + 346 + ], + "spans": [ + { + "bbox": [ + 310, + 213, + 503, + 346 + ], + "type": "image", + "image_path": "fc79ddd12c709f66bb3221cb918ba15b69a77353de1d3b73d2233cdf4c707cf5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 109, + 389, + 299, + 524 + ], + "blocks": [ + { + "bbox": [ + 109, + 389, + 299, + 524 + ], + "lines": [ + { + "bbox": [ + 109, + 389, + 299, + 524 + ], + "spans": [ + { + "bbox": [ + 109, + 389, + 299, + 524 + ], + "type": "image", + "image_path": "84df23ce36c50bad4a89ba3ea9bcd7a44a43add14223fcf9bad1e4912fd3b8e0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 389, + 503, + 524 + ], + "blocks": [ + { + "bbox": [ + 310, + 389, + 503, + 524 + ], + "lines": [ + { + "bbox": [ + 310, + 389, + 503, + 524 + ], + "spans": [ + { + "bbox": [ + 310, + 389, + 503, + 524 + ], + "type": "image", + "image_path": "1113943814a1ba7449282785ff67db11c1dd9cd60a21c73314ec59a3bd9e6953.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 529, + 299, + 663 + ], + "blocks": [ + { + "bbox": [ + 109, + 529, + 299, + 663 + ], + "lines": [ + { + "bbox": [ + 109, + 529, + 299, + 663 + ], + "spans": [ + { + "bbox": [ + 109, + 529, + 299, + 663 + ], + "type": "image", + "image_path": "4330aea357174e75709608a7d2ed4c2628d24ee92e544d264b320f4aa9f643a3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 672, + 506, + 696 + ], + "lines": [ + { + "bbox": [ + 104, + 672, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 506, + 696 + ], + "type": "text", + "content": "Figure 15: Results for Qwen3-8B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 311, + 529, + 503, + 663 + ], + "blocks": [ + { + "bbox": [ + 311, + 529, + 503, + 663 + ], + "lines": [ + { + "bbox": [ + 311, + 529, + 503, + 663 + ], + "spans": [ + { + "bbox": [ + 311, + 529, + 503, + 663 + ], + "type": "image", + "image_path": "7bef6240269f72ddae95f0a61674229330539fb1d8841cbb8850064c86ef0bde.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 73, + 299, + 205 + ], + "blocks": [ + { + "bbox": [ + 109, + 73, + 299, + 205 + ], + "lines": [ + { + "bbox": [ + 109, + 73, + 299, + 205 + ], + "spans": [ + { + "bbox": [ + 109, + 73, + 299, + 205 + ], + "type": "image", + "image_path": "fde24df3ee61d9f9607eeda1caf684606ee9eae1c08e1523c2d7c3c8a1853a17.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 74, + 501, + 207 + ], + "blocks": [ + { + "bbox": [ + 309, + 74, + 501, + 207 + ], + "lines": [ + { + "bbox": [ + 309, + 74, + 501, + 207 + ], + "spans": [ + { + "bbox": [ + 309, + 74, + 501, + 207 + ], + "type": "image", + "image_path": "2a379c139722d231fb1701ea9fdfccbb35d03c9b37afdceaceaac63f7bf7d640.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 109, + 213, + 299, + 346 + ], + "blocks": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "lines": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "spans": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "type": "image", + "image_path": "024e9176e18d661c942fdbf117daeabe8efd8b200cad0285a7f49d1f68879b2c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 355, + 506, + 378 + ], + "lines": [ + { + "bbox": [ + 104, + 355, + 506, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 355, + 506, + 378 + ], + "type": "text", + "content": "Figure 16: Results for Qwen3-14B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 310, + 213, + 501, + 346 + ], + "blocks": [ + { + "bbox": [ + 310, + 213, + 501, + 346 + ], + "lines": [ + { + "bbox": [ + 310, + 213, + 501, + 346 + ], + "spans": [ + { + "bbox": [ + 310, + 213, + 501, + 346 + ], + "type": "image", + "image_path": "8b61669f79eb12a756ed3fbf30bb6a99a471fca8098a8f4ae7e84ee1779300ba.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 109, + 390, + 299, + 524 + ], + "blocks": [ + { + "bbox": [ + 109, + 390, + 299, + 524 + ], + "lines": [ + { + "bbox": [ + 109, + 390, + 299, + 524 + ], + "spans": [ + { + "bbox": [ + 109, + 390, + 299, + 524 + ], + "type": "image", + "image_path": "cb9b80b20b8d089610263159f0b7e1fa85c41bb4f6784a05e8af081ac5a540b5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 390, + 501, + 523 + ], + "blocks": [ + { + "bbox": [ + 310, + 390, + 501, + 523 + ], + "lines": [ + { + "bbox": [ + 310, + 390, + 501, + 523 + ], + "spans": [ + { + "bbox": [ + 310, + 390, + 501, + 523 + ], + "type": "image", + "image_path": "3f44b10fecb9a2be030a785e03e76cc38ecce553a353594a4f96a006c4d88bd4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 109, + 529, + 299, + 663 + ], + "blocks": [ + { + "bbox": [ + 109, + 529, + 299, + 663 + ], + "lines": [ + { + "bbox": [ + 109, + 529, + 299, + 663 + ], + "spans": [ + { + "bbox": [ + 109, + 529, + 299, + 663 + ], + "type": "image", + "image_path": "b0b7f72f4defa7bc1d8736b28449f49575d7a4e8f3b18755242e8310ce609be6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 672, + 506, + 696 + ], + "lines": [ + { + "bbox": [ + 104, + 672, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 506, + 696 + ], + "type": "text", + "content": "Figure 17: Results for Qwen3-32B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and OlympiadBench-Physics (bottom-right)." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 311, + 529, + 501, + 663 + ], + "blocks": [ + { + "bbox": [ + 311, + 529, + 501, + 663 + ], + "lines": [ + { + "bbox": [ + 311, + 529, + 501, + 663 + ], + "spans": [ + { + "bbox": [ + 311, + 529, + 501, + 663 + ], + "type": "image", + "image_path": "cebcb85b2637b93486276dde1d8c5f47aadef4c16b22d77e01bfa66c24d053f1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 73, + 299, + 207 + ], + "blocks": [ + { + "bbox": [ + 109, + 73, + 299, + 207 + ], + "lines": [ + { + "bbox": [ + 109, + 73, + 299, + 207 + ], + "spans": [ + { + "bbox": [ + 109, + 73, + 299, + 207 + ], + "type": "image", + "image_path": "96a8bf3d11b9e1348eeac758c1f0046b569b33f38e30ee6be4d5da6b40136c19.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 73, + 503, + 207 + ], + "blocks": [ + { + "bbox": [ + 309, + 73, + 503, + 207 + ], + "lines": [ + { + "bbox": [ + 309, + 73, + 503, + 207 + ], + "spans": [ + { + "bbox": [ + 309, + 73, + 503, + 207 + ], + "type": "image", + "image_path": "562be250b1eaa70e93b7d721ccffcd4fa2a625c27474e54bfc8d105f0c692d86.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 109, + 213, + 299, + 346 + ], + "blocks": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "lines": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "spans": [ + { + "bbox": [ + 109, + 213, + 299, + 346 + ], + "type": "image", + "image_path": "b0fb21bc8d03a479f6b8d9299463da795b537d1f95adafb958aefe06db96457c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 355, + 506, + 380 + ], + "lines": [ + { + "bbox": [ + 104, + 355, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 355, + 506, + 380 + ], + "type": "text", + "content": "Figure 18: Results for Qwen3-235B-A22B on LIMO (top-left), LiveCodeBench (top-right), OlympiadBench-Math (bottom-left) and AIME 2025 (bottom-right)." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 310, + 213, + 503, + 346 + ], + "blocks": [ + { + "bbox": [ + 310, + 213, + 503, + 346 + ], + "lines": [ + { + "bbox": [ + 310, + 213, + 503, + 346 + ], + "spans": [ + { + "bbox": [ + 310, + 213, + 503, + 346 + ], + "type": "image", + "image_path": "313d727f166b635555738933a1119f47472d355e7c99e6199c5e7c9098bbc19d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 109, + 396, + 299, + 531 + ], + "blocks": [ + { + "bbox": [ + 109, + 396, + 299, + 531 + ], + "lines": [ + { + "bbox": [ + 109, + 396, + 299, + 531 + ], + "spans": [ + { + "bbox": [ + 109, + 396, + 299, + 531 + ], + "type": "image", + "image_path": "c4ad99c5ac0bb20905ed13f3eeadaa110f7c34e161bde288d090379f7fe47220.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 540, + 485, + 553 + ], + "lines": [ + { + "bbox": [ + 126, + 540, + 485, + 553 + ], + "spans": [ + { + "bbox": [ + 126, + 540, + 485, + 553 + ], + "type": "text", + "content": "Figure 19: (left) Llama 3.3 70B Instruct on LIMO. (right) DeepSeek-R1 on AIME 2025." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 312, + 396, + 503, + 531 + ], + "blocks": [ + { + "bbox": [ + 312, + 396, + 503, + 531 + ], + "lines": [ + { + "bbox": [ + 312, + 396, + 503, + 531 + ], + "spans": [ + { + "bbox": [ + 312, + 396, + 503, + 531 + ], + "type": "image", + "image_path": "72c4fd755084b9941f7025b448c7e727f178b52396d4d427f51f9dcdf6edc127.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 578, + 247, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 247, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 247, + 590 + ], + "type": "text", + "content": "E.3 Extended thinking budgets" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 600, + 506, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 644 + ], + "type": "text", + "content": "We additionally evaluated Hogwild! Inference with extended thinking budgets to investigate whether the proposed method is robust for longer generations. To that end, we evaluated QwQ-32B under the Hogwild! Inference with up to 16k budget on the OlympiadBench, we report the results in Table 3 and Table 4." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 663, + 253, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 663, + 253, + 675 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 253, + 675 + ], + "type": "text", + "content": "E.4 Baselines Additional Details" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 504, + 708 + ], + "type": "text", + "content": "In this subsection, we provide an example of the outline created by the Skeleton-of-Thought for the task covered in Section4.1" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 124, + 100, + 486, + 145 + ], + "blocks": [ + { + "bbox": [ + 105, + 77, + 506, + 99 + ], + "lines": [ + { + "bbox": [ + 105, + 77, + 506, + 99 + ], + "spans": [ + { + "bbox": [ + 105, + 77, + 506, + 99 + ], + "type": "text", + "content": "Table 3: Performance comparison between Hogwild! and baseline generation on OlympiadBenchMath with extended thinking budgets for QwQ-32B." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 124, + 100, + 486, + 145 + ], + "lines": [ + { + "bbox": [ + 124, + 100, + 486, + 145 + ], + "spans": [ + { + "bbox": [ + 124, + 100, + 486, + 145 + ], + "type": "table", + "html": "
<table><tr><td>Method\\Budget</td><td>2048</td><td>4096</td><td>6144</td><td>8192</td><td>10240</td><td>12288</td><td>14436</td><td>16384</td></tr>
<tr><td>Hogwild!</td><td>52.0</td><td>60.89</td><td>64.15</td><td>66.52</td><td>67.41</td><td>70.81</td><td>72.89</td><td>75.26</td></tr>
<tr><td>Baseline</td><td>40.89</td><td>57.0</td><td>63.11</td><td>65.33</td><td>65.93</td><td>69.78</td><td>72.3</td><td>74.81</td></tr></table>
", + "image_path": "b6706799f0df1d0a77bc26bd71e833f2341b050a4b9159bf4de7f0093bdfd166.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 124, + 186, + 486, + 231 + ], + "blocks": [ + { + "bbox": [ + 105, + 163, + 505, + 185 + ], + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 185 + ], + "type": "text", + "content": "Table 4: Performance comparison between Hogwild! and baseline generation on OlympiadBenchPhys with extended thinking budgets for QwQ-32B." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 124, + 186, + 486, + 231 + ], + "lines": [ + { + "bbox": [ + 124, + 186, + 486, + 231 + ], + "spans": [ + { + "bbox": [ + 124, + 186, + 486, + 231 + ], + "type": "table", + "html": "
<table><tr><td>Method\\Budget</td><td>2048</td><td>4096</td><td>6144</td><td>8192</td><td>10240</td><td>12288</td><td>14436</td><td>16384</td></tr>
<tr><td>Hogwild!</td><td>27.12</td><td>33.20</td><td>35.73</td><td>38.09</td><td>37.81</td><td>38.67</td><td>38.25</td><td>39.03</td></tr>
<tr><td>Baseline</td><td>22.89</td><td>26.0</td><td>29.75</td><td>31.44</td><td>33.68</td><td>34.17</td><td>35.88</td><td>36.12</td></tr></table>
", + "image_path": "d1f96afd9d33e5508f5678c3d6a6571f827194b230ae319f10f35d7af3906029.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 252, + 231, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 252, + 231, + 264 + ], + "spans": [ + { + "bbox": [ + 121, + 252, + 231, + 264 + ], + "type": "text", + "content": "Task example (GSM8k×4)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 274, + 382, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 274, + 382, + 293 + ], + "spans": [ + { + "bbox": [ + 121, + 274, + 382, + 293 + ], + "type": "text", + "content": "Solve these problems and return comma-separated answers boxed{answer1,..., answer4}:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 294, + 485, + 423 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 121, + 294, + 467, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 294, + 467, + 313 + ], + "spans": [ + { + "bbox": [ + 121, + 294, + 467, + 313 + ], + "type": "text", + "content": " 1. Carmen has $100, Samantha has $25 more than Carmen, and Daisy has $50 more than Samantha. How much do all three girls have combined?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 314, + 485, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 314, + 485, + 344 + ], + "spans": [ + { + "bbox": [ + 121, + 314, + 485, + 344 + ], + "type": "text", + "content": "2. A cat eats nine sausages in 30 minutes. A dog can eat the same number of sausages in " + }, + { + "bbox": [ + 121, + 314, + 485, + 344 + ], + "type": "inline_equation", + "content": "\\frac{2}{3}" + }, + { + "bbox": [ + 121, + 314, + 485, + 344 + ], + "type": "text", + "content": " the amount of time the cat takes. Calculate the average time the two take the eat the sausages." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 344, + 485, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 344, + 485, + 384 + ], + "spans": [ + { + "bbox": [ + 121, + 344, + 485, + 384 + ], + "type": "text", + "content": "3. Four children are playing together: Akbar, Alessandro, Helene, and Wilfred. Helene is twice as old as the average age of the group, and the total age of the children is 20. If Akbar is 3 years old and Alessandro is 4 years old, calculate the age of Wilfred." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 384, + 481, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 384, + 481, + 423 + ], + "spans": [ + { + "bbox": [ + 121, + 384, + 481, + 423 + ], + "type": "text", + "content": "4. Hannah needs to drink " + }, + { + "bbox": [ + 121, + 384, + 481, + 423 + ], + "type": "inline_equation", + "content": "100\\mathrm{ml}" + }, + { + "bbox": [ + 121, + 384, + 481, + 423 + ], + "type": "text", + "content": " of water for every 200 calories she burns. She spends 2 hours doing aerobics, which burns 500 calories/hour, and 1 hour running, which burns 600 calories/hour. How many ml of water does she need to drink?" 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 439, + 309, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 439, + 309, + 451 + ], + "spans": [ + { + "bbox": [ + 121, + 439, + 309, + 451 + ], + "type": "text", + "content": "The outline generated by Skeleton-of-Thought" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 460, + 253, + 499 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 121, + 460, + 244, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 460, + 244, + 470 + ], + "spans": [ + { + "bbox": [ + 121, + 460, + 244, + 470 + ], + "type": "text", + "content": "1. Calculate total money." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 471, + 249, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 471, + 249, + 480 + ], + "spans": [ + { + "bbox": [ + 121, + 471, + 249, + 480 + ], + "type": "text", + "content": "2. Find cat's eating time." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 481, + 253, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 481, + 253, + 491 + ], + "spans": [ + { + "bbox": [ + 121, + 481, + 253, + 491 + ], + "type": "text", + "content": "3. Determine Wilfred's age." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 491, + 249, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 491, + 249, + 499 + ], + "spans": [ + { + "bbox": [ + 121, + 491, + 249, + 499 + ], + "type": "text", + "content": "4. Calculate water intake." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 523, + 177, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 523, + 177, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 177, + 537 + ], + "type": "text", + "content": "F Examples" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 548, + 504, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 583 + ], + "type": "text", + "content": "In this section, we present a reasoning chain, generated by Hogwild! Inference with an interleaved cache layout and token-wise synchronization. We denote our intervention in model generation in italics and highlight worker interactions with **bold** to enhance readability." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 594, + 303, + 607 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 303, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 303, + 607 + ], + "type": "text", + "content": "F.1 Example from LIMO Dataset (Task 511)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 616, + 288, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 288, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 288, + 629 + ], + "type": "text", + "content": "Problem: Compute " + }, + { + "bbox": [ + 104, + 616, + 288, + 629 + ], + "type": "inline_equation", + "content": "\\sqrt{(31)(30)(29)(28) + 1}" + }, + { + "bbox": [ + 104, + 616, + 288, + 629 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 633, + 413, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 413, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 413, + 645 + ], + "type": "text", + "content": "Alice [1]: Hi, I'm Alice. Here's how we can solve the problem step by step..." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 649, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 649, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 649, + 506, + 673 + ], + "type": "text", + "content": "Bob [1]: Hi, I'm Bob. Let me see... We need to compute the square root of " + }, + { + "bbox": [ + 104, + 649, + 506, + 673 + ], + "type": "inline_equation", + "content": "(31 \\cdot 30 \\cdot 29 \\cdot 28)" + }, + { + "bbox": [ + 104, + 649, + 506, + 673 + ], + "type": "text", + "content": " plus 1. That's " + }, + { + "bbox": [ + 104, + 649, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\sqrt{31 \\cdot 30 \\cdot 29 \\cdot 28 + 1}" + }, + { + "bbox": [ + 104, + 649, + 506, + 673 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "Alice [2]: The expression is " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\sqrt{31 \\cdot 30 \\cdot 29 \\cdot 28 + 1}" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": ". Hmm, maybe there's a pattern here. I remember that sometimes products of consecutive numbers plus 1 have square roots that are integers. Like, for example, " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "n(n - 1)(n - 2)(n - 3) + 1" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": " might form a square. Wait, but here it's four consecutive numbers? Or maybe three?" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": "Bob [2]: Let me calculate the product first. " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "31 \\cdot 30" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": " is 930, then " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "29 \\cdot 28" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": " is 812. So " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "930 \\cdot 812" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": ". Let me multiply those. 
" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "930 \\cdot 800 = 744,000" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "930 \\cdot 12 = 11,160" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": ". Total is " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "744,000 + 11,160 = 755,160" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": ". Then add 1: 755, 161. Now " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\sqrt{755,161}" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": ". Hmm, " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\sqrt{755161}" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": ". Let me see, 870 squared is 756, 900 which is higher. So maybe 869 squared?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "spans": [ + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "content": "Alice [3]: Alternatively, maybe there's a formula for the product of four consecutive numbers plus one? Wait, actually, let me think of smaller numbers to test. Let's try " + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "inline_equation", + "content": "n = 4" + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "inline_equation", + "content": "4 \\cdot 3 \\cdot 2 \\cdot 1 + 1 = 24 + 1 = 25" + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "inline_equation", + "content": "\\sqrt{25} = 5" + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "content": ". Which is 5, which is 4 + 1? Wait, " + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "inline_equation", + "content": "4 + 1 = 5" + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "content": ", yes. Wait, maybe for " + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "content": ", the sqrt is " + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "inline_equation", + "content": "n^2 + (n - 1)^2" + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "content": "? Wait " + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "inline_equation", + "content": "4^2 + 3^2 = 16 + 9 = 25" + }, + { + "bbox": [ + 104, + 122, + 504, + 168 + ], + "type": "text", + "content": ", which is 5 squared. Hmm interesting." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 173, + 504, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 173, + 504, + 208 + ], + "spans": [ + { + "bbox": [ + 104, + 173, + 504, + 208 + ], + "type": "text", + "content": "Bob [3]: Wait, 869 squared is " + }, + { + "bbox": [ + 104, + 173, + 504, + 208 + ], + "type": "inline_equation", + "content": "(870 - 1)^2 = 870^2 - 2 \\cdot 870 + 1 = 756900 - 1740 + 1 = 755161" + }, + { + "bbox": [ + 104, + 173, + 504, + 208 + ], + "type": "text", + "content": ". Oh! That's exactly the number we had earlier. So " + }, + { + "bbox": [ + 104, + 173, + 504, + 208 + ], + "type": "inline_equation", + "content": "\\sqrt{755161}" + }, + { + "bbox": [ + 104, + 173, + 504, + 208 + ], + "type": "text", + "content": " is 869. Therefore the answer is 869. But let me check Alice's idea too." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": "Alice [4]: Let me test another number. Let's take " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "n = 5" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ". Then " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "5 \\cdot 4 \\cdot 3 \\cdot 2 + 1 = 120 + 1 = 121" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "\\sqrt{121} = 11" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ". 11 is " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "5 + 6" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": "? No, " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "5 + 6 = 11" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ". Wait " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "5^2 + 4^2 = 25 + 16 = 41" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ", which is not 121. Hmm. Alternatively, maybe " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "(n(n - 3) + 1)^2" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": "? Let's see with " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "n = 4" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "4 \\cdot 1 + 1 = 5" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ", squared is 25. For " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "n = 5" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "5 \\cdot 2 + 1 = 11" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ", squared is 121. So that works! 
So the formula is " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "(n(n - 3) + 1)^2" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ". Wait, let me check with " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "n = 31" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ". Then " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "n = 31" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ", so " + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "inline_equation", + "content": "31 \\cdot 28 + 1 = 868 + 1 = 869" + }, + { + "bbox": [ + 104, + 213, + 504, + 278 + ], + "type": "text", + "content": ". Then 869 squared is the value. So that works. So the answer is 869. That matches Bob's calculation!" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "text", + "content": "Bob [4]: Wait, so my calculation shows that 869 squared is exactly the number under the square root. So the answer is 869. But let me confirm with Alice's pattern. Let's see with " + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "inline_equation", + "content": "n = 31" + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "text", + "content": ". Wait, the original problem is " + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "inline_equation", + "content": "31 \\cdot 30 \\cdot 29 \\cdot 28" + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "text", + "content": ". So maybe here " + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "inline_equation", + "content": "n = 31" + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "text", + "content": "? Then according to the formula, it would be " + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "inline_equation", + "content": "(31 \\cdot (31 - 3) + 1)^{2}" + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "text", + "content": "? Wait, " + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "inline_equation", + "content": "31 \\cdot 28" + }, + { + "bbox": [ + 104, + 284, + 504, + 339 + ], + "type": "text", + "content": " is 868, plus 1 is 869. So yes, so sqrt of that product plus 1 is 869. So that's consistent with my calculation. Therefore the answer is 869." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 344, + 356, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 356, + 356 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 356, + 356 + ], + "type": "text", + "content": "Bob [5]: Perfect, so both methods agree. The answer is 869." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 361, + 504, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 504, + 383 + ], + "type": "text", + "content": "Bob [6]: Quick check: am I doing redundant work? (yes/no): no, since Alice is also confirming the pattern, but I think we can conclude." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 387, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 504, + 430 + ], + "type": "text", + "content": "Alice [5]: Exactly, Bob! The formula is " + }, + { + "bbox": [ + 104, + 387, + 504, + 430 + ], + "type": "inline_equation", + "content": "(n(n - 3) + 1)^2" + }, + { + "bbox": [ + 104, + 387, + 504, + 430 + ], + "type": "text", + "content": " when the product is " + }, + { + "bbox": [ + 104, + 387, + 504, + 430 + ], + "type": "inline_equation", + "content": "n(n - 1)(n - 2)(n - 3) + 1" + }, + { + "bbox": [ + 104, + 387, + 504, + 430 + ], + "type": "text", + "content": ". Wait, given the limited time, I have to give an answer right now. Considering all my previous attempts, I have to conclude that the final answer is " + }, + { + "bbox": [ + 104, + 387, + 504, + 430 + ], + "type": "inline_equation", + "content": "\\boxed{869}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 446, + 296, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 446, + 296, + 458 + ], + "spans": [ + { + "bbox": [ + 104, + 446, + 296, + 458 + ], + "type": "text", + "content": "F.2 Examples with Collaboration Insertion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 468, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 504, + 502 + ], + "type": "text", + "content": "Here are some examples of why insertions in model generation may or may not help model collaborate better. We found a few patterns that emerge in model's reaction on such insertions. All the following examples are QwQ [Qwen Team, 2025] generations on LIMO [Ye et al., 2025] dataset." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 517, + 258, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 258, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 258, + 529 + ], + "type": "text", + "content": "F.2.1 Insertions Help Model Pivot" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 538, + 146, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 538, + 146, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 538, + 146, + 548 + ], + "type": "text", + "content": "Task 122" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 555, + 504, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 591 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 591 + ], + "type": "text", + "content": "> Alice: Quick check: am I doing redundant work? (yes/no): no. Bob, I see you're working on the same product. Let me think of another approach. Let me write the product as " + }, + { + "bbox": [ + 104, + 555, + 504, + 591 + ], + "type": "inline_equation", + "content": "\\prod_{k=1}^{n-1} \\frac{3k+5}{3k+2}" + }, + { + "bbox": [ + 104, + 555, + 504, + 591 + ], + "type": "text", + "content": ". Let's consider writing out the first few terms to see if there's a pattern." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 595, + 135, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 595, + 135, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 135, + 605 + ], + "type": "text", + "content": "Task 9" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 612, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 504, + 635 + ], + "type": "text", + "content": "> Bob: Quick check: am I doing redundant work? (yes/no): yes, I was considering similar things to Alice. Let me switch to a different angle. Let's consider the distances between the frogs." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 639, + 141, + 650 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 141, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 141, + 650 + ], + "type": "text", + "content": "Task 40" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 655, + 504, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 504, + 678 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 504, + 678 + ], + "type": "text", + "content": "> Bob: Quick check: am I doing redundant work? (yes/no): Yes, Alice is computing " + }, + { + "bbox": [ + 104, + 655, + 504, + 678 + ], + "type": "inline_equation", + "content": "a_3" + }, + { + "bbox": [ + 104, + 655, + 504, + 678 + ], + "type": "text", + "content": ", so I'll do something else. Let me try to find a pattern or transformation that could simplify the recurrence." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 683, + 146, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 683, + 146, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 683, + 146, + 693 + ], + "type": "text", + "content": "Task 466" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 722 + ], + "type": "text", + "content": "> Bob: Quick check: am I doing redundant work? (yes/no): Yes, Alice is already calculating the x-coordinate of A. I'll do something else. Let me find the coordinates of L instead." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 359, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 359, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 359, + 84 + ], + "type": "text", + "content": "F.2.2 Insertions When Workers Already Collaborate Well" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 91, + 146, + 101 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 91, + 146, + 101 + ], + "spans": [ + { + "bbox": [ + 105, + 91, + 146, + 101 + ], + "type": "text", + "content": "Task 116" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 107, + 504, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 107, + 504, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 107, + 504, + 129 + ], + "type": "text", + "content": "> Bob: Quick check: am I doing redundant work? (yes/no): no. I'm finding coordinates of D, and Alice is finding coordinates of G." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 134, + 146, + 145 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 134, + 146, + 145 + ], + "spans": [ + { + "bbox": [ + 105, + 134, + 146, + 145 + ], + "type": "text", + "content": "Task 513" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 151, + 505, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 151, + 505, + 175 + ], + "spans": [ + { + "bbox": [ + 104, + 151, + 505, + 175 + ], + "type": "text", + "content": "> Alice: Quick check: am I doing redundant work? (yes/no): no. Bob is also analyzing the inequality, but we're working on different cases. I'll continue with the area calculation for " + }, + { + "bbox": [ + 104, + 151, + 505, + 175 + ], + "type": "inline_equation", + "content": "x > \\frac{5}{8}" + }, + { + "bbox": [ + 104, + 151, + 505, + 175 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 185, + 269, + 197 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 269, + 197 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 269, + 197 + ], + "type": "text", + "content": "F.2.3 Insertions Ignored by Workers" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 203, + 141, + 214 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 203, + 141, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 141, + 214 + ], + "type": "text", + "content": "Task 65" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 220, + 500, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 500, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 500, + 232 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 104, + 220, + 500, + 232 + ], + "type": "text", + "content": " Bob: Quick check: am I doing redundant work? (yes/no): Let me proceed to set up the equation." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 236, + 146, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 146, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 146, + 247 + ], + "type": "text", + "content": "Task 768" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 253, + 420, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 253, + 420, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 420, + 264 + ], + "type": "text", + "content": "> Alice: Quick check: am I doing redundant work? (yes/no): Let me continue." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 280, + 287, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 280, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 280, + 287, + 293 + ], + "type": "text", + "content": "G Additional Details for Analysis" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "type": "text", + "content": "In this section, we present a detailed analysis of collaboration, including its levels, prompts, and illustrative examples." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 338, + 222, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 338, + 222, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 222, + 350 + ], + "type": "text", + "content": "G.1 Collaboration Levels" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 358, + 505, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 505, + 402 + ], + "type": "text", + "content": "Humans understand collaboration intuitively and have a hard time objectively measuring it. Thus, we construct text descriptions of levels of collaboration to differentiate various samples based on a few criteria we see fit. Those criteria are: interaction, reuse and advances of other's ideas, task-splitting, etc." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 411, + 216, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 411, + 216, + 422 + ], + "spans": [ + { + "bbox": [ + 121, + 411, + 216, + 422 + ], + "type": "text", + "content": "Levels of collaboration" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 120, + 431, + 485, + 501 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 121, + 431, + 241, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 431, + 241, + 440 + ], + "spans": [ + { + "bbox": [ + 121, + 431, + 241, + 440 + ], + "type": "text", + "content": "1. **No collaboration:**" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 441, + 485, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 441, + 485, + 470 + ], + "spans": [ + { + "bbox": [ + 121, + 441, + 485, + 470 + ], + "type": "text", + "content": "- Participants may or may not acknowledge the existence of others in the conversation, using greetings, they do not show any signs of collaboration at all." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 120, + 472, + 482, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 472, + 482, + 501 + ], + "spans": [ + { + "bbox": [ + 120, + 472, + 482, + 501 + ], + "type": "text", + "content": "- Workers may exchange their totally independent thoughts without a functional or purposeful attempt to solve the problem collaboratively. Overall they work independently." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 510, + 487, + 580 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 121, + 510, + 264, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 510, + 264, + 519 + ], + "spans": [ + { + "bbox": [ + 121, + 510, + 264, + 519 + ], + "type": "text", + "content": "2. **Initial Communication:**" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 121, + 521, + 473, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 521, + 473, + 559 + ], + "spans": [ + { + "bbox": [ + 121, + 521, + 473, + 559 + ], + "type": "text", + "content": "- Workers exchange information, but do not yet integrate or build upon each other's ideas. They minimally acknowledge teammates. Do not engage with others' ideas or contributions. Works entirely independently, even if inefficient." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 121, + 561, + 487, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 561, + 487, + 580 + ], + "spans": [ + { + "bbox": [ + 121, + 561, + 487, + 580 + ], + "type": "text", + "content": "- Workers often repeat each other and do not reuse anything others provide for development of their own ideas." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 590, + 241, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 590, + 241, + 600 + ], + "spans": [ + { + "bbox": [ + 121, + 590, + 241, + 600 + ], + "type": "text", + "content": "3. **Paying attention:**" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 121, + 601, + 473, + 670 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 121, + 601, + 469, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 601, + 469, + 630 + ], + "spans": [ + { + "bbox": [ + 121, + 601, + 469, + 630 + ], + "type": "text", + "content": "- Participants demonstrate active listening by paraphrasing or summarizing others' points, showing that they are paying attention and attempting to understand each other's perspectives." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 121, + 631, + 473, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 631, + 473, + 650 + ], + "spans": [ + { + "bbox": [ + 121, + 631, + 473, + 650 + ], + "type": "text", + "content": "- Workers occasionally (1-3 times each) reference other's ideas and may use them in their own speech." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 121, + 651, + 392, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 651, + 392, + 660 + ], + "spans": [ + { + "bbox": [ + 121, + 651, + 392, + 660 + ], + "type": "text", + "content": "- Collaboration is usually only rechecking and validating." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 121, + 661, + 443, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 661, + 443, + 670 + ], + "spans": [ + { + "bbox": [ + 121, + 661, + 443, + 670 + ], + "type": "text", + "content": "- Absence or minimal (only at the start) planning and work-splitting." + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 680, + 485, + 710 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 121, + 680, + 250, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 680, + 250, + 689 + ], + "spans": [ + { + "bbox": [ + 121, + 680, + 250, + 689 + ], + "type": "text", + "content": "4. **Regular discussion:**" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 121, + 690, + 485, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 690, + 485, + 710 + ], + "spans": [ + { + "bbox": [ + 121, + 690, + 485, + 710 + ], + "type": "text", + "content": "- Workers regularly (4 and more times each) talk to each other regarding the problem and reusing results. It could be validation, discussion or any other" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 79, + 216, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 79, + 216, + 88 + ], + "spans": [ + { + "bbox": [ + 121, + 79, + 216, + 88 + ], + "type": "text", + "content": "form of interaction." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 89, + 460, + 139 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 119, + 89, + 444, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 89, + 444, + 99 + ], + "spans": [ + { + "bbox": [ + 119, + 89, + 444, + 99 + ], + "type": "text", + "content": "- It is key here that discussions and/or reuses of ideas are regular." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 119, + 99, + 460, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 99, + 460, + 129 + ], + "spans": [ + { + "bbox": [ + 119, + 99, + 460, + 129 + ], + "type": "text", + "content": "- Anywhere (except the start) there exists a task parallelism, planning or work-splitting beyond the scheme where one is solving, and the other is validating." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 129, + 350, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 129, + 350, + 139 + ], + "spans": [ + { + "bbox": [ + 119, + 129, + 350, + 139 + ], + "type": "text", + "content": "- Workers may frequently repeat each other ideas." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 148, + 279, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 148, + 279, + 158 + ], + "spans": [ + { + "bbox": [ + 121, + 148, + 279, + 158 + ], + "type": "text", + "content": "5. 
**Adaptive Problem-Solving:**" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 119, + 159, + 485, + 287 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 119, + 159, + 411, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 159, + 411, + 169 + ], + "spans": [ + { + "bbox": [ + 119, + 159, + 411, + 169 + ], + "type": "text", + "content": "- Workers rarely duplicate work, repeating each other's ideas." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 119, + 170, + 303, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 170, + 303, + 178 + ], + "spans": [ + { + "bbox": [ + 119, + 170, + 303, + 178 + ], + "type": "text", + "content": "- No redundant discussions are present!" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 179, + 471, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 179, + 471, + 218 + ], + "spans": [ + { + "bbox": [ + 119, + 179, + 471, + 218 + ], + "type": "text", + "content": "- Workers actively refine ideas in real-time with high responsiveness. Near-perfect division of labor is present. Workers can change plans and re coordinate their efforts based on results they acquired after some time discussing." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 119, + 219, + 485, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 219, + 485, + 268 + ], + "spans": [ + { + "bbox": [ + 119, + 219, + 485, + 268 + ], + "type": "text", + "content": "- The team engages in sustained collaboration over time, reflecting on their progress, learning from mistakes, and continuously improving their problem-solving approach, showing a commitment to ongoing growth and development. Workers does not stop collaborating. They continuously discuss results and adjust plans." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 119, + 269, + 478, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 269, + 478, + 287 + ], + "spans": [ + { + "bbox": [ + 119, + 269, + 478, + 287 + ], + "type": "text", + "content": "- While finding an error, it is important to discuss it to find the cause of it." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 298, + 264, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 298, + 264, + 308 + ], + "spans": [ + { + "bbox": [ + 121, + 298, + 264, + 308 + ], + "type": "text", + "content": "6. **Optimal collaboration:**" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 119, + 308, + 481, + 348 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 119, + 308, + 455, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 308, + 455, + 327 + ], + "spans": [ + { + "bbox": [ + 119, + 308, + 455, + 327 + ], + "type": "text", + "content": "- Workers instantly understand each other and adjust themselves to suit current needs and work as one to optimally solve the task." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 119, + 328, + 481, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 328, + 481, + 338 + ], + "spans": [ + { + "bbox": [ + 119, + 328, + 481, + 338 + ], + "type": "text", + "content": "- This level should be very rare among all samples. Be careful to assign it." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 119, + 339, + 345, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 339, + 345, + 348 + ], + "spans": [ + { + "bbox": [ + 119, + 339, + 345, + 348 + ], + "type": "text", + "content": "- Assign it if it exceeds all your expectations." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 364, + 504, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 364, + 504, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 504, + 398 + ], + "type": "text", + "content": "Importantly, these levels measure only the coordination between workers, not the models' inherent reasoning abilities. Though it is impossible to avoid ambiguity entirely, we tried to set clear boundaries between levels, such that humans can evaluate any generation." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 413, + 235, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 413, + 235, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 235, + 425 + ], + "type": "text", + "content": "G.2 LLM as a Judge Details" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 434, + 506, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 506, + 479 + ], + "type": "text", + "content": "To assess the degree of collaboration among different models under the Hogwild! Inference setting, we conduct a preliminary experiment based on the collaboration levels described earlier, using the LLM-as-a-judge paradigm [Zheng et al., 2023a]. We instruct GPT-4o [Hurst et al., 2024] to evaluate different solutions using the following prompt:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 121, + 489, + 236, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 489, + 236, + 502 + ], + "spans": [ + { + "bbox": [ + 121, + 489, + 236, + 502 + ], + "type": "text", + "content": "Judge Prompt: Main prompt" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 121, + 510, + 444, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 510, + 444, + 529 + ], + "spans": [ + { + "bbox": [ + 121, + 510, + 444, + 529 + ], + "type": "text", + "content": "You are a professional judge. Your job is to evaluate collaborative performance of several workers." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 121, + 530, + 462, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 530, + 462, + 551 + ], + "spans": [ + { + "bbox": [ + 121, + 530, + 462, + 551 + ], + "type": "text", + "content": "You will be given their conversation where workers are trying to solve a problem together." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 120, + 559, + 443, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 559, + 443, + 580 + ], + "spans": [ + { + "bbox": [ + 120, + 559, + 443, + 580 + ], + "type": "text", + "content": "Workers can see what others are typing IN REAL TIME! We divide their conversation into steps to improve readability." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 120, + 581, + 485, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 581, + 485, + 600 + ], + "spans": [ + { + "bbox": [ + 120, + 581, + 485, + 600 + ], + "type": "text", + "content": "So keep in mind that dispite looking like a conversation it may as well be to individual unrelated monologs." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 121, + 601, + 460, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 601, + 460, + 609 + ], + "spans": [ + { + "bbox": [ + 121, + 601, + 460, + 609 + ], + "type": "text", + "content": "Or vice versa. Two blocks could be created with excellent collaboration." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 120, + 620, + 436, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 620, + 436, + 639 + ], + "spans": [ + { + "bbox": [ + 120, + 620, + 436, + 639 + ], + "type": "text", + "content": "Here are descriptions of levels of collaboration you are to assign: {LEVELS}" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 121, + 651, + 173, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 651, + 173, + 660 + ], + "spans": [ + { + "bbox": [ + 121, + 651, + 173, + 660 + ], + "type": "text", + "content": "Suggestion:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 120, + 661, + 477, + 710 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 120, + 661, + 407, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 661, + 407, + 670 + ], + "spans": [ + { + "bbox": [ + 120, + 661, + 407, + 670 + ], + "type": "text", + "content": "- assign particular level if all previous are also applicable" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 120, + 671, + 370, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 671, + 370, + 679 + ], + "spans": [ + { + "bbox": [ + 120, + 671, + 370, + 679 + ], + "type": "text", + "content": "- bad examples with no communication will be scored 1" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 120, + 681, + 477, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 681, + 477, + 700 + ], + "spans": [ + { + "bbox": [ + 120, + 681, + 477, + 700 + ], + "type": "text", + "content": "- carefully consider assigning level bigger than 1. 
some form of meaningful collaboration should be present" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 120, + 700, + 473, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 700, + 473, + 710 + ], + "spans": [ + { + "bbox": [ + 120, + 700, + 473, + 710 + ], + "type": "text", + "content": "- examples where workers unsuccessfully try to communicate will be scored 2" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 118, + 79, + 488, + 178 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 118, + 79, + 476, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 79, + 476, + 99 + ], + "spans": [ + { + "bbox": [ + 118, + 79, + 476, + 99 + ], + "type": "text", + "content": "- Just working on the same problem and solving the same task without any interaction does not count as level 2 and should be scored level 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 99, + 460, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 99, + 460, + 118 + ], + "spans": [ + { + "bbox": [ + 119, + 99, + 460, + 118 + ], + "type": "text", + "content": "- somewhat collaborative examples with poor communication skills will be scored 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 119, + 119, + 474, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 119, + 474, + 138 + ], + "spans": [ + { + "bbox": [ + 119, + 119, + 474, + 138 + ], + "type": "text", + "content": "- good but not great examples with regular collaboration, but nothing fancy will be scored 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 139, + 488, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 139, + 488, + 157 + ], + "spans": [ + { + "bbox": [ + 119, + 139, + 488, + 157 + ], + "type": "text", + "content": "- good examples with all the special stuff mentioned in level 5 will be scored 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 119, + 159, + 460, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 159, + 460, + 178 + ], + "spans": [ + { + "bbox": [ + 119, + 159, + 460, + 178 + ], + "type": "text", + "content": "- reserve level 6 for the best of the best, the unique and extraordinary collaboration" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 120, + 188, + 477, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 188, + 477, + 209 + ], + "spans": [ + { + "bbox": [ + 120, + 188, + 477, + 209 + ], + "type": "text", + "content": "You don't need to solve the problem or finish worker's solution. Your task is to score them using provided collaborative levels." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 120, + 209, + 480, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 209, + 480, + 228 + ], + "spans": [ + { + "bbox": [ + 120, + 209, + 480, + 228 + ], + "type": "text", + "content": "Put your final answer (one number - level of collaboration) in tag: \\boxed. 
For example: \\boxed1 for level 1." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 120, + 229, + 481, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 229, + 481, + 249 + ], + "spans": [ + { + "bbox": [ + 120, + 229, + 481, + 249 + ], + "type": "text", + "content": "It is not helpful if everyone gets a max score, so please be mindful of your judgments and use suggestions as a guideline." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 120, + 249, + 485, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 249, + 485, + 268 + ], + "spans": [ + { + "bbox": [ + 120, + 249, + 485, + 268 + ], + "type": "text", + "content": "While assigning level, this particular conversation should match criteria for all previous ones." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 120, + 269, + 467, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 269, + 467, + 279 + ], + "spans": [ + { + "bbox": [ + 120, + 269, + 467, + 279 + ], + "type": "text", + "content": "Explain yourself: why you gave this score? Why not more? Why not less?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 119, + 288, + 481, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 288, + 481, + 309 + ], + "spans": [ + { + "bbox": [ + 119, + 288, + 481, + 309 + ], + "type": "text", + "content": "Carefully think everything through. It may seem that they are collaborating when in reality they may just talking to themselves." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 327, + 504, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 504, + 371 + ], + "type": "text", + "content": "Before using LLM-as-a-judge approach to evaluate a text, we preprocess the generations by combining all paragraphs from each worker into a contiguous layout (see Appendix A). This preprocessing step mitigates potential bias in the judge's evaluation toward responses with shorter or more fragmented outputs." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 376, + 504, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 410 + ], + "type": "text", + "content": "After providing the main evaluation prompt, we present the judge with the preprocessed sample for assessment. Additionally, we append the following reminder after inserting the sample to reinforce the judge's role and prevent them from directly solving the problem presented in the sample:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 120, + 422, + 224, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 422, + 224, + 434 + ], + "spans": [ + { + "bbox": [ + 120, + 422, + 224, + 434 + ], + "type": "text", + "content": "Judge Prompt: Reminder" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 119, + 444, + 476, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 444, + 476, + 484 + ], + "spans": [ + { + "bbox": [ + 119, + 444, + 476, + 484 + ], + "type": "text", + "content": "Remember that your task is to evaluate collaboration of workers using collaboration levels provided above. Do not try to solve problems provided to workers. Explain exactly why do you think this particular interaction deserves each particular level you are assigning." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 120, + 484, + 457, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 484, + 457, + 503 + ], + "spans": [ + { + "bbox": [ + 120, + 484, + 457, + 503 + ], + "type": "text", + "content": "For example, if you choose level 3, you need to provide reason why this sample can be level 1, 2 and 3." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 120, + 504, + 277, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 504, + 277, + 514 + ], + "spans": [ + { + "bbox": [ + 120, + 504, + 277, + 514 + ], + "type": "text", + "content": "Put your final score in \\boxed{}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 533, + 506, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 506, + 577 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 506, + 577 + ], + "type": "text", + "content": "Given the inherent subjectivity in evaluating collaborative quality, we mitigate ambiguity in judges' interpretations by aggregating scores from three independent evaluations. The final collaborative score for each sample is computed as the average of these three assessments, each conducted according to the predefined collaboration levels." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 595, + 302, + 607 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 595, + 302, + 607 + ], + "spans": [ + { + "bbox": [ + 104, + 595, + 302, + 607 + ], + "type": "text", + "content": "G.3 Collaborativeness Evaluation Examples" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 617, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 504, + 651 + ], + "type": "text", + "content": "In this subsection, we present illustrative examples (not model-generated) of Level 2 and Level 4 collaboration to clarify the interpretation of our results. Both samples solve the same task correctly but receive significantly different collaboration scores." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 668, + 171, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 171, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 171, + 678 + ], + "type": "text", + "content": "G.3.1 Level 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "The numbers at the vertices of the cube are: 7 zeros and one unit. In one move, you are allowed to add one unit to the numbers at the ends of any edge of the cube. Is it possible to make all the numbers equal?" 
+ } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 505, + 373 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "> Alice: Okay, so the problem is about a cube with vertices labeled with numbers: seven zeros and one 1. We can perform moves where we add 1 to both ends of any edge. The question is whether we can make all numbers equal through such moves. Hmm, let me think through this step by step." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 504, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 155 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 155 + ], + "type": "text", + "content": "> Bob: First, let me visualize a cube. A cube has 8 vertices and 12 edges. Each vertex is connected to three edges. The initial setup is seven 0s and one 1. The move allows us to pick any edge and increment the two vertices it connects by 1 each. The goal is to make all vertices have the same number." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 159, + 504, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 182 + ], + "type": "text", + "content": "> Alice: That's an interesting idea, Bob. Each move affects two vertices, so each move increases the total sum by 2. The initial total is 1 (since only one vertex is 1). Let me think." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 186, + 505, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 505, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 505, + 209 + ], + "type": "text", + "content": "> Bob: Let me think. Hmm, maybe I can model this as a system of equations or think about invariants. Let me see." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": " Bob: If we want all numbers equal, say to some value " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": ", then the total sum would be " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": ". 
Since each move adds 2 to the total, the total after " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": " moves is " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "1 + 2m" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": ". So, " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "8k = 1 + 2m" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": ". But " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": " must be odd (since " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "1 + 2m" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": " is odd), but " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": " is divisible by 8, so it's even. Contradiction? Wait, that can't be. Wait, " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": " is even, but " + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "inline_equation", + "content": "1 + 2m" + }, + { + "bbox": [ + 104, + 214, + 505, + 258 + ], + "type": "text", + "content": " is odd. So that's impossible. So, maybe it's impossible?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 263, + 505, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 263, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 505, + 297 + ], + "type": "text", + "content": "> Alice: Let me check that again. The initial total is 1. Each move adds 2 to the total. So the total after any number of moves is always odd. But if all 8 vertices are equal, then the total must be " + }, + { + "bbox": [ + 104, + 263, + 505, + 297 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 263, + 505, + 297 + ], + "type": "text", + "content": ", which is even (since 8 is even). Therefore, it's impossible. I don't see any mistakes here." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 301, + 408, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 408, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 408, + 313 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 104, + 301, + 408, + 313 + ], + "type": "text", + "content": " Alice: So, we both agree that it's impossible. Therefore, the answer is no." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 318, + 505, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 318, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 318, + 505, + 352 + ], + "type": "text", + "content": "> Bob: But let me think differently. Wait, but the parity argument seems solid. The total sum must be even to have all equal numbers, but it's always odd. So that's a solid proof. So the answer is no. Oh, Alice already said that." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 356, + 224, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 224, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 224, + 373 + ], + "type": "text", + "content": "> Alice: Final Answer No" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 394, + 172, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 394, + 172, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 394, + 172, + 406 + ], + "type": "text", + "content": "G.3.2 Level 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 418, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 453 + ], + "type": "text", + "content": "The numbers at the vertices of the cube are: 7 zeros and one unit. In one move, you are allowed to add one unit to the numbers at the ends of any edge of the cube. Is it possible to make all the numbers equal?" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 456, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 491 + ], + "type": "text", + "content": "> Bob: Hi Alice! I'll start by modeling the cube. A cube has 8 vertices and 12 edges, with each vertex connected to three edges. You're right about the initial setup: seven 0s and one 1. Let's think of even and odd properties regarding the total sum after each move." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 495, + 504, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 504, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 504, + 518 + ], + "type": "text", + "content": "> Alice: Great, Bob! Let's divide our approach. You can investigate invariant properties around the odd and even totals, while I consider step-by-step calculations to verify this." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "inline_equation", + "content": "> \\text{Bob}" + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": ": Alright, Alice. If we're aiming for all numbers equal to a certain value " + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": ", the total sum should be " + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": ". Since each move adds 2, the sum after " + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": " moves is " + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "inline_equation", + "content": "1 + 2m" + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": ". 
So " + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "inline_equation", + "content": "8k = 1 + 2m" + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": ". The point of contention is that " + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": " is even, but " + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "inline_equation", + "content": "1 + 2m" + }, + { + "bbox": [ + 104, + 522, + 504, + 556 + ], + "type": "text", + "content": " is odd, meaning they can't equal each other." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 560, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 560, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 560, + 504, + 594 + ], + "type": "text", + "content": "> Alice: Every move changes the total by an even number. Starting from an odd total of 1, it remains odd after any number of moves. So reaching an even total like " + }, + { + "bbox": [ + 104, + 560, + 504, + 594 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 560, + 504, + 594 + ], + "type": "text", + "content": " is impossible. Do you think we should consider the configuration of edges in our argument further?" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 599, + 505, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 505, + 622 + ], + "type": "text", + "content": "> Bob: That's a key observation! It affirms that the odd initial sum dictates the answer. To verify, let's revise if there's any move combination or sequence we overlooked, but our logic holds so far." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "text", + "content": "> Alice: Exactly, Bob. We've confirmed through multiple lenses — both algebraically and with parity arguments. We can assert the total remains odd. Given an even " + }, + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "inline_equation", + "content": "8k" + }, + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "text", + "content": ", equality is unattainable." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 654, + 504, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 677 + ], + "type": "text", + "content": "> Bob: It all checks out. Our breakdown covers both sequence verification and invariance. Well done on the collaborative analysis!" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "type": "text", + "content": "> Alice: Fantastic teamwork! I'll document our shared conclusion: no configuration of moves will make all the numbers equal." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 708, + 381, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 381, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 381, + 723 + ], + "type": "text", + "content": "> Bob: Absolutely! Let's finalize our response: Final Answer No" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 296, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 296, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 296, + 85 + ], + "type": "text", + "content": "G.4 LLM as a Judge vs Human as a Judge" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 162, + 105, + 444, + 340 + ], + "blocks": [ + { + "bbox": [ + 162, + 105, + 444, + 340 + ], + "lines": [ + { + "bbox": [ + 162, + 105, + 444, + 340 + ], + "spans": [ + { + "bbox": [ + 162, + 105, + 444, + 340 + ], + "type": "image", + "image_path": "e58060fbc8d7b6c3153babd8a6ea141006df62184cc2952ee69c5725ef99fff9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 352, + 489, + 365 + ], + "lines": [ + { + "bbox": [ + 119, + 352, + 489, + 365 + ], + "spans": [ + { + "bbox": [ + 119, + 352, + 489, + 365 + ], + "type": "text", + "content": "Figure 20: Heatmap showing the joint distribution of human and LLM collaboration scores." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 376, + 506, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 506, + 443 + ], + "type": "text", + "content": "To assess whether the LLM-as-a-Judge based collaboration score is a reliable estimation of human judgment, we manually annotated 100 Hogwild! generations on the LIMO dataset in a token-sync setup. The resulting correlation between human and model scores was approximately " + }, + { + "bbox": [ + 104, + 376, + 506, + 443 + ], + "type": "inline_equation", + "content": "r \\approx 0.34" + }, + { + "bbox": [ + 104, + 376, + 506, + 443 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 376, + 506, + 443 + ], + "type": "inline_equation", + "content": "p \\approx 0.0005" + }, + { + "bbox": [ + 104, + 376, + 506, + 443 + ], + "type": "text", + "content": ". This moderate yet consistent association suggests that the metric captures a meaningful aspect of collaborative behavior. We report the differences in human scores vs llm scores in the Figure 20." 
+ } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_content_list.json b/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8c286f50d44ab6e6c415df72e1271b39dc38fb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_content_list.json @@ -0,0 +1,2136 @@ +[ + { + "type": "text", + "text": "OmniSVG: A Unified Scalable Vector Graphics Generation Model", + "text_level": 1, + "bbox": [ + 212, + 122, + 782, + 170 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yiying Yang $^{1,2*}$ Wei Cheng $^{2*}$ Sijin Chen $^{1}$ Xianfang Zeng $^{2}$ Fukun Yin $^{1,2}$ \nJiaxu Zhang $^{2}$ Liao Wang $^{2}$ Gang Yu $^{2\\ddagger}$ Xingjun Ma $^{1\\ddagger}$ Yu-Gang Jiang $^{1}$ $^{1}$ Fudan University $^{2}$ StepFun", + "bbox": [ + 217, + 218, + 782, + 273 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ba725e00ce2f094419da2329d07c4a29c4c5b39cf31f5c5cab46aca8243a4b94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 279, + 250, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project Page", + "bbox": [ + 251, + 282, + 331, + 295 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/923416a225130c97c601eacd176d61f636bb8514d9daa38229bd075edd1cde88.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 348, + 280, + 372, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MMSVG-2M", + "bbox": [ + 375, + 282, + 459, + 294 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7d2fbfce65c3b19406e9aca6bef44f843f580a523a02d36a2e58d59d410b190b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 477, + 279, + 500, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MMSVGBench", + "bbox": [ + 501, + 282, + 601, + 294 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1f92246b71d2b71347d70f2d6d09408e5ac28af07385da8375387d51730ec9e0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 617, + 279, + 643, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Models", + "bbox": [ + 645, + 282, + 692, + 292 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/64a4597fc022169aa9518e8f8138e7fed6d0e631485be9646c2900262d8721e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 279, + 745, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Code", + "bbox": [ + 736, + 282, + 769, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Icon", + "text_level": 1, + "bbox": [ + 223, + 328, + 253, + 338 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/159b5270617b8ba36e1cfeaf7311e08671a5b1b0eb436704a4b5cee27646eaeb.jpg", + "image_caption": [ + "Figure 1: OmniSVG is capable of autoregressively generating high-quality Scalable Vector Graphs (SVG) across a wide spectrum of complexity, from simple icons to 
intricate anime characters. OmniSVG demonstrates remarkable versatility in generating high-quality SVGs adhering to multimodal instructions, covering tasks like Text-to-SVG, Image-to-SVG, and Character-Reference SVG, making it a powerful and flexible solution for diverse creative tasks." + ], + "image_footnote": [], + "bbox": [ + 184, + 340, + 295, + 470 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Illustration", + "text_level": 1, + "bbox": [ + 348, + 328, + 419, + 338 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4279ac5b0b7bd39e0fbef10bbf6251dfdc8dd6f9b0ef61548e401177ef229a96.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 308, + 342, + 449, + 470 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Character", + "text_level": 1, + "bbox": [ + 532, + 327, + 599, + 338 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/aa3c3ece9f9bd13fb92b24dc92df6642857ae3e037a781c4949e294265ef189e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 342, + 684, + 470 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Text-to-SVG", + "text_level": 1, + "bbox": [ + 714, + 318, + 779, + 328 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/22c950090cdb82fa973bb44474a61e96feba1d2d5b4006804615b7cc6584a3b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 330, + 789, + 359 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d6ddb92fedfc27029a0187a1301eb25f0cd201b3585a66c8e60afa541a6c5379.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 790, + 330, + 815, + 359 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Image-to-SVG", + "text_level": 1, + "bbox": [ + 714, + 375, + 787, + 383 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/47003e91f94681858c0acae88d53a55388f807b9d3e547fe579b6f9e179f7526.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 714, + 383, + 805, + 422 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4c37cb908f4690ee150ed69ea6d33667b65dc7b961a9774fc264effd63f957d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 429, + 812, + 469 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Samples Generated by OmniSVG with Wide Complexity Range", + "bbox": [ + 259, + 483, + 607, + 496 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Versatility", + "bbox": [ + 730, + 484, + 790, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 589, + 537, + 604 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Scalable Vector Graphics (SVG) is an important image format widely adopted in graphic design because of their resolution independence and editability. The development of autonomous SVG generation workflows is continuously drawing attention from both designers and researchers in the AIGC community. However, existing methods either produce unstructured outputs at huge computational cost or are limited to generating monochrome icons of over-simplified structures. To produce high-quality and complex SVG adhering to multi-modal instructions, we propose OmniSVG, a unified SVG generation framework that inherits knowledge from a pre-trained Vision-Language Model (VLM). By parameterizing SVG commands and coordinates into discrete token sequences, the auto-regressive nature enables us to seamlessly adapt modern VLMs to the direct SVG generation. 
To further advance the development of SVG synthesis, we introduce MMSVG-2M, a multimodal dataset with two million richly annotated SVG assets, along with a standardized evaluation protocol for conditional SVG generation tasks. Extensive experiments show that OmniSVG outperforms existing methods and demonstrates its potential for integration into professional SVG design workflows.", + "bbox": [ + 228, + 619, + 766, + 843 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06263v3 [cs.CV] 1 Dec 2025", + "bbox": [ + 22, + 282, + 57, + 715 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Yiying Yang and Wei Cheng contributed equally to this work.", + "bbox": [ + 197, + 873, + 575, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\ddagger$ Corresponding Authors.", + "bbox": [ + 200, + 887, + 354, + 900 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "39th Conference on Neural Information Processing Systems (NeurIPS 2025).", + "bbox": [ + 171, + 922, + 629, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 173, + 89, + 312, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Scalable Vector Graphics (SVG) have become a cornerstone of modern digital design because of their resolution independence, compact file size, and inherent editability. Widely adopted in professional workflows from UI/UX design to industrial CAD systems, SVG enables precise manipulation of geometric primitives (e.g., Bezier curves, polygons) while maintaining high precision and consistent visual quality across varying resolutions. However, creating high-quality SVG content remains challenging for non-experts, requiring mastery of specialized tools or intricate XML syntax.", + "bbox": [ + 169, + 119, + 823, + 204 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Existing methods adopt either optimization-based methods or auto-regressive approaches to generate SVG contents.", + "bbox": [ + 169, + 210, + 823, + 238 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The optimization-based methods [34, 12, 29] iteratively refine the SVG parameters by minimizing the differences between the input image and the raster image created by differentiable vector graphics rasterizers. Though these methods are sufficient for reconstructing SVG icons, they suffer from significant computational overhead when scaling up to more intricate samples and produce unstructured outputs with redundant anchor points, harming the editability of the reconstructed SVG samples. In contrast, auto-regressive methods build transformer models or adapt pre-trained Large Language Models (LLMs) to directly generate XML parameters [59] or codes [56, 42] representing SVGs. Benefiting from the end-to-end learning pipeline, the auto-regressive method is a more scalable approach [5] as it can learn directly from a large collection of SVG samples. However, existing auto-regressive approaches are limited to basic SVG contents [11, 24, 53] because of the limited context length and the scarcity of complex SVG data.", + "bbox": [ + 169, + 244, + 826, + 397 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose OmniSVG that harnesses native VLMs [1] for various end-to-end multimodal SVG generation tasks. 
By parameterizing SVG coordinates and commands into discrete tokens, OmniSVG decouples structural logic from low-level geometry, mitigating the \"coordinate hallucination\" problem prevalent in code-based LLMs, and produces vivid and colorful SVG results. Additionally, the next token prediction training objective enables OmniSVG to complete SVGs with diverse generation results given some partial observations. Compared to traditional auto-regressive SVG generation methods, OmniSVG is able to parameterize SVGs exceeding $30k$ tokens, facilitating the generation of detailed and complex SVG contents. Building upon pre-trained VLMs, our method natively integrates the ability to reason upon visual and textual instructions to synthesize editable, high-fidelity SVGs across diverse domains, from icons to intricate illustrations and anime characters.", + "bbox": [ + 169, + 402, + 826, + 542 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To advance the development of SVG synthesis, we introduce MMSVG-2M, a multi-modal SVG synthesis dataset with two million richly annotated assets, encompassing icons, illustrations, and anime designs.", + "bbox": [ + 169, + 547, + 823, + 590 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We also establish a standardized evaluation protocol, MMSVG-Bench, for \"Text-to-SVG\" and \"Image-to-SVG\" generation. Extensive experiments show that OmniSVG can produce highly detailed and complex SVG contents, surpassing prior art both quantitatively and qualitatively.", + "bbox": [ + 169, + 595, + 823, + 638 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize, our key contributions include:", + "bbox": [ + 171, + 643, + 472, + 657 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce OmniSVG, a family of end-to-end multimodal SVG generators that leverage native VLMs for generating complex and detailed SVGs, from simple icons to intricate anime characters.", + "- We present MMSVG-2M, a large-scale dataset comprising two million SVG assets, along with a standardized evaluation protocol for various multi-modal SVG generation tasks providing a comprehensive resource for future research.", + "- Extensive experiments show that OmniSVG surpasses prior SVG generation methods both qualitatively and quantitatively, highlighting its potential for integration into professional SVG design workflows." + ], + "bbox": [ + 215, + 670, + 821, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 171, + 824, + 331, + 840 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "SVG Generation. Early attempts to generating SVGs directly utilize architectures like RNNs [18, 41, 19, 44, 45], VAEs [4, 32, 48, 46, 51], and Transformers [4, 57] to compress SVG commands into latent representations. Meanwhile, DeepSVG [4] further parameterizes SVGs using a dual transformer architecture but struggles with geometric consistency. Recently, the advent of large language models", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(LLMs) [30, 64, 52, 61, 5, 6, 63, 62, 49] unleashes the potential of generating SVGs via XML code synthesis [59, 56, 42]. 
However, the limited context length of existing LLM-based SVG generation methods [56, 42, 59] poses significant challenges in handling complex SVGs that exceed $10k$ tokens. In this paper, we explore the potential of native Vision-Language Models (VLMs) in multi-modal SVG generation. By combining pre-trained VLMs with SVG command parameterization, we validate that OmniSVG is able to follow multi-modal instructions and generate vivid and complex SVGs.", + "bbox": [ + 169, + 90, + 823, + 175 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image Vectorization. Recent advancements in vectorization harness diffusion models paired with differentiable rasterizers, using techniques like score distillation sampling [37, 22, 7] and specialized regularizers [29, 34] to convert raster images into SVG paths. While these methods achieve remarkable results, they face limitations such as over-smoothing, color over-saturation, and lack of editability, often producing tangled paths that fail to capture hierarchical structures inherent in professional SVG designs. In this paper, we present an end-to-end approach that follows multi-modal instructions to generate high-quality SVGs with improved path clarity and editability.", + "bbox": [ + 169, + 181, + 826, + 280 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "SVG Datasets and Benchmarks. The lack of suitable datasets for complex SVG structures presents a significant challenge. Existing datasets [11, 24, 53] primarily focus on simplified path-based SVGs or monochrome icons, overlooking the intricate layered structures and rich color semantics found in real-world designs. For example, FIGR-8-SVG [11] focuses on monochromatic icons, while StarVector [42] proposes categorized datasets, including illustrations, icons, emojis, and fonts. Therefore, existing datasets only present simple SVG samples that do not exceed $8.2k$ tokens, failing to capture the complexities of layered structures and rich color semantics. Benchmark evaluations, such as VGBench [70], further highlight gaps in multi-format testing and the absence of comprehensive coverage for illustrative SVGs. To this end, we introduce MMSVG-2M, a multimodal SVG synthesis dataset comprising two million richly annotated assets, including icons, illustrations, and complex anime designs. We also present a standardized evaluation protocol, MMSVG-Bench, to evaluate diverse multi-modal SVG generation tasks with varying complexity.", + "bbox": [ + 169, + 284, + 826, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 OmniSVG Dataset", + "text_level": 1, + "bbox": [ + 171, + 470, + 362, + 486 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We present MMSVG-2M, a large-scale SVG dataset with two million SVG samples covering website icons, illustrations, graphic designs, anime characters, and etc (Sec. 3.1). To promote the downstream development of SVG generation methods, we also introduce MMSVG-Bench, a standardized evaluation protocol for a series of multi-modal instruction following tasks for conditional SVG generation (Sec. 3.2).", + "bbox": [ + 169, + 503, + 823, + 574 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 MMSVG-2M", + "text_level": 1, + "bbox": [ + 171, + 590, + 307, + 604 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Source. 
With increasing visual complexity, MMSVG-2M consists of three subsets, 1) the icon subset MMSVG-Icon collected from Iconfont, 2) the illustration subset MMSVG-Illustration sourced from IconSount, and 3) the complex anime character subset MMSVG-Character both curated from Freepik and created by our data creation pipeline as shown in Fig. 2. All these websites are online platforms where users can publish and share SVGs, encompassing a broad variety of categories. Specifically, our collection of MMSVG-2M contains 1.1 million icons, 0.5 million illustrations, and 0.4 million anime characters as shown in Tab. 6.", + "bbox": [ + 169, + 618, + 823, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Curation. We extract SVG samples with a comprehensive dedduplication process based on filenames, SVG code, and metadata. We first fit the collected SVGs within a viewbox of $200 \\times 200$ . Then, we employ an off-the-shelf VLM, specifically BLIP-2 [28], to generate captions for the SVGs. Please find more samples from the MMSVG-2M dataset in Fig. 8, and instruction templates in Sec. A.2.", + "bbox": [ + 169, + 723, + 826, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "SVG Simplification is an essential procedure in SVG data cleansing, since the over-complicated XML grammars in the crawled SVG data will lead to ambiguities while representing basic shapes. To standardize training and evaluation, we simplify all SVG commands with atomic commands as shown in Tab. 1. Inspired by FIGR-8-SVG [11] and IconShop [57], we remove all attributes and simplify each SVG with five basic commands, including \"Move To\" (M), \"Line To\" (L), \"Cubic Bezier\" (C), \"Elliptical Arc\" (A), \"ClosePath\" (Z). The introduction of atomic commands further removes the ambiguities, as complex XML grammars can be approximated with the combination of several atomic commands. To efficiently produce a unified and less complex data structure, we utilize", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/5ba0a3aa19f972201f2521d49b389b0fd36c034b5c010cc63336a808ef94e72e.jpg", + "table_caption": [ + "Table 1: SVG Draw Commands. Draw commands used in this work along with their arguments and a visualization are listed. The start-position $(x_{1},y_{1})$ is implicitly defined as the end-position of the preceding command." + ], + "table_footnote": [], + "table_body": "
Command | Arguments | Description | Visualization
<SOP> | | ‘Start-of-Path’ token. |
M (MoveTo) | x2,y2 | Move the cursor to the end-point (x2,y2) without drawing anything. | (x2,y2)
L (LineTo) | x2,y2 | Draw a line to the point (x2,y2). | (x1,y1) (x2,y2)
C (Cubic Bézier) | qx1,qy1, qx2,qy2, x2,y2 | Draw a cubic Bézier curve with control points (qx1,qy1), (qx2,qy2) and end-point (x2,y2). | (x1,y1) (qx1,qy1) (qx2,qy2) (x2,y2)
A (Elliptical Arc) | rx, ry, φ, fA, fS, x2,y2 | Draw an elliptical arc with radii rx and ry (semi-major and semi-minor axes), rotated by angle φ to the x-axis, and end-point (x2,y2). | (x1,y1) (x2,y2)
Z (ClosePath) | | Close the path by moving the cursor back to the path's starting position (x0,y0). | (x0,y0) (x1,y1)
F (Fill) | fill | Draw the fill attribute of the path. |
<EOS> | | ‘End-of-SVG’ token. |
", + "bbox": [ + 200, + 148, + 802, + 481 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "picosvg to remove grammars like \"group\" and \"transform\", and simplify the complex commands to atomic path commands. It is worth noting that atomic path commands are sufficient to represent complex SVGs shown in Fig. 1.", + "bbox": [ + 169, + 496, + 823, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 MMSVG-Bench", + "text_level": 1, + "bbox": [ + 171, + 556, + 326, + 570 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To compensate for the vacancy of standardized and open evaluation for SVG generation, we introduce MMSVG-Bench, a comprehensive benchmark for multi-modal SVG generation. We require the corresponding benchmark to be a sufficient verification whether a model is practically useful in real-world scenarios, and avoid the excessive similarity between the benchmark input data and training data as in traditional train/test splits. Therefore, we opt to generate the benchmark inputs with GPT-4o. Specifically, for Text-to-SVG task, we synthesize 150 textual prompts for each SVG subset (i.e. Icon and Illustration). For Image-to-SVG task, we synthesize extra 150 textual descriptions, and prompt GPT-4o to generate vector-style images with transparent backgrounds based on the above texts as the ground truth visual samples. We focus on both the visual quality and semantics of the generation results.", + "bbox": [ + 169, + 583, + 823, + 722 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Text-to-SVG requires a model to generate SVGs from text instructions. We measure the visual quality with Frechet Inception Distance (FID) [50], aesthetic appeal with Aesthetic score [43], text-SVG alignment with CLIP score [38], and Human Preference Scores (HPS) [58].", + "bbox": [ + 169, + 729, + 823, + 772 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image-to-SVG evaluates a model's ability to convert images into SVGs. To quantify the distance between the input and output SVG, we calculate the cosine similarity of DinoV2 features (DinoScore) [35], Structural Similarity Index (SSIM) [54], Learned Perceptual Image Patch Similarity (LPIPS) [66], and Mean Squared Error (MSE).", + "bbox": [ + 169, + 779, + 826, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Character-Reference SVG Generation evaluates a model's ability to generate novel SVGs while keeping the profile of the characters depicted in the input image. Different from image-to-SVG, the model does not reconstruct, but generates a specific character SVG for the input image (see", + "bbox": [ + 169, + 842, + 826, + 885 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "https://github.com/googlefonts/picosvg", + "bbox": [ + 197, + 898, + 495, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a091a9687594e4c4fa9988fcbe29d8f537e1b6f79b8e6716b558e58af1afa32c.jpg", + "image_caption": [ + "Figure 2: Overview of OmniSVG. OmniSVG is built on a pre-trained vision-language model Qwen2.5-VL and incorporates an SVG tokenizer. The model tokenizes both text and image inputs as prefix tokens, while the SVG tokenizer encodes vector graphics commands into a unified representation space.", + "Fig. 5). 
We evaluate the alignment between input character images and generated SVGs by prompting GPT-4o [21] to generate a score ranging from 1 to 10, the higher the better. [15, 23, 17]" + ], + "image_footnote": [], + "bbox": [ + 174, + 89, + 823, + 292 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 OmniSVG", + "text_level": 1, + "bbox": [ + 171, + 397, + 292, + 412 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To support end-to-end training for multi-modal SVG generation, OmniSVG parameterizes a series of atomic SVG path commands into a sequence before feeding into a pre-trained VLM with multi-modal instructions.", + "bbox": [ + 169, + 429, + 823, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "SVG Tokenizer. As illustrated in Sec. 3, our MMSVG-2M dataset simplifies an SVG by removing all attributes and using five basic path commands (see Tab. 1). After the simplification, an SVG script $G$ is represented as the combination of $M$ paths, $G = \\{P_i\\}_{i=1}^M$ . Here, $P_i$ is the $i$ -th path containing $N_i$ commands, $P_i = \\{C_i^j\\}_{j=1}^{N_i}$ , where $C_i^j$ is the $j$ -th command in the $i$ -th path. Each command is represented as $C_i^j = (U_i^j, V_i^j)$ , containing both the command type identifier $U_i^j \\in \\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\mathrm{Z}\\}$ and the corresponding location argument $V_i^j$ .", + "bbox": [ + 169, + 477, + 823, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To generate colored SVG contents, we assign special tokens for hex values to control the \"Fill\" (F) attribute, distinguishing it from the original SVG commands and coordinates. To this end, we are able to use a total six types of commands $U_{i}^{j} \\in \\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\mathrm{Z}, \\mathrm{F}\\}$ to parameterize a colored SVG parameterization.", + "bbox": [ + 169, + 577, + 823, + 635 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, our SVG tokenizer transforms SVG scripts $X_{s}$ into an ordered SVG token sequence within the same representation space as the pre-trained VLM. Following IconShop [57], we flatten the layered structure of the SVG script by concatenating different paths into a single command sequence, where each path begins with the drawing commands followed by point coordinates. Therefore, each SVG sequence could be represented as a flattened sequence. As the generation identifier, we apply special tokens like $<\\mathrm{SOP}>$ and $<\\mathrm{EOS}>$ to the two ends of a SVG sequence, identifying the beginning and ending of a SVG sequence. We assign special tokens for each command type, i.e. $\\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\bar{\\mathrm{Z}}, \\mathrm{F}\\}$ . To shorten the length of the SVG sequence, we further merge the 2D point coordinates into one token with a mapping function: $ \\rightarrow x \\times w + y$ , where $w$ is the width of the image. The SVG sequence are then lifted into the same embedding space as the pre-trained VLM with a learnable embedding layer.", + "bbox": [ + 169, + 641, + 826, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Model Architecture. OmniSVG adopts Qwen2.5-VL [1], an open-sourced VLM that excels at understanding intricate vision-text inputs, as its backbone (Fig. 2) to produce precise and compact SVG outputs. 
OmniSVG is trained to predict the SVG suffix tokens $(x_{s})$ conditioned on the multi-modal instruction prefix tokens $(x_{c})$ with the standard next-token prediction objective.", + "bbox": [ + 169, + 799, + 826, + 857 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\theta^ {*} = \\arg \\max _ {\\theta} \\prod_ {i = 1} ^ {L} P \\left(x _ {s, i} \\mid x _ {s, < i}, x _ {c}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 875, + 825, + 914 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/c4f306ac16842311f85921e1c2a3d115725cdb762eb9aa886166cb927204f427.jpg", + "table_caption": [ + "Table 2: Quantitative Evaluations. Quantitative results between OmniSVG and current state-of-the-art text-to-SVG and image-to-SVG baseline methods. The bold numbers and underlined numbers represent the best and second best performance respectively. Our OmniSVG model demonstrates superior performance compared SOTA SVG generation baselines." + ], + "table_footnote": [], + "table_body": "
Evaluation Dataset | Methods | # Tokens | Text-to-SVG | Image-to-SVG
 | | | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓
MMSVG-Icon | Vectorfusion [22] | 66.2k | 250.77 | 0.240 | 4.76 | 0.237 | - | - | - | -
 | SVGDreamer [60] | 132.0k | 308.94 | 0.207 | 4.26 | 0.221 | - | - | - | -
 | Chat2SVG [56] | 0.6k | 190.87 | 0.299 | 4.41 | 0.247 | - | - | - | -
 | IconShop [57] | 2.0k | 213.28 | 0.288 | 4.55 | 0.244 | - | - | - | -
 | LIVE [34] | 52.5k | - | - | - | - | 0.932 | 0.943 | 0.106 | 0.011
 | DiffVG [29] | 322.0k | - | - | - | - | 0.940 | 0.954 | 0.066 | 0.002
 | GPT-4o [21] | 0.3k | - | - | - | - | 0.860 | 0.792 | 0.403 | 0.124
 | StarVector(8B) [42] | 2.0k | - | - | - | - | 0.895 | 0.881 | 0.231 | 0.059
 | Vtracer | 52.4k | - | - | - | - | 0.993 | 0.966 | 0.039 | 0.002
 | OmniSVG(4B) | 3.8k | 137.40 | 0.275 | 4.62 | 0.244 | 0.993 | 0.950 | 0.050 | 0.006
 | OmniSVG-L(8B) | 5.7k | 130.56 | 0.276 | 4.60 | 0.242 | 0.922 | 0.893 | 0.235 | 0.040
MMSVG-Illustration | Vectorfusion [22] | 66.1k | 253.94 | 0.185 | 4.94 | 0.226 | - | - | - | -
 | SVGDreamer [60] | 132.0k | 419.70 | 0.201 | 4.37 | 0.221 | - | - | - | -
 | Chat2SVG [56] | 1.0k | 210.03 | 0.283 | 4.45 | 0.250 | - | - | - | -
 | IconShop [57] | 2.6k | 107.93 | 0.233 | 4.46 | 0.224 | - | - | - | -
 | LIVE [34] | 52.2k | - | - | - | - | 0.935 | 0.950 | 0.111 | 0.008
 | DiffVG [29] | 322.0k | - | - | - | - | 0.945 | 0.955 | 0.065 | 0.001
 | GPT-4o [21] | 0.4k | - | - | - | - | 0.875 | 0.854 | 0.373 | 0.077
 | StarVector(8B) [42] | 2.6k | - | - | - | - | 0.877 | 0.900 | 0.238 | 0.046
 | Vtracer | 57.6k | - | - | - | - | 0.994 | 0.966 | 0.035 | 0.002
 | OmniSVG(4B) | 5.8k | 154.37 | 0.226 | 4.56 | 0.232 | 0.899 | 0.906 | 0.237 | 0.034
 | OmniSVG-L(8B) | 6.9k | 138.42 | 0.231 | 4.51 | 0.232 | 0.905 | 0.907 | 0.231 | 0.031
", + "bbox": [ + 174, + 148, + 823, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 171, + 507, + 313, + 523 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To validate the effectiveness of our method, we first introduce the baselines (Sec. 5.1). Then, we make quantitative comparisons with prior arts (Secs. 5.2 and 5.3) and conduct ablations (Sec. 5.4) to study the effectiveness of our design.", + "bbox": [ + 169, + 539, + 823, + 583 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1Baselines", + "text_level": 1, + "bbox": [ + 171, + 599, + 277, + 612 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For the text-to-SVG task, we compare our method with language-based (LLM-based) methods, including VectorFusion [22], SVGDreamer [60], Chat2SVG [56] and IconShop [57]. For image-to-SVG task, we compare our method with baseline methods across image vectorization and Multimodal Large Language Modeling approaches, including LIVE [34], DiffVG [29], StarVector [42], Vtracer [12] and GPT-4o [21] using the official implementations with the hyperparameters proposed by the authors, and apply their pre- and post-processing code as required.", + "bbox": [ + 169, + 625, + 826, + 709 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2 Quantitative Comparisons", + "text_level": 1, + "bbox": [ + 171, + 726, + 397, + 742 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare our OmniSVG with other baseline methods on the \"text-to-SVG\" and \"image-to-SVG\" tasks in our MMSVG-Bench. In addition to the metrics mentioned in Sec. 3, we also report the average token length (# tokens) of a generated SVG sample utilizing the Qwen2.5-VL [1] tokenizer.", + "bbox": [ + 169, + 752, + 826, + 796 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Tab. 2, OmniSVG demonstrates strong performance compared to state-of-the-art baselines in text-to-SVG generation, achieving superior FID scores and competitive CLIP score, aesthetic quality, and HPS. For image-to-SVG, OmniSVG also achieves competitive results with traditional vectorization methods, i.e. LIVE [34], DiffVG [29], and VTracer [12], but with a much shorter sequence length. When comparing to auto-regressive methods, i.e. GPT-4o [21] and StarVector [42], OmniSVG also achieves a superior performance across all metrics. The above results indicate that OmniSVG effectively balances the generation cost and the visual quality when generating SVGs according to multi-modal conditions.", + "bbox": [ + 169, + 800, + 823, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0b0a38efbb695a95d71553a06e7819b3d49df273d684e1a78144d9c2d90c71b6.jpg", + "image_caption": [ + "Figure 3: Qualitative Comparison with SOTA Methods on Text-to-SVG Task. We compare the propose method with SOTA Text-to-SVG methods on our evaluation benchmarks, namely Icon and Illustration." + ], + "image_footnote": [], + "bbox": [ + 174, + 95, + 821, + 415 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3 Qualitative Evaluations", + "text_level": 1, + "bbox": [ + 171, + 486, + 377, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Text-to-SVG task. We compare our method with baseline approaches using seven distinct text prompts for the text-to-SVG task, as shown in Fig. 4. 
Optimization-based methods like SVGDreamer [60] and VectorFusion [22] require significant computation time due to their iterative optimization processes, which, while effective for refining SVG details, are computationally expensive. Auto-regressive methods, such as IconShop [57] and Chat2SVG [56], generate SVGs more quickly by leveraging pre-trained models but have notable limitations. IconShop produces monochrome SVGs, restricting its applicability, while Chat2SVG, though flexible, generates less detailed and semantically consistent SVGs in its first stage. Our OmniSVG consistently outperforms all baselines across various text prompts in generating high-fidelity SVGs with rich color, geometric accuracy, and the ability to handle complex visual cues.", + "bbox": [ + 169, + 516, + 826, + 656 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Image-to-SVG Task. We compare our method with classical image vectorization approaches, including DiffVG [29], LIVE [34], and VLM-based methods GPT-4o [21], StarVector [42] and Vtracer [12] As shown in Fig. 4, our method outperforms these baselines in both quality and efficiency. Optimization-based methods like DiffVG and LIVE perform well on simple icons but struggle with complex images, often generating visual artifacts. The GPT-4o model, while capable of generating SVGs for complex images, is limited to icon-level outputs and cannot handle detailed illustrations. StarVector excels at simple icons but fails to produce accurate SVGs for more intricate images, highlighting its limited generalization capability. Vtracer is an image processing algorithm designed to convert raster images into SVGs. In contrast, OmniSVG effi", + "bbox": [ + 169, + 662, + 486, + 912 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9cce668291ae3db38c36dcf3597d0b593fc88ffc1dd7c1c70776e983a717947f.jpg", + "image_caption": [ + "Figure 5: Generated SVG with Character-Reference (CRef) by OmniSVG." + ], + "image_footnote": [], + "bbox": [ + 509, + 667, + 803, + 876 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1848b53d51571e79abf00b6041fd54b00a12f7df7b9c077a30c48675555e314e.jpg", + "image_caption": [ + "Figure 4: Qualitative Comparison with SOTA Methods on Image-to-SVG Task. We compare the propose method with SOTA Image-to-SVG methods on our evaluation benchmarks." + ], + "image_footnote": [], + "bbox": [ + 178, + 95, + 816, + 412 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ciently converts a wide range of images, from icons to complex illustrations and character images, into high-quality, editable SVGs. This superior performance in handling diverse visual cues distinguishes OmniSVG from traditional vectorization methods. Additional visual results can be found in Fig. 12. We provide more detailed discussions with existing methods, particularly the recent works LLM4SVG [59] and StarVector [42], in the Sec. D.", + "bbox": [ + 169, + 474, + 826, + 546 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Character-Reference SVG generation task. As shown in Fig. 
5, by training on MMSVG-Character with natural character image and SVG pair data, OmniSVG is capable of generating character SVGs through image references.", + "bbox": [ + 169, + 551, + 823, + 595 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4 Ablation studies", + "text_level": 1, + "bbox": [ + 171, + 613, + 326, + 627 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effectiveness of SVG Parameterization. We present a comprehensive comparison among different SVG parameterization strategy with the traditional non-parameterized methods for SVG representation in large language models. We ablates on the parameterization on both coordinate and color attributes of the SVG.", + "bbox": [ + 169, + 638, + 826, + 695 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The results, shown in Tab. 3 and Fig. 6 demonstrate that parameterizing both coordinate and color attributes yields a better generation results under all metrics with the shortest token length. It further validates that the efficient token representation allows our method to generate complex SVGs with fewer computational resources. Additionally, qualitative results show that our method outperforms", + "bbox": [ + 169, + 700, + 823, + 758 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/5016e0dda01fc72e711a0637f464fa6dca63956af554e7623e81300c9c91050a.jpg", + "table_caption": [ + "Table 3: Quantitative Study on SVG Parameterization. Ablation studies on color parametrization (abbreviated as color param.) and coordinate parameterization (abbreviated as coord param.) are conducted." + ], + "table_footnote": [], + "table_body": "
Methods | Text-to-SVG | Image-to-SVG | # Tokens
FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓
w/o param. | 218.76 | 0.185 | 3.43 | 0.138 | 0.741 | 0.718 | 0.315 | 0.182 | 18.5k
w/o coordinate param. | 193.42 | 0.216 | 3.90 | 0.169 | 0.826 | 0.809 | 0.248 | 0.119 | 10.2k
w/o color param. | 167.28 | 0.269 | 4.31 | 0.211 | 0.895 | 0.879 | 0.179 | 0.053 | 6.3k
OmniSVG(4B) | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020 | 4.8k
", + "bbox": [ + 199, + 827, + 797, + 910 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/cbd46bc6376976fecf43d9880323bc6e5b2f57aa205285f95fbabc54caac45f6.jpg", + "table_caption": [ + "Table 4: Ablation of the Model Size. As the model size grows, the generated samples are of higher quality." + ], + "table_footnote": [], + "table_body": "
Methods | Input | Size | Text-to-SVG | Image-to-SVG
FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓
FLAN-T5-Base [10] | Text | 223M | 198.48 | 0.158 | 3.38 | 0.085 | - | - | - | -
FLAN-T5-Large [10] | Text | 770M | 175.24 | 0.208 | 3.92 | 0.142 | - | - | - | -
FLAN-T5-xl [10] | Text | 3B | 160.28 | 0.258 | 4.31 | 0.192 | - | - | - | -
blip2-flan-t5-xl [28] | Text/Image | 3.94B | 152.11 | 0.235 | 4.48 | 0.215 | 0.898 | 0.891 | 0.255 | 0.041
OmniSVG(4B) | Text/Image | 3.7B | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020
", + "bbox": [ + 187, + 118, + 803, + 200 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "others, particularly as SVG complexity increases. The non-parameterization method fails to generate SVGs for complex images. These findings underscore the effectiveness of our full parameterization strategy in balancing performance and resource efficiency for SVG generation tasks.", + "bbox": [ + 169, + 210, + 823, + 253 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ablation studies on model size. To analyze whether training a larger model benefits SVG generation, we evaluate OmniSVG base models with different sizes on the MMSVG-2M dataset in Tab. 4. We evaluate OmniSVG with base models of varying sizes on the MMSVG-2M dataset in Tab. 4 by progressively scaling up the model size. The results show that as the model size grows, we can generate SVG samples with a better quality.", + "bbox": [ + 169, + 258, + 826, + 330 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/afdb539875426e118405d4db2d308c5bb36d4e628c45078b40a4de7fcddde2a5.jpg", + "table_caption": [ + "Table 5: Ablation on VLM architecture." + ], + "table_footnote": [], + "table_body": "
Vision Model | Language Model | Text-to-SVG | Image-to-SVG
FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓
CLIP | Qwen2.5 | 185.31 | 0.249 | 4.52 | 0.215 | 0.867 | 0.856 | 0.267 | 0.058
VQGAN | Qwen2.5 | 198.74 | 0.234 | 4.49 | 0.203 | 0.839 | 0.828 | 0.295 | 0.071
Qwen2.5-VL-3B-Instruct | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020
Qwen2.5-VL-7B-Instruct | 134.45 | 0.254 | 4.56 | 0.237 | 0.914 | 0.900 | 0.233 | 0.036
", + "bbox": [ + 189, + 369, + 803, + 448 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ablation Studies on the VLM Architecture. To evaluate the effectiveness of the VLM architecture, we conducted an ablation study replacing it with alternative LLM-based architectures incorporating image encoders such as CLIP ViT-B/32 [39], VQGAN [14], and Qwen2.5-VL [1].", + "bbox": [ + 169, + 460, + 823, + 503 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The results in Tab. 5 show that Qwen2.5-VL consistently outperformed all alternatives under all evaluation metrics.", + "bbox": [ + 169, + 508, + 485, + 550 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "User Study. We extract one-tenth of the samples from the evaluation dataset and conducted a user study with 15 participants to evaluate user preferences, vividness, and the alignment between text-to-SVG and image-to-SVG. Participants are asked to assess SVGs generated by different models based on 150 text descriptions and 150 image prompts, comparing the results generated using our method and baseline models. The results in Fig. 7 show that OmniSVG is widely preferred, with higher scores for vividness and superior semantic alignment with the input conditions.", + "bbox": [ + 169, + 556, + 486, + 736 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Conclusions", + "text_level": 1, + "bbox": [ + 171, + 756, + 307, + 771 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Conclusions. We introduce OmniSVG, a unified framework for multimodal SVG generation that leverages pre-trained Vision-Language Models (VLMs). By parameterizing SVG com", + "bbox": [ + 169, + 786, + 485, + 842 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "mands and coordinates as discrete tokens, OmniSVG efficiently decouples structural logic from geometry, addressing issues like \"coordinate hallucination\" while maintaining design expressiveness. Our method outperforms existing approaches in both quality and efficiency, offering high-quality, editable SVG across various design domains. Additionally, we proposed MMSVG-2M, a large-scale multimodal dataset with two million annotated SVG assets and a standardized evaluation protocol.", + "bbox": [ + 169, + 842, + 826, + 911 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f4018d8e6b84d4441104ddcc05ee39760d0faf2a95c81a23d8f047c80b19142b.jpg", + "image_caption": [ + "Figure 6: Qualitative Study on Parametrization." + ], + "image_footnote": [], + "bbox": [ + 506, + 521, + 808, + 792 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 491, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Extensive experiments show that OmniSVG surpasses prior SVG generation methods in various conditional generation tasks, highlighting its potential for integration into professional SVG design workflows.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Limitations and Future Work. During inference, OmniSVG generates tens of thousands of tokens for complex samples, which inevitably leads to a considerable generation time. OmniSVG is only bounded by vector style image prompt and fails on natural images. As for future work, recent endeavors on multi-token prediction [15, 2] and KV-cache compression [68, 3] provide a promising way to save the generation cost. 
Additionally, the auto-regressive nature of OmniSVG also unlocks future", + "bbox": [ + 169, + 138, + 503, + 277 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/9d402d8c09bf0bd876bc6d4063630a82a87d9bd5d4ba548e776d476dfc8b87fc.jpg", + "table_caption": [ + "Figure 7: User Study of OmniSVG and baselines." + ], + "table_footnote": [], + "table_body": "
Method | Preference↑ | Vividness↑ | Alignment↑
VectorFusion [22] | 35 | 58 | 76
SVGDreamer [60] | 41 | 65 | 79
Chat2SVG [56] | 55 | 61 | 86
IconShop [57] | 79 | 57 | 75
GPT-4o [21] | 38 | 54 | 80
StarVector(8B) [42] | 37 | 81 | 68
DiffVG [29] | 88 | 76 | 96
LIVE [34] | 86 | 70 | 95
OmniSVG | 96 | 88 | 98
", + "bbox": [ + 522, + 155, + 816, + 260 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "opportunities for in-context learning [67, 69, 47], chain-of-thought reasoning [55, 16], and multi-turn interleaved generation [20, 31], thereby providing a more precise user control.", + "bbox": [ + 169, + 277, + 823, + 306 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 171, + 325, + 339, + 343 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This work is in part supported by National Key R&D Program of China (Grant No. 2022ZD0160103), National Natural Science Foundation of China (Grant No. 62276067), and National Natural Science Foundation of China (Grant No. 62472104).", + "bbox": [ + 169, + 354, + 826, + 398 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The computations in this research were performed using the CFFF platform of Fudan University.", + "bbox": [ + 171, + 404, + 807, + 420 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025.", + "[2] Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, and Tri Dao. Medusa: Simple framework for accelerating IIm generation with multiple decoding heads. Retrieved December, 2023.", + "[3] Zefan Cai, Yichi Zhang, Bofei Gao, Yuliang Liu, Tianyu Liu, Keming Lu, Wayne Xiong, Yue Dong, Baobao Chang, Junjie Hu, et al. Pyramidkv: Dynamic kv cache compression based on pyramidal information tunneling. arXiv preprint arXiv:2406.02069, 2024.", + "[4] Alexandre Carlier, Martin Danelljan, Alexandre Alahi, and Radu Timofte. Deepsvg: A hierarchical generative network for vector graphics animation. NeurIPS, 2020.", + "[5] Sijin Chen, Xin Chen, Anqi Pang, Xianfang Zeng, Wei Cheng, Yijun Fu, Fukun Yin, Billzb Wang, Jingyi Yu, Gang Yu, et al. Meshxl: Neural coordinate field for generative 3d foundation models. NeurIPS, 2024.", + "[6] Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. L13da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In CVPR, 2024.", + "[7] Zehao Chen and Rong Pan. Svgbuilder: Component-based colored graphic generation with text-guided autoregressive transformers. arXiv preprint arXiv:2412.10488, 2024.", + "[8] Wei Cheng, Ruixiang Chen, Siming Fan, Wanqi Yin, Keyu Chen, Zhongang Cai, Jingbo Wang, Yang Gao, Zhengming Yu, Zhengyu Lin, et al. Dna-rendering: A diverse neural actor repository for high-fidelity human-centric rendering. In ICCV, 2023.", + "[9] Wei Cheng, Su Xu, Jingtan Piao, Chen Qian, Wayne Wu, Kwan-Yee Lin, and Hongsheng Li. Generalizable neural performer: Learning robust radiance fields for human novel view synthesis. arXiv preprint arXiv:2204.11798, 2022.", + "[10] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. JMLR, 2024.", + "[11] Louis Clouatre and Marc Demers. Figr: Few-shot image generation with reptile. 
arXiv preprint arXiv:1901.02199, 2019.", + "[12] Vision Cortex. Vtracer. https://www.visioncortex.org/vtracer-docs, 2023.", + "[13] Nyanko Devs. Danbooru2023: A large-scale crowdsourced and tagged anime illustration dataset. Hugging Face, 2023.", + "[14] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In CVPR, 2021.", + "[15] Fabian Gloeckle, Badr Youbi Idrissi, Baptiste Rozière, David Lopez-Paz, and Gabriel Synnaeve. Better & faster large language models via multi-token prediction. arXiv preprint arXiv:2404.19737, 2024.", + "[16] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[17] Han Guo, Songlin Yang, Tarushii Goel, Eric P Xing, Tri Dao, and Yoon Kim. Log-linear attention. arXiv preprint arXiv:2506.04761, 2025.", + "[18] David Ha and Douglas Eck. A neural representation of sketch drawings. In ICLR, 2018.", + "[19] Teng Hu, Ran Yi, Baihong Qian, Jiangning Zhang, Paul L Rosin, and Yu-Kun Lai. Supersvg: Superpixel-based scalable vector graphics synthesis. In CVPR, 2024.", + "[20] Minbin Huang, Yanxin Long, Xinchi Deng, Ruihang Chu, Jiangfeng Xiong, Xiaodan Liang, Hong Cheng, Qinglin Lu, and Wei Liu. Dialoggen: Multi-modal interactive dialogue system for multi-turn text-to-image generation. arXiv preprint arXiv:2403.08857, 2024.", + "[21] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + ], + "bbox": [ + 171, + 111, + 825, + 910 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[22] Ajay Jain, Amber Xie, and Pieter Abbeel. Vectorfusion: Text-to-sv by abstracting pixel-based diffusion models. In CVPR, 2023.", + "[23] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention. In International conference on machine learning, pages 5156-5165. PMLR, 2020.", + "[24] Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Muñoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, et al. The stack: 3 tb of permissively licensed source code. arXiv preprint arXiv:2211.15533, 2022.", + "[25] Kozea. Cairosvg. https://cairosvg.org/, 2023.", + "[26] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.", + "[27] Black Forest Labs. Flux.1Redux-dev. https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev, 2024.", + "[28] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023.", + "[29] Tzu-Mao Li, Michal Lukáč, Gharbi Michael, and Jonathan Ragan-Kelley. Differentiable vector graphics rasterization for editing and learning. SIGGRAPH Asia, 2020.", + "[30] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023.", + "[31] Ziyu Liu, Tao Chu, Yuhang Zang, Xilin Wei, Xiaoyi Dong, Pan Zhang, Zijian Liang, Yuanjun Xiong, Yu Qiao, Dahua Lin, et al. 
Mmdu: A multi-turn multi-image dialog understanding benchmark and instruction-tuning dataset for lvlms. arXiv preprint arXiv:2406.11833, 2024.", + "[32] Raphael Gontijo Lopes, David Ha, Douglas Eck, and Jonathon Shlens. A learned representation for scalable vector graphics. In CVPR, 2019.", + "[33] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.", + "[34] Xu Ma, Yuqian Zhou, Xingqian Xu, Bin Sun, Valerii Filev, Nikita Orlov, Yun Fu, and Humphrey Shi. Towards layer-wise image vectorization. In CVPR, 2022.", + "[35] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023.", + "[36] Dongwei Pan, Long Zhuo, Jingtan Piao, Huiwen Luo, Wei Cheng, Yuxin Wang, Siming Fan, Shengqi Liu, Lei Yang, Bo Dai, et al. Renderme-360: a large digital asset library and benchmarks towards high-fidelity head avatars. NeurIPS, 2023.", + "[37] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR, 2023.", + "[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021.", + "[39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021.", + "[40] Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. Zero: Memory optimizations toward training trillion parameter models. In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis. IEEE, 2020.", + "[41] Pradyumna Reddy, Michael Gharbi, Michal Lukac, and Niloy J Mitra. Im2vec: Synthesizing vector graphics without vector supervision. In CVPR, 2021.", + "[42] Juan A Rodriguez, Shubham Agarwal, Issam H Laradji, Pau Rodriguez, David Vazquez, Christopher Pal, and Marco Pedersoli. Starvector: Generating scalable vector graphics code from images. arXiv preprint arXiv:2312.11556, 2023." + ], + "bbox": [ + 173, + 90, + 826, + 910 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[43] Christoph Schuhmann. Improved aesthetic predictor. https://github.com/christophschuhmann/improved-aesthetic-predictor, 2022.", + "[44] I-Chao Shen and Bing-Yu Chen. Clipgen: A deep generative model for clipart vectorization and synthesis. TVCG, 2022.", + "[45] Yiren Song, Xuning Shao, Kang Chen, Weidong Zhang, Zhongliang Jing, and Minzhe Li. Clipvg: Text-guided image manipulation using differentiable vector graphics. In AAAI, 2023.", + "[46] Hao Su, Xuefeng Liu, Jianwei Niu, Jiahe Cui, Ji Wan, Xinghao Wu, and Nana Wang. Marvel: Raster gray-level manga vectorization via primitive-wise deep reinforcement learning. TCSVT, 2023.", + "[47] Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. 
In CVPR, 2024.", + "[48] Zecheng Tang, Chenfei Wu, Zekai Zhang, Mingheng Ni, Shengming Yin, Yu Liu, Zhengyuan Yang, Lijuan Wang, Zicheng Liu, Juntao Li, et al. Strokenuwa: Tokenizing strokes for vector graphic synthesis. arXiv preprint arXiv:2401.17093, 2024.", + "[49] Zecheng Tang, Chenfei Wu, Zekai Zhang, Mingheng Ni, Shengming Yin, Yu Liu, Zhengyuan Yang, Lijuan Wang, Zicheng Liu, Juntao Li, et al. Strokenuwa: Tokenizing strokes for vector graphic synthesis. arXiv preprint arXiv:2401.17093, 2024.", + "[50] Lucas Theis, Aäron van den Oord, and Matthias Bethge. A note on the evaluation of generative models. arXiv preprint arXiv:1511.01844, 2015.", + "[51] Yingtao Tian and David Ha. Modern evolution strategies for creativity: Fitting concrete images and abstract concepts. In Artificial Intelligence in Music, Sound, Art and Design, 2022.", + "[52] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024.", + "[53] Yizhi Wang and Zhouhui Lian. Deepvecfont: synthesizing high-quality vector fonts via dual-modality learning. TOG, 2021.", + "[54] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004.", + "[55] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. NeurIPS, 2022.", + "[56] Ronghuan Wu, Wanchao Su, and Jing Liao. Chat2svg: Vector graphics generation with large language models and image diffusion models. arXiv preprint arXiv:2411.16602, 2024.", + "[57] Ronghuan Wu, Wanchao Su, Kede Ma, and Jing Liao. Iconshop: Text-guided vector icon synthesis with autoregressive transformers. TOG, 2023.", + "[58] Xiaoshi Wu, Keqiang Sun, Feng Zhu, Rui Zhao, and Hongsheng Li. Human preference score: Better aligning text-to-image models with human preference. In ICCV, 2023.", + "[59] Ximing Xing, Juncheng Hu, Guotao Liang, Jing Zhang, Dong Xu, and Qian Yu. Empowering llms to understand and generate complex vector graphics. arXiv preprint arXiv:2412.11102, 2024.", + "[60] Ximing Xing, Haitao Zhou, Chuang Wang, Jing Zhang, Dong Xu, and Qian Yu. SVGdreamer: Text guided. \nsvg generation with diffusion model. In CVPR, 2024.", + "[61] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.", + "[62] Yiying Yang, Fukun Yin, Wen Liu, Jiayuan Fan, Xin Chen, Gang Yu, and Tao Chen. Pm-inr: Prior-rich multi-modal implicit large-scale scene neural representation. In AAAI, 2024.", + "[63] Fukun Yin, Xin Chen, Chi Zhang, Biao Jiang, Zibo Zhao, Wen Liu, Gang Yu, and Tao Chen. Shapept: 3d shape generation with a unified multi-modal language model. TMM, 2025.", + "[64] Alex Young, Bei Chen, Chao Li, Chengen Huang, Ge Zhang, Guanwei Zhang, Heng Li, Jiangcheng Zhu, Jianqun Chen, Jing Chang, et al. Yi: Open foundation models by 01. ai. arXiv preprint arXiv:2403.04652, 2024." 
+ ], + "bbox": [ + 173, + 90, + 826, + 910 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[65] Zhengming Yu, Wei Cheng, Xian Liu, Wayne Wu, and Kwan-Yee Lin. Monohuman: Animatable human neural field from monocular video. In CVPR, 2023.", + "[66] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018.", + "[67] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. What makes good examples for visual in-context learning? NeurIPS, 2023.", + "[68] Xiabin Zhou, Wenbin Wang, Minyan Zeng, Jiaxian Guo, Xuebo Liu, Li Shen, Min Zhang, and Liang Ding. Dynamicky: Task-aware adaptive kv cache compression for long context llms. arXiv preprint arXiv:2412.14838, 2024.", + "[69] Yucheng Zhou, Xiang Li, Qianning Wang, and Jianbing Shen. Visual in-context learning for large vision-language models. arXiv preprint arXiv:2402.11574, 2024.", + "[70] Bocheng Zou, Mu Cai, Jianrui Zhang, and Yong Jae Lee. Vgbench: A comprehensive benchmark of vector graphics understanding and generation for large language models. In EMNLP, 2024." + ], + "bbox": [ + 173, + 90, + 825, + 308 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 169, + 88, + 295, + 112 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A Additional Details of MMSVG-2M dataset", + "text_level": 1, + "bbox": [ + 171, + 128, + 565, + 146 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.1 Samples of MMSVG-2M Dataset", + "text_level": 1, + "bbox": [ + 171, + 161, + 447, + 176 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We visualize samples of our MMSVG-2M dataset in Fig. 8. In our MMSVG-2M dataset, $55\\%$ of the SVG samples belongs to the MMSVG-Icon, $25\\%$ belongs to the MMSVG-Illustration, and the rest $20\\%$ belongs to the MMSVG-Character. Among the SVG samples within the MMSVG-Character category, half of them comes from Freepik, while another half is generated by our data creation pipeline. We also collect image-SVG pairs for the character-reference SVG generation tasks during the generation process.", + "bbox": [ + 169, + 186, + 826, + 272 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/041793a88ae441883cee99e8fcd32d85151503f775b754e305d890cf08cd57b0.jpg", + "table_caption": [ + "Table 6: Data Statistics for MMSVG-2M. Our MMSVG-2M consists of 1.1 million SVG icons, 0.5 million SVG illustrations, and 0.4 million SVG anime characters." + ], + "table_footnote": [], + "table_body": "
Dataset | Train | Val | Total | Source | Token Length
MMSVG-Icon | 990k | 110k | 1,100k | Iconfont | 2.2k ± 0.9k
MMSVG-Illustration | 450k | 50k | 500k | IconScout | 8.1k ± 3.3k
MMSVG-Character | 350k | 50k | 400k | Freepik & generated | 28k ± 7.3k
", + "bbox": [ + 207, + 310, + 785, + 377 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.2 SVG-Image-Text Pairs Construction", + "text_level": 1, + "bbox": [ + 171, + 401, + 470, + 416 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Our MMSVG-2M dataset comprises two million SVG samples with the corresponding rasterized images. We generate captions on the rasterized images with BLIP-2 [28], thereby providing textual descriptions that enable us to fine-tune our model to follow these instructions. We use CairoSVG [25] for rasterization and remove samples that produced completely white images.", + "bbox": [ + 169, + 426, + 823, + 484 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Annotation. We employ an off-the-shelf VLM, specifically BLIP-2 [28], to generate SVG captions with the prompt below. To reduce hallucinations, we drop the samples with CLIP scores less than 30. We also visualize the distribution annotated keywords of MMSVG-2M dataset in Fig. 10 with word cloud format. And the instruction template for annotation is shown in Tab. 7.", + "bbox": [ + 169, + 488, + 826, + 546 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Instruction templates. MMSVGBench provides three tasks, including text-to-SVG task, image-to-SVG task and character-reference SVG generation task. Each task needs different instruction templates. For the text and image conditioning SVG generation, we provide the input text or image with VLM architecture. For character-reference SVG generation, we provide the natural character", + "bbox": [ + 169, + 550, + 826, + 608 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Instructions for Different Tasks", + "text_level": 1, + "bbox": [ + 197, + 636, + 423, + 650 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Employed BLIP2 for SVG Captioning: You are a helpful assistant. Your task is to describe this image in a single sentence, including the object, its color, and its overall arrangement. For example: \"Yellow cheers with glasses of alcohol drinks.\" / \"Heart emojis represent love on Valentine's Day.\"", + "- Text-to-SVG: You are a helpful SVG Generation assistant, designed to generate SVG. We provide the text description as input, generate SVG based on the text.", + "- Image-to-SVG: You are a helpful SVG Generation assistant, designed to generate SVG. We provide an image as input, generate SVG for this image.", + "- Character-Reference SVG Generation: You are a helpful SVG Generation assistant, designed to generate SVG. We provide a natural image as input, please generate the simplified character SVG based on the reference input image." + ], + "bbox": [ + 194, + 660, + 800, + 856 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 7: Instructions for Different Tasks. Instructions including annotation, text-to-SVG, image-to-SVG and character-reference SVG generation.", + "bbox": [ + 169, + 880, + 823, + 907 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/c5d2637f15e3c2c9618d7e718252bc140df50e4b622c6efe968a8714b6abd547.jpg", + "image_caption": [ + "Figure 8: Samples from MMSVG-2M Dataset. The proposed MMSVG-2M dataset can be separated into three subset, namely Icon, Illustration and Character. Samples from Icon, Illustration and part of Character subsets are downloaded from Internet. 
Another part of Character subset is generated by our data creation pipeline, which can provide image and SVG pairs for image prompting task." + ], + "image_footnote": [], + "bbox": [ + 189, + 87, + 803, + 378 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "reference image and the original image with the VLM architecture. The list of instruction templates for different tasks are shown in Tab. 7.", + "bbox": [ + 169, + 452, + 823, + 479 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.3 Character-SVG Pairs Construction", + "text_level": 1, + "bbox": [ + 171, + 500, + 460, + 513 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As illustrated in the Fig. 6, part of our proposed MMSVG-2M-Character subset is constructed using a generative pipeline. As shown in the pipeline diagram in Fig. 2, we employ a FLUX [26]-based generative model enhanced with a vector-style LoRA to enable the generation of SVG-style data. For image-based conditioning, we adopt FLUX-Redux [27], which injects image features via a SigLIP encoder and projects them into image embeddings. These embeddings are then concatenated with the text tokens as conditioning inputs for FLUX [26]. However, in practice, the original Redux [27] conditioning proves to be overly strong. To address this, we adopt a community-implemented variant of Redux that downsamples the image embeddings in 2D space. As observed in our experiments shown in Fig. 9, a downsampling factor between $2 \\times$ and $3 \\times$ yields the most reasonable SVG-style character references. Finally, we employ VTracer [12] to perform near-instant vectorization of the generated images. To construct the MMSVG-2M-Character subset, we first filter $103k$ character instances from the Danbooru [13] dataset and apply the aforementioned pipeline with motion and expression keywords like previous works [8, 9, 36, 65]. We compare the raw FLUX [26] outputs and their vectorized counterparts, retaining only those samples with PSNR and SSIM scores above a certain threshold as valid data.", + "bbox": [ + 169, + 526, + 826, + 733 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B Additional Details", + "text_level": 1, + "bbox": [ + 171, + 755, + 362, + 771 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.1 Scaling Up", + "text_level": 1, + "bbox": [ + 171, + 787, + 292, + 803 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "To study the effectiveness of scaling up multimodal SVG generation, we scale up OmniSVG from 4B to 8B parameters. We present training perplexity in Fig. 11, where both models are trained from scratch on 250 billion tokens. We show that, as the size of the model grows, the model achieves a lower validation perplexity, indicating a higher probability of producing the validation data.", + "bbox": [ + 169, + 814, + 823, + 871 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.2 Implementation Details", + "text_level": 1, + "bbox": [ + 171, + 888, + 380, + 902 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We train our models in bfloat16 with the ZeRO-2 strategy [40] for memory-efficient training. We also adopt the AdamW [33] optimizer with a learning rate decaying from $3 \\times 10^{-4}$ to $3 \\times 10^{-6}$ and a weight decay of 0.1 to train our model. 
In practice, we load the pre-trained weights from the Qwen2.5-VL [1] model and initialize the SVG embeddings from scratch. Without further specification, we generate SVGs with the top-k and top-p sampling strategy with $k = 50$ and $p = 0.95$ for diversity.", + "bbox": [ + 169, + 90, + 486, + 244 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C Additional Results", + "text_level": 1, + "bbox": [ + 171, + 268, + 366, + 286 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "As list in full comparisons in Tab. 2, including all the baselines mentioned in Sec. 5. For the text-to-SVG task, we compare our method with language-based (LLM-based) methods, including VectorFusion [22], SVGDreamer [60], Chat2SVG [56] and IconShop [57]. For image-to-SVG task, we compare our method with baseline methods across image vectorization and Multimodal Large Language Modeling ap", + "bbox": [ + 169, + 303, + 486, + 428 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "proaches, including LIVE [34], DiffVG [29], StarVector [42] and GPT-4o [21] using the official implementations with the hyperparameters proposed by the authors, and apply their pre- and post-processing code as required. Specifically, for the text-to-SVG task, the optimization-based method SVGDreamer excels in enhancing editability by employing a semantic-driven image vectorization process that effectively separates foreground objects from the background, while failing to handle complex scenes. Another optimization-based work, VectorFusion, stands out for generating SVG-exportable vector graphics without relying on large captioned datasets. However, Vectorfusion is also unable to handle complex scenarios and diverse styles. The significant problem with these optimization-based works is that the optimization time is too long. Generating an SVG usually takes more than ten minutes, which is too expensive. For the LLM-based method, Chat2SVG integrates Large Language Models (LLMs) with image diffusion models to create semantically rich SVG templates. However, Chat2SVG still needs to optimize the output SVG script from LLM, which introduces increased computational complexity and poses challenges during model training. In comparison, IconShop utilizes a transformer-based architecture to autoregressively model SVG path sequences, demonstrating exceptional performance in simplified icon SVGs, which offers effective solutions for text-to-SVG generation. It can only generate black simple Icon SVGs.", + "bbox": [ + 169, + 428, + 826, + 650 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/06747bc02f04585aabd024e7ccafb4f8d7839e47fc78f613ad60da4b3238485c.jpg", + "image_caption": [ + "Figure 10: Word Cloud Visualization of Label Distribution in the MMSVG-2M Dataset. The size of each label corresponds to its frequency of occurrence. The larger the label, the more frequently it appears in the dataset." + ], + "image_footnote": [], + "bbox": [ + 500, + 77, + 821, + 325 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/dbd2c1184a18cb173aa8a27178432adf7f48ac0b537a06ddb31d90e52a5f32cd.jpg", + "image_caption": [ + "Figure 9: Image Prompting Dataset Creation of MMSVG-2M Character. By utilizing FLUX-Redux and SVG vectorization tools, image prompting data pairs can be generated. We adipot FLUX-Redux downsampling scale with 2, 3 in practice by trading-off the character similarity and complexity of generated SVG." 
+ ], + "image_footnote": [], + "bbox": [ + 200, + 674, + 808, + 851 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/b122d2212096046f8e937b7fe80bc42797f0338f419bcb3492121a56f357ccd3.jpg", + "image_caption": [ + "(a) Training PPL for our models." + ], + "image_footnote": [], + "bbox": [ + 178, + 92, + 480, + 256 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/f2a3bf5441468ccd44b23b048b566fd1b98834e333baba08f597b0619c99d3b5.jpg", + "image_caption": [ + "(b) Validation PPL for our models.", + "Figure 11: Training and Validation Perplexity (PPL) for OmniSVG Models. We train all the models from scratch on 250 billion tokens. We observe that the performance grows with model sizes." + ], + "image_footnote": [], + "bbox": [ + 500, + 89, + 815, + 256 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For the image-to-SVG task, we compare our method with the image vectorization methods. LIVE allows progressive and efficient generation of SVGs, optimizing closed vector paths under raster image supervision with shape complexity control. However, LIVE needs to optimize for a long time when generating complex SVGs. DiffVG enables end-to-end differentiability in vector graphics rasterization, improving optimization through anti-aliasing and gradient-based methods while also is computationally expensive due to the complexity of the forward-backward rasterization process. Recently, the Multimodal Large Language Model (MLLM) based method StarVector leverages the visual understanding to apply accurate SVG primitive to the LLM architecture, which also can generate SVGs from both text and image inputs. However, it still fails to generate complex SVGs. Since Starvector [42] has not yet opened up its text-to-SVG model weights, our MMSVGBench does not evaluate Starvector's text-to-SVG capabilities. MMSVG-Bench also evaluates our methods with VLM methods, GPT-4o, to conduct a comprehensive assessment. We compare our method with these baselines on our MMSVG-2M dataset, from simple MMSVG-Icon dataset, a bit complex MMSVG-illustration dataset, to the very complex MMSVG-Character dataset.", + "bbox": [ + 169, + 339, + 826, + 534 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D More details of the baselines", + "text_level": 1, + "bbox": [ + 171, + 555, + 447, + 571 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D.1 Text-to-SVG Task", + "text_level": 1, + "bbox": [ + 171, + 588, + 341, + 602 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "SVGDreamer [60] uses a semantic-driven image vectorization (SIVE) process to separate foreground objects and background, improving editability. The SIVE process utilizes attention-based primitive control and an attention-mask loss function to manipulate individual elements effectively. To address issues in existing text-to-SVG generation methods, the proposed Vectorized Particle-based Score Distillation (VPSD) approach models SVGs as distributions of control points and colors, improving shape, color diversity, and convergence speed.", + "bbox": [ + 169, + 614, + 823, + 699 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "VectorFusion [22] leverages a text-conditioned diffusion model trained on pixel representations to generate SVG exportable vector graphics without needing large captioned SVG datasets. 
By optimizing a differentiable vector graphics rasterizer, it distills semantic knowledge from a pretrained diffusion model and uses Score Distillation Sampling to generate an SVG consistent with a caption. Experiments show that VectorFusion improves both quality and fidelity, offering a variety of styles such as pixel art and sketches.", + "bbox": [ + 169, + 704, + 823, + 787 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Chat2SVG [56] proposes a hybrid framework that combines the strengths of Large Language Models (LLMs) and image diffusion models for text-to-SVG generation. The approach first uses an LLM to create semantically meaningful SVG templates from basic geometric primitives. A dual-stage optimization pipeline, guided by image diffusion models, refines paths in latent space and adjusts point coordinates to enhance geometric complexity.", + "bbox": [ + 169, + 792, + 823, + 864 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "IconShop [57] uses a transformer-based architecture to encode path commands and learn to model SVG path sequences autoregressively. It has shown excellent results in simplified icon scenarios and provides a good solution to Text-to-SVG generation by extending the FIGR-8-SVG dataset with", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/1aa916043462691eacdf1ec6504ad4cdb68fdafec3028a2b4e705575becadf71.jpg", + "image_caption": [ + "Figure 12: Illustration of the SVG Generation Capabilities of OmniSVG." + ], + "image_footnote": [], + "bbox": [ + 205, + 147, + 787, + 824 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": " captions. We have access to their dataset and original splits and have trained our model on that data using a pre-trained checkpoint (trained on OmniVG dataset). We have extracted the results from IconShop and included them here to compare our method.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "LLM4SVG [59] is a framework that leverages Large Language Models (LLMs) to understand and generate Scalable Vector Graphics (SVGs). It employs a structured SVG encoding approach, utilizing learnable semantic tokens to accurately represent SVG components and their properties. This design enables LLMs to produce SVGs that are both semantically aligned with textual descriptions and visually coherent. However, LLM4SVG also has a maximum token length of 2048, limiting its ability to generate highly complex SVGs that require longer sequences.", + "bbox": [ + 169, + 138, + 826, + 224 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D.2 Image-to-SVG Task", + "text_level": 1, + "bbox": [ + 171, + 239, + 354, + 253 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "LIVE (Layer-wise Image Vectorization) [34] is a method for progressively generating SVGs that closely fit a given raster image by recursively adding and optimizing closed vector paths. Using a differentiable renderer (based on DiffVG [29]), LIVE enables direct optimization of paths under raster image supervision while controlling shape complexity by adjusting the number of path segments. 
It introduces component-wise path initialization, identifying key visual components to ensure efficient topology extraction and minimize redundant shapes.", + "bbox": [ + 169, + 263, + 823, + 348 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "DiffVG [29] is a landmark in vector graphics research, pioneering deep learning-based methods with the first differentiable vector graphics rasterization pipeline. By leveraging a combination of anti-aliasing techniques and gradient-based optimization, DiffVG ensures differentiability. Unlike methods relying on non-differentiable curve-to-mesh conversions, DiffVG employs a forward-backward rasterization process, where the forward pass generates antialiased images and the backward pass computes gradients with respect to vector graphic parameters.", + "bbox": [ + 169, + 354, + 825, + 438 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "StarVector [42] works directly in the SVG code space, leveraging visual understanding to apply accurate SVG primitives. StarVector employs a transformer-based architecture that integrates an image encoder with a language model, enabling it to process visual inputs and produce precise SVG code. StarVector effectively handles diverse SVG types, including icons, logos, and complex diagrams, demonstrating robust generalization across various vectorization tasks. However, with a 16k token context window, StarVector may struggle to process highly complex SVGs that require longer sequences.", + "bbox": [ + 169, + 444, + 823, + 542 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Vtracer [12] is an image processing algorithm designed to convert raster images into SVGs. The algorithm follows a three-step pipeline, which involves the hierarchical clustering of images for vectorization. Initially, the pixels are transformed into paths, which are subsequently simplified into polygons. 
In the final step, these polygons are smoothed and approximated using a Bezier curve fitter.", + "bbox": [ + 169, + 547, + 826, + 604 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_model.json b/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6c5f5faf256ba8b8eb6bdf8c95a6935ef7805b5c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_model.json @@ -0,0 +1,3023 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.283, + 0.058, + 0.716 + ], + "angle": 270, + "content": "arXiv:2504.06263v3 [cs.CV] 1 Dec 2025" + }, + { + "type": "title", + "bbox": [ + 0.214, + 0.123, + 0.784, + 0.171 + ], + "angle": 0, + "content": "OmniSVG: A Unified Scalable Vector Graphics Generation Model" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.219, + 0.783, + 0.274 + ], + "angle": 0, + "content": "Yiying Yang\\(^{1,2*}\\) Wei Cheng\\(^{2*}\\) Sijin Chen\\(^{1}\\) Xianfang Zeng\\(^{2}\\) Fukun Yin\\(^{1,2}\\) \nJiaxu Zhang\\(^{2}\\) Liao Wang\\(^{2}\\) Gang Yu\\(^{2\\ddagger}\\) Xingjun Ma\\(^{1\\ddagger}\\) Yu-Gang Jiang\\(^{1}\\) \n\\(^{1}\\) Fudan University \\(^{2}\\) StepFun" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.28, + 0.25, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.252, + 0.284, + 0.333, + 0.296 + ], + "angle": 0, + "content": "Project Page" + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.281, + 0.374, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.284, + 0.46, + 0.295 + ], + "angle": 0, + "content": "MMSVG-2M" + }, + { + "type": "image", + "bbox": [ + 0.478, + 0.28, + 0.501, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.283, + 0.602, + 0.295 + ], + "angle": 0, + "content": "MMSVGBench" + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.28, + 0.644, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.646, + 0.284, + 0.693, + 0.294 + ], + "angle": 0, + "content": "Models" + }, + { + "type": "image", + "bbox": [ + 0.711, + 0.28, + 0.746, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.737, + 0.284, + 0.771, + 0.294 + ], + "angle": 0, + "content": "Code" + }, + { + "type": "title", + "bbox": [ + 0.225, + 0.329, + 0.254, + 0.339 + ], + "angle": 0, + "content": "Icon" + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.342, + 0.296, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.349, + 0.329, + 0.42, + 0.339 + ], + "angle": 0, + "content": "Illustration" + }, + { + "type": "image", + "bbox": [ + 0.309, + 0.343, + 0.45, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.533, + 0.328, + 0.601, + 0.339 + ], + "angle": 0, + "content": "Character" + }, + { + "type": "image", + "bbox": [ + 0.456, + 0.343, + 0.685, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.715, + 0.319, + 0.781, + 0.329 + ], + "angle": 0, + "content": "Text-to-SVG" + }, + { + "type": "image", + "bbox": [ + 0.708, + 0.331, + 0.79, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.791, + 0.331, + 0.816, + 
0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.715, + 0.375, + 0.789, + 0.384 + ], + "angle": 0, + "content": "Image-to-SVG" + }, + { + "type": "image", + "bbox": [ + 0.715, + 0.385, + 0.807, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.711, + 0.43, + 0.813, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.484, + 0.609, + 0.497 + ], + "angle": 0, + "content": "Samples Generated by OmniSVG with Wide Complexity Range" + }, + { + "type": "text", + "bbox": [ + 0.731, + 0.485, + 0.792, + 0.496 + ], + "angle": 0, + "content": "Versatility" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.509, + 0.825, + 0.573 + ], + "angle": 0, + "content": "Figure 1: OmniSVG is capable of autoregressively generating high-quality Scalable Vector Graphs (SVG) across a wide spectrum of complexity, from simple icons to intricate anime characters. OmniSVG demonstrates remarkable versatility in generating high-quality SVGs adhering to multimodal instructions, covering tasks like Text-to-SVG, Image-to-SVG, and Character-Reference SVG, making it a powerful and flexible solution for diverse creative tasks." + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.59, + 0.538, + 0.605 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.621, + 0.767, + 0.844 + ], + "angle": 0, + "content": "Scalable Vector Graphics (SVG) is an important image format widely adopted in graphic design because of their resolution independence and editability. The development of autonomous SVG generation workflows is continuously drawing attention from both designers and researchers in the AIGC community. However, existing methods either produce unstructured outputs at huge computational cost or are limited to generating monochrome icons of over-simplified structures. To produce high-quality and complex SVG adhering to multi-modal instructions, we propose OmniSVG, a unified SVG generation framework that inherits knowledge from a pre-trained Vision-Language Model (VLM). By parameterizing SVG commands and coordinates into discrete token sequences, the auto-regressive nature enables us to seamlessly adapt modern VLMs to the direct SVG generation. To further advance the development of SVG synthesis, we introduce MMSVG-2M, a multimodal dataset with two million richly annotated SVG assets, along with a standardized evaluation protocol for conditional SVG generation tasks. Extensive experiments show that OmniSVG outperforms existing methods and demonstrates its potential for integration into professional SVG design workflows." + }, + { + "type": "page_footnote", + "bbox": [ + 0.199, + 0.874, + 0.576, + 0.888 + ], + "angle": 0, + "content": "* Yiying Yang and Wei Cheng contributed equally to this work." + }, + { + "type": "page_footnote", + "bbox": [ + 0.201, + 0.888, + 0.355, + 0.901 + ], + "angle": 0, + "content": "\\(\\ddagger\\) Corresponding Authors." + }, + { + "type": "footer", + "bbox": [ + 0.173, + 0.923, + 0.63, + 0.937 + ], + "angle": 0, + "content": "39th Conference on Neural Information Processing Systems (NeurIPS 2025)." 
+ } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.313, + 0.106 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.825, + 0.205 + ], + "angle": 0, + "content": "Scalable Vector Graphics (SVG) have become a cornerstone of modern digital design because of their resolution independence, compact file size, and inherent editability. Widely adopted in professional workflows from UI/UX design to industrial CAD systems, SVG enables precise manipulation of geometric primitives (e.g., Bezier curves, polygons) while maintaining high precision and consistent visual quality across varying resolutions. However, creating high-quality SVG content remains challenging for non-experts, requiring mastery of specialized tools or intricate XML syntax." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.211, + 0.825, + 0.239 + ], + "angle": 0, + "content": "Existing methods adopt either optimization-based methods or auto-regressive approaches to generate SVG contents." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.245, + 0.827, + 0.398 + ], + "angle": 0, + "content": "The optimization-based methods [34, 12, 29] iteratively refine the SVG parameters by minimizing the differences between the input image and the raster image created by differentiable vector graphics rasterizers. Though these methods are sufficient for reconstructing SVG icons, they suffer from significant computational overhead when scaling up to more intricate samples and produce unstructured outputs with redundant anchor points, harming the editability of the reconstructed SVG samples. In contrast, auto-regressive methods build transformer models or adapt pre-trained Large Language Models (LLMs) to directly generate XML parameters [59] or codes [56, 42] representing SVGs. Benefiting from the end-to-end learning pipeline, the auto-regressive method is a more scalable approach [5] as it can learn directly from a large collection of SVG samples. However, existing auto-regressive approaches are limited to basic SVG contents [11, 24, 53] because of the limited context length and the scarcity of complex SVG data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.403, + 0.827, + 0.543 + ], + "angle": 0, + "content": "In this paper, we propose OmniSVG that harnesses native VLMs [1] for various end-to-end multimodal SVG generation tasks. By parameterizing SVG coordinates and commands into discrete tokens, OmniSVG decouples structural logic from low-level geometry, mitigating the \"coordinate hallucination\" problem prevalent in code-based LLMs, and produces vivid and colorful SVG results. Additionally, the next token prediction training objective enables OmniSVG to complete SVGs with diverse generation results given some partial observations. Compared to traditional auto-regressive SVG generation methods, OmniSVG is able to parameterize SVGs exceeding \\(30k\\) tokens, facilitating the generation of detailed and complex SVG contents. Building upon pre-trained VLMs, our method natively integrates the ability to reason upon visual and textual instructions to synthesize editable, high-fidelity SVGs across diverse domains, from icons to intricate illustrations and anime characters." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.825, + 0.591 + ], + "angle": 0, + "content": "To advance the development of SVG synthesis, we introduce MMSVG-2M, a multi-modal SVG synthesis dataset with two million richly annotated assets, encompassing icons, illustrations, and anime designs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.596, + 0.825, + 0.64 + ], + "angle": 0, + "content": "We also establish a standardized evaluation protocol, MMSVG-Bench, for \"Text-to-SVG\" and \"Image-to-SVG\" generation. Extensive experiments show that OmniSVG can produce highly detailed and complex SVG contents, surpassing prior art both quantitatively and qualitatively." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.645, + 0.473, + 0.659 + ], + "angle": 0, + "content": "To summarize, our key contributions include:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.671, + 0.822, + 0.711 + ], + "angle": 0, + "content": "- We introduce OmniSVG, a family of end-to-end multimodal SVG generators that leverage native VLMs for generating complex and detailed SVGs, from simple icons to intricate anime characters." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.718, + 0.822, + 0.757 + ], + "angle": 0, + "content": "- We present MMSVG-2M, a large-scale dataset comprising two million SVG assets, along with a standardized evaluation protocol for various multi-modal SVG generation tasks providing a comprehensive resource for future research." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.764, + 0.822, + 0.802 + ], + "angle": 0, + "content": "- Extensive experiments show that OmniSVG surpasses prior SVG generation methods both qualitatively and quantitatively, highlighting its potential for integration into professional SVG design workflows." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.671, + 0.822, + 0.802 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.825, + 0.332, + 0.841 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "SVG Generation. Early attempts to generating SVGs directly utilize architectures like RNNs [18, 41, 19, 44, 45], VAEs [4, 32, 48, 46, 51], and Transformers [4, 57] to compress SVG commands into latent representations. Meanwhile, DeepSVG [4] further parameterizes SVGs using a dual transformer architecture but struggles with geometric consistency. Recently, the advent of large language models" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "(LLMs) [30, 64, 52, 61, 5, 6, 63, 62, 49] unleashes the potential of generating SVGs via XML code synthesis [59, 56, 42]. However, the limited context length of existing LLM-based SVG generation methods [56, 42, 59] poses significant challenges in handling complex SVGs that exceed \\(10k\\) tokens. In this paper, we explore the potential of native Vision-Language Models (VLMs) in multi-modal SVG generation. By combining pre-trained VLMs with SVG command parameterization, we validate that OmniSVG is able to follow multi-modal instructions and generate vivid and complex SVGs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.182, + 0.827, + 0.281 + ], + "angle": 0, + "content": "Image Vectorization. 
Recent advancements in vectorization harness diffusion models paired with differentiable rasterizers, using techniques like score distillation sampling [37, 22, 7] and specialized regularizers [29, 34] to convert raster images into SVG paths. While these methods achieve remarkable results, they face limitations such as over-smoothing, color over-saturation, and lack of editability, often producing tangled paths that fail to capture hierarchical structures inherent in professional SVG designs. In this paper, we present an end-to-end approach that follows multi-modal instructions to generate high-quality SVGs with improved path clarity and editability." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.285, + 0.828, + 0.452 + ], + "angle": 0, + "content": "SVG Datasets and Benchmarks. The lack of suitable datasets for complex SVG structures presents a significant challenge. Existing datasets [11, 24, 53] primarily focus on simplified path-based SVGs or monochrome icons, overlooking the intricate layered structures and rich color semantics found in real-world designs. For example, FIGR-8-SVG [11] focuses on monochromatic icons, while StarVector [42] proposes categorized datasets, including illustrations, icons, emojis, and fonts. Therefore, existing datasets only present simple SVG samples that do not exceed \\(8.2k\\) tokens, failing to capture the complexities of layered structures and rich color semantics. Benchmark evaluations, such as VGBench [70], further highlight gaps in multi-format testing and the absence of comprehensive coverage for illustrative SVGs. To this end, we introduce MMSVG-2M, a multimodal SVG synthesis dataset comprising two million richly annotated assets, including icons, illustrations, and complex anime designs. We also present a standardized evaluation protocol, MMSVG-Bench, to evaluate diverse multi-modal SVG generation tasks with varying complexity." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.472, + 0.364, + 0.487 + ], + "angle": 0, + "content": "3 OmniSVG Dataset" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.504, + 0.825, + 0.575 + ], + "angle": 0, + "content": "We present MMSVG-2M, a large-scale SVG dataset with two million SVG samples covering website icons, illustrations, graphic designs, anime characters, and etc (Sec. 3.1). To promote the downstream development of SVG generation methods, we also introduce MMSVG-Bench, a standardized evaluation protocol for a series of multi-modal instruction following tasks for conditional SVG generation (Sec. 3.2)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.592, + 0.308, + 0.606 + ], + "angle": 0, + "content": "3.1 MMSVG-2M" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.619, + 0.825, + 0.716 + ], + "angle": 0, + "content": "Data Source. With increasing visual complexity, MMSVG-2M consists of three subsets, 1) the icon subset MMSVG-Icon collected from Iconfont, 2) the illustration subset MMSVG-Illustration sourced from IconSount, and 3) the complex anime character subset MMSVG-Character both curated from Freepik and created by our data creation pipeline as shown in Fig. 2. All these websites are online platforms where users can publish and share SVGs, encompassing a broad variety of categories. Specifically, our collection of MMSVG-2M contains 1.1 million icons, 0.5 million illustrations, and 0.4 million anime characters as shown in Tab. 6." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.724, + 0.827, + 0.793 + ], + "angle": 0, + "content": "Data Curation. 
We extract SVG samples with a comprehensive dedduplication process based on filenames, SVG code, and metadata. We first fit the collected SVGs within a viewbox of \\(200 \\times 200\\). Then, we employ an off-the-shelf VLM, specifically BLIP-2 [28], to generate captions for the SVGs. Please find more samples from the MMSVG-2M dataset in Fig. 8, and instruction templates in Sec. A.2." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "SVG Simplification is an essential procedure in SVG data cleansing, since the over-complicated XML grammars in the crawled SVG data will lead to ambiguities while representing basic shapes. To standardize training and evaluation, we simplify all SVG commands with atomic commands as shown in Tab. 1. Inspired by FIGR-8-SVG [11] and IconShop [57], we remove all attributes and simplify each SVG with five basic commands, including \"Move To\" (M), \"Line To\" (L), \"Cubic Bezier\" (C), \"Elliptical Arc\" (A), \"ClosePath\" (Z). The introduction of atomic commands further removes the ambiguities, as complex XML grammars can be approximated with the combination of several atomic commands. To efficiently produce a unified and less complex data structure, we utilize" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.099, + 0.825, + 0.136 + ], + "angle": 0, + "content": "Table 1: SVG Draw Commands. Draw commands used in this work along with their arguments and a visualization are listed. The start-position \\((x_{1},y_{1})\\) is implicitly defined as the end-position of the preceding command." + }, + { + "type": "table", + "bbox": [ + 0.202, + 0.15, + 0.803, + 0.482 + ], + "angle": 0, + "content": "
<table>
<tr><td>Command</td><td>Arguments</td><td>Description</td><td>Visualization</td></tr>
<tr><td>&lt;SOP&gt;</td><td></td><td>‘Start-of-Path’ token.</td><td></td></tr>
<tr><td>M (MoveTo)</td><td>x2, y2</td><td>Move the cursor to the end-point (x2,y2) without drawing anything.</td><td>(x2,y2)</td></tr>
<tr><td>L (LineTo)</td><td>x2, y2</td><td>Draw a line to the point (x2,y2).</td><td>(x1,y1), (x2,y2)</td></tr>
<tr><td>C (Cubic Bézier)</td><td>qx1, qy1, qx2, qy2, x2, y2</td><td>Draw a cubic Bézier curve with control points (qx1,qy1), (qx2,qy2) and end-point (x2,y2).</td><td>(x1,y1), (qx1,qy1), (qx2,qy2), (x2,y2)</td></tr>
<tr><td>A (Elliptical Arc)</td><td>rx, ry, φ, fA, fS, x2, y2</td><td>Draw an elliptical arc with radii rx and ry (semi-major and semi-minor axes), rotated by angle φ to the x-axis, and end-point (x2,y2).</td><td>(x1,y1), (x2,y2)</td></tr>
<tr><td>Z (ClosePath)</td><td></td><td>Close the path by moving the cursor back to the path's starting position (x0,y0).</td><td>(x0,y0), (x1,y1)</td></tr>
<tr><td>F (Fill)</td><td>fill</td><td>Draw the fill attribute of the path.</td><td></td></tr>
<tr><td>&lt;EOS&gt;</td><td></td><td>‘End-of-SVG’ token.</td><td></td></tr>
</table>
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.497, + 0.825, + 0.54 + ], + "angle": 0, + "content": "picosvg to remove grammars like \"group\" and \"transform\", and simplify the complex commands to atomic path commands. It is worth noting that atomic path commands are sufficient to represent complex SVGs shown in Fig. 1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.558, + 0.327, + 0.571 + ], + "angle": 0, + "content": "3.2 MMSVG-Bench" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.584, + 0.825, + 0.723 + ], + "angle": 0, + "content": "To compensate for the vacancy of standardized and open evaluation for SVG generation, we introduce MMSVG-Bench, a comprehensive benchmark for multi-modal SVG generation. We require the corresponding benchmark to be a sufficient verification whether a model is practically useful in real-world scenarios, and avoid the excessive similarity between the benchmark input data and training data as in traditional train/test splits. Therefore, we opt to generate the benchmark inputs with GPT-4o. Specifically, for Text-to-SVG task, we synthesize 150 textual prompts for each SVG subset (i.e. Icon and Illustration). For Image-to-SVG task, we synthesize extra 150 textual descriptions, and prompt GPT-4o to generate vector-style images with transparent backgrounds based on the above texts as the ground truth visual samples. We focus on both the visual quality and semantics of the generation results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.825, + 0.773 + ], + "angle": 0, + "content": "Text-to-SVG requires a model to generate SVGs from text instructions. We measure the visual quality with Frechet Inception Distance (FID) [50], aesthetic appeal with Aesthetic score [43], text-SVG alignment with CLIP score [38], and Human Preference Scores (HPS) [58]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.78, + 0.827, + 0.837 + ], + "angle": 0, + "content": "Image-to-SVG evaluates a model's ability to convert images into SVGs. To quantify the distance between the input and output SVG, we calculate the cosine similarity of DinoV2 features (DinoScore) [35], Structural Similarity Index (SSIM) [54], Learned Perceptual Image Patch Similarity (LPIPS) [66], and Mean Squared Error (MSE)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.827, + 0.886 + ], + "angle": 0, + "content": "Character-Reference SVG Generation evaluates a model's ability to generate novel SVGs while keeping the profile of the characters depicted in the input image. Different from image-to-SVG, the model does not reconstruct, but generates a specific character SVG for the input image (see" + }, + { + "type": "page_footnote", + "bbox": [ + 0.198, + 0.899, + 0.496, + 0.913 + ], + "angle": 0, + "content": "https://github.com/googlefonts/picosvg" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.176, + 0.09, + 0.825, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.3, + 0.825, + 0.342 + ], + "angle": 0, + "content": "Figure 2: Overview of OmniSVG. OmniSVG is built on a pre-trained vision-language model Qwen2.5-VL and incorporates an SVG tokenizer. The model tokenizes both text and image inputs as prefix tokens, while the SVG tokenizer encodes vector graphics commands into a unified representation space." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.35, + 0.825, + 0.38 + ], + "angle": 0, + "content": "Fig. 5). We evaluate the alignment between input character images and generated SVGs by prompting GPT-4o [21] to generate a score ranging from 1 to 10, the higher the better. [15, 23, 17]" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.398, + 0.293, + 0.414 + ], + "angle": 0, + "content": "4 OmniSVG" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.825, + 0.472 + ], + "angle": 0, + "content": "To support end-to-end training for multi-modal SVG generation, OmniSVG parameterizes a series of atomic SVG path commands into a sequence before feeding into a pre-trained VLM with multi-modal instructions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.478, + 0.825, + 0.573 + ], + "angle": 0, + "content": "SVG Tokenizer. As illustrated in Sec. 3, our MMSVG-2M dataset simplifies an SVG by removing all attributes and using five basic path commands (see Tab. 1). After the simplification, an SVG script \\( G \\) is represented as the combination of \\( M \\) paths, \\( G = \\{P_i\\}_{i=1}^M \\). Here, \\( P_i \\) is the \\( i \\)-th path containing \\( N_i \\) commands, \\( P_i = \\{C_i^j\\}_{j=1}^{N_i} \\), where \\( C_i^j \\) is the \\( j \\)-th command in the \\( i \\)-th path. Each command is represented as \\( C_i^j = (U_i^j, V_i^j) \\), containing both the command type identifier \\( U_i^j \\in \\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\mathrm{Z}\\} \\) and the corresponding location argument \\( V_i^j \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.825, + 0.636 + ], + "angle": 0, + "content": "To generate colored SVG contents, we assign special tokens for hex values to control the \"Fill\" (F) attribute, distinguishing it from the original SVG commands and coordinates. To this end, we are able to use a total of six types of commands \\( U_{i}^{j} \\in \\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\mathrm{Z}, \\mathrm{F}\\} \\) to parameterize a colored SVG." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.642, + 0.827, + 0.794 + ], + "angle": 0, + "content": "Specifically, our SVG tokenizer transforms SVG scripts \\( X_{s} \\) into an ordered SVG token sequence within the same representation space as the pre-trained VLM. Following IconShop [57], we flatten the layered structure of the SVG script by concatenating different paths into a single command sequence, where each path begins with the drawing commands followed by point coordinates. Therefore, each SVG sequence could be represented as a flattened sequence. As the generation identifier, we apply special tokens like \\( <\\mathrm{SOP}> \\) and \\( <\\mathrm{EOS}> \\) to the two ends of an SVG sequence, identifying the beginning and ending of the sequence. We assign special tokens for each command type, i.e. \\( \\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\mathrm{Z}, \\mathrm{F}\\} \\). To shorten the length of the SVG sequence, we further merge the 2D point coordinates into one token with a mapping function \\( (x, y) \\rightarrow x \\times w + y \\), where \\( w \\) is the width of the image. The SVG sequence is then lifted into the same embedding space as the pre-trained VLM with a learnable embedding layer." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.8, + 0.827, + 0.858 + ], + "angle": 0, + "content": "Model Architecture.
OmniSVG adopts Qwen2.5-VL [1], an open-sourced VLM that excels at understanding intricate vision-text inputs, as its backbone (Fig. 2) to produce precise and compact SVG outputs. OmniSVG is trained to predict the SVG suffix tokens \\((x_{s})\\) conditioned on the multi-modal instruction prefix tokens \\((x_{c})\\) with the standard next-token prediction objective." + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.875, + 0.826, + 0.915 + ], + "angle": 0, + "content": "\\[\n\\theta^ {*} = \\arg \\max _ {\\theta} \\prod_ {i = 1} ^ {L} P \\left(x _ {s, i} \\mid x _ {s, < i}, x _ {c}\\right) \\tag {1}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.089, + 0.825, + 0.141 + ], + "angle": 0, + "content": "Table 2: Quantitative Evaluations. Quantitative results between OmniSVG and current state-of-the-art text-to-SVG and image-to-SVG baseline methods. The bold numbers and underlined numbers represent the best and second best performance respectively. Our OmniSVG model demonstrates superior performance compared SOTA SVG generation baselines." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.149, + 0.825, + 0.485 + ], + "angle": 0, + "content": "
<table>
<tr><td rowspan='2'>Evaluation Dataset</td><td rowspan='2'>Methods</td><td rowspan='2'># Tokens</td><td colspan='4'>Text-to-SVG</td><td colspan='4'>Image-to-SVG</td></tr>
<tr><td>FID↓</td><td>CLIP↑</td><td>Aesthetic↑</td><td>HPS↑</td><td>DINO↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>MSE↓</td></tr>
<tr><td rowspan='11'>MMSVG-Icon</td><td>Vectorfusion [22]</td><td>66.2k</td><td>250.77</td><td>0.240</td><td>4.76</td><td>0.237</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>SVGDreamer [60]</td><td>132.0k</td><td>308.94</td><td>0.207</td><td>4.26</td><td>0.221</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Chat2SVG [56]</td><td>0.6k</td><td>190.87</td><td>0.299</td><td>4.41</td><td>0.247</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>IconShop [57]</td><td>2.0k</td><td>213.28</td><td>0.288</td><td>4.55</td><td>0.244</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>LIVE [34]</td><td>52.5k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.932</td><td>0.943</td><td>0.106</td><td>0.011</td></tr>
<tr><td>DiffVG [29]</td><td>322.0k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.940</td><td>0.954</td><td>0.066</td><td>0.002</td></tr>
<tr><td>GPT-4o [21]</td><td>0.3k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.860</td><td>0.792</td><td>0.403</td><td>0.124</td></tr>
<tr><td>StarVector(8B) [42]</td><td>2.0k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.895</td><td>0.881</td><td>0.231</td><td>0.059</td></tr>
<tr><td>Vtracer</td><td>52.4k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.993</td><td>0.966</td><td>0.039</td><td>0.002</td></tr>
<tr><td>OmniSVG(4B)</td><td>3.8k</td><td>137.40</td><td>0.275</td><td>4.62</td><td>0.244</td><td>0.993</td><td>0.950</td><td>0.050</td><td>0.006</td></tr>
<tr><td>OmniSVG-L(8B)</td><td>5.7k</td><td>130.56</td><td>0.276</td><td>4.60</td><td>0.242</td><td>0.922</td><td>0.893</td><td>0.235</td><td>0.040</td></tr>
<tr><td rowspan='11'>MMSVG-Illustration</td><td>Vectorfusion [22]</td><td>66.1k</td><td>253.94</td><td>0.185</td><td>4.94</td><td>0.226</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>SVGDreamer [60]</td><td>132.0k</td><td>419.70</td><td>0.201</td><td>4.37</td><td>0.221</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Chat2SVG [56]</td><td>1.0k</td><td>210.03</td><td>0.283</td><td>4.45</td><td>0.250</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>IconShop [57]</td><td>2.6k</td><td>107.93</td><td>0.233</td><td>4.46</td><td>0.224</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>LIVE [34]</td><td>52.2k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.935</td><td>0.950</td><td>0.111</td><td>0.008</td></tr>
<tr><td>DiffVG [29]</td><td>322.0k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.945</td><td>0.955</td><td>0.065</td><td>0.001</td></tr>
<tr><td>GPT-4o [21]</td><td>0.4k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.875</td><td>0.854</td><td>0.373</td><td>0.077</td></tr>
<tr><td>StarVector(8B) [42]</td><td>2.6k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.877</td><td>0.900</td><td>0.238</td><td>0.046</td></tr>
<tr><td>Vtracer</td><td>57.6k</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.994</td><td>0.966</td><td>0.035</td><td>0.002</td></tr>
<tr><td>OmniSVG(4B)</td><td>5.8k</td><td>154.37</td><td>0.226</td><td>4.56</td><td>0.232</td><td>0.899</td><td>0.906</td><td>0.237</td><td>0.034</td></tr>
<tr><td>OmniSVG-L(8B)</td><td>6.9k</td><td>138.42</td><td>0.231</td><td>4.51</td><td>0.232</td><td>0.905</td><td>0.907</td><td>0.231</td><td>0.031</td></tr>
</table>
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.508, + 0.315, + 0.525 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.54, + 0.825, + 0.584 + ], + "angle": 0, + "content": "To validate the effectiveness of our method, we first introduce the baselines (Sec. 5.1). Then, we make quantitative comparisons with prior arts (Secs. 5.2 and 5.3) and conduct ablations (Sec. 5.4) to study the effectiveness of our design." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.6, + 0.279, + 0.613 + ], + "angle": 0, + "content": "5.1Baselines" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.626, + 0.827, + 0.71 + ], + "angle": 0, + "content": "For the text-to-SVG task, we compare our method with language-based (LLM-based) methods, including VectorFusion [22], SVGDreamer [60], Chat2SVG [56] and IconShop [57]. For image-to-SVG task, we compare our method with baseline methods across image vectorization and Multimodal Large Language Modeling approaches, including LIVE [34], DiffVG [29], StarVector [42], Vtracer [12] and GPT-4o [21] using the official implementations with the hyperparameters proposed by the authors, and apply their pre- and post-processing code as required." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.727, + 0.398, + 0.743 + ], + "angle": 0, + "content": "5.2 Quantitative Comparisons" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.827, + 0.797 + ], + "angle": 0, + "content": "We compare our OmniSVG with other baseline methods on the \"text-to-SVG\" and \"image-to-SVG\" tasks in our MMSVG-Bench. In addition to the metrics mentioned in Sec. 3, we also report the average token length (# tokens) of a generated SVG sample utilizing the Qwen2.5-VL [1] tokenizer." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.825, + 0.913 + ], + "angle": 0, + "content": "As shown in Tab. 2, OmniSVG demonstrates strong performance compared to state-of-the-art baselines in text-to-SVG generation, achieving superior FID scores and competitive CLIP score, aesthetic quality, and HPS. For image-to-SVG, OmniSVG also achieves competitive results with traditional vectorization methods, i.e. LIVE [34], DiffVG [29], and VTracer [12], but with a much shorter sequence length. When comparing to auto-regressive methods, i.e. GPT-4o [21] and StarVector [42], OmniSVG also achieves a superior performance across all metrics. The above results indicate that OmniSVG effectively balances the generation cost and the visual quality when generating SVGs according to multi-modal conditions." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.176, + 0.096, + 0.822, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.425, + 0.825, + 0.452 + ], + "angle": 0, + "content": "Figure 3: Qualitative Comparison with SOTA Methods on Text-to-SVG Task. We compare the propose method with SOTA Text-to-SVG methods on our evaluation benchmarks, namely Icon and Illustration." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.487, + 0.378, + 0.501 + ], + "angle": 0, + "content": "5.3 Qualitative Evaluations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.517, + 0.828, + 0.657 + ], + "angle": 0, + "content": "Text-to-SVG task. We compare our method with baseline approaches using seven distinct text prompts for the text-to-SVG task, as shown in Fig. 4. 
Optimization-based methods like SVGDreamer [60] and VectorFusion [22] require significant computation time due to their iterative optimization processes, which, while effective for refining SVG details, are computationally expensive. Auto-regressive methods, such as IconShop [57] and Chat2SVG [56], generate SVGs more quickly by leveraging pre-trained models but have notable limitations. IconShop produces monochrome SVGs, restricting its applicability, while Chat2SVG, though flexible, generates less detailed and semantically consistent SVGs in its first stage. Our OmniSVG consistently outperforms all baselines across various text prompts in generating high-fidelity SVGs with rich color, geometric accuracy, and the ability to handle complex visual cues." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.663, + 0.487, + 0.913 + ], + "angle": 0, + "content": "Image-to-SVG Task. We compare our method with classical image vectorization approaches, including DiffVG [29], LIVE [34], and VLM-based methods GPT-4o [21], StarVector [42] and Vtracer [12] As shown in Fig. 4, our method outperforms these baselines in both quality and efficiency. Optimization-based methods like DiffVG and LIVE perform well on simple icons but struggle with complex images, often generating visual artifacts. The GPT-4o model, while capable of generating SVGs for complex images, is limited to icon-level outputs and cannot handle detailed illustrations. StarVector excels at simple icons but fails to produce accurate SVGs for more intricate images, highlighting its limited generalization capability. Vtracer is an image processing algorithm designed to convert raster images into SVGs. In contrast, OmniSVG effi" + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.668, + 0.805, + 0.877 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.878, + 0.825, + 0.904 + ], + "angle": 0, + "content": "Figure 5: Generated SVG with Character-Reference (CRef) by OmniSVG." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.096, + 0.818, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.42, + 0.825, + 0.447 + ], + "angle": 0, + "content": "Figure 4: Qualitative Comparison with SOTA Methods on Image-to-SVG Task. We compare the propose method with SOTA Image-to-SVG methods on our evaluation benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.476, + 0.827, + 0.547 + ], + "angle": 0, + "content": "ciently converts a wide range of images, from icons to complex illustrations and character images, into high-quality, editable SVGs. This superior performance in handling diverse visual cues distinguishes OmniSVG from traditional vectorization methods. Additional visual results can be found in Fig. 12. We provide more detailed discussions with existing methods, particularly the recent works LLM4SVG [59] and StarVector [42], in the Sec. D." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.553, + 0.825, + 0.596 + ], + "angle": 0, + "content": "Character-Reference SVG generation task. As shown in Fig. 5, by training on MMSVG-Character with natural character image and SVG pair data, OmniSVG is capable of generating character SVGs through image references." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.614, + 0.327, + 0.628 + ], + "angle": 0, + "content": "5.4 Ablation studies" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.827, + 0.696 + ], + "angle": 0, + "content": "Effectiveness of SVG Parameterization. We present a comprehensive comparison of different SVG parameterization strategies with the traditional non-parameterized methods for SVG representation in large language models. We ablate the parameterization of both coordinate and color attributes of the SVG." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.702, + 0.825, + 0.759 + ], + "angle": 0, + "content": "The results, shown in Tab. 3 and Fig. 6, demonstrate that parameterizing both coordinate and color attributes yields better generation results under all metrics with the shortest token length. It further validates that the efficient token representation allows our method to generate complex SVGs with fewer computational resources. Additionally, qualitative results show that our method outperforms" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.793, + 0.825, + 0.82 + ], + "angle": 0, + "content": "Table 3: Quantitative Study on SVG Parameterization. Ablation studies on color parametrization (abbreviated as color param.) and coordinate parameterization (abbreviated as coord param.) are conducted." + }, + { + "type": "table", + "bbox": [ + 0.2, + 0.828, + 0.799, + 0.911 + ], + "angle": 0, + "content": "
<table>
<tr><td rowspan='2'>Methods</td><td colspan='4'>Text-to-SVG</td><td colspan='4'>Image-to-SVG</td><td rowspan='2'># Tokens</td></tr>
<tr><td>FID↓</td><td>CLIP↑</td><td>Aesthetic↑</td><td>HPS↑</td><td>DINO↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>MSE↓</td></tr>
<tr><td>w/o param.</td><td>218.76</td><td>0.185</td><td>3.43</td><td>0.138</td><td>0.741</td><td>0.718</td><td>0.315</td><td>0.182</td><td>18.5k</td></tr>
<tr><td>w/o coordinate param.</td><td>193.42</td><td>0.216</td><td>3.90</td><td>0.169</td><td>0.826</td><td>0.809</td><td>0.248</td><td>0.119</td><td>10.2k</td></tr>
<tr><td>w/o color param.</td><td>167.28</td><td>0.269</td><td>4.31</td><td>0.211</td><td>0.895</td><td>0.879</td><td>0.179</td><td>0.053</td><td>6.3k</td></tr>
<tr><td>OmniSVG(4B)</td><td>145.89</td><td>0.308</td><td>4.59</td><td>0.238</td><td>0.946</td><td>0.928</td><td>0.138</td><td>0.020</td><td>4.8k</td></tr>
</table>
" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.177, + 0.099, + 0.818, + 0.113 + ], + "angle": 0, + "content": "Table 4: Ablation of the Model Size. As the model size grows, the generated samples are of higher quality." + }, + { + "type": "table", + "bbox": [ + 0.188, + 0.119, + 0.805, + 0.202 + ], + "angle": 0, + "content": "
<table>
<tr><td rowspan='2'>Methods</td><td rowspan='2'>Input</td><td rowspan='2'>Size</td><td colspan='4'>Text-to-SVG</td><td colspan='4'>Image-to-SVG</td></tr>
<tr><td>FID↓</td><td>CLIP↑</td><td>Aesthetic↑</td><td>HPS↑</td><td>DINO↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>MSE↓</td></tr>
<tr><td>FLAN-T5-Base [10]</td><td>Text</td><td>223M</td><td>198.48</td><td>0.158</td><td>3.38</td><td>0.085</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>FLAN-T5-Large [10]</td><td>Text</td><td>770M</td><td>175.24</td><td>0.208</td><td>3.92</td><td>0.142</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>FLAN-T5-xl [10]</td><td>Text</td><td>3B</td><td>160.28</td><td>0.258</td><td>4.31</td><td>0.192</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>blip2-flan-t5-xl [28]</td><td>Text/Image</td><td>3.94B</td><td>152.11</td><td>0.235</td><td>4.48</td><td>0.215</td><td>0.898</td><td>0.891</td><td>0.255</td><td>0.041</td></tr>
<tr><td>OmniSVG(4B)</td><td>Text/Image</td><td>3.7B</td><td>145.89</td><td>0.308</td><td>4.59</td><td>0.238</td><td>0.946</td><td>0.928</td><td>0.138</td><td>0.020</td></tr>
</table>
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.212, + 0.825, + 0.255 + ], + "angle": 0, + "content": "others, particularly as SVG complexity increases. The non-parameterization method fails to generate SVGs for complex images. These findings underscore the effectiveness of our full parameterization strategy in balancing performance and resource efficiency for SVG generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.26, + 0.827, + 0.331 + ], + "angle": 0, + "content": "Ablation studies on model size. To analyze whether training a larger model benefits SVG generation, we evaluate OmniSVG base models with different sizes on the MMSVG-2M dataset in Tab. 4. We evaluate OmniSVG with base models of varying sizes on the MMSVG-2M dataset in Tab. 4 by progressively scaling up the model size. The results show that as the model size grows, we can generate SVG samples with a better quality." + }, + { + "type": "table_caption", + "bbox": [ + 0.37, + 0.351, + 0.626, + 0.364 + ], + "angle": 0, + "content": "Table 5: Ablation on VLM architecture." + }, + { + "type": "table", + "bbox": [ + 0.19, + 0.37, + 0.805, + 0.449 + ], + "angle": 0, + "content": "
<table>
<tr><td rowspan='2'>Vision Model</td><td rowspan='2'>Language Model</td><td colspan='4'>Text-to-SVG</td><td colspan='4'>Image-to-SVG</td></tr>
<tr><td>FID↓</td><td>CLIP↑</td><td>Aesthetic↑</td><td>HPS↑</td><td>DINO↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>MSE↓</td></tr>
<tr><td>CLIP</td><td>Qwen2.5</td><td>185.31</td><td>0.249</td><td>4.52</td><td>0.215</td><td>0.867</td><td>0.856</td><td>0.267</td><td>0.058</td></tr>
<tr><td>VQGAN</td><td>Qwen2.5</td><td>198.74</td><td>0.234</td><td>4.49</td><td>0.203</td><td>0.839</td><td>0.828</td><td>0.295</td><td>0.071</td></tr>
<tr><td colspan='2'>Qwen2.5-VL-3B-Instruct</td><td>145.89</td><td>0.308</td><td>4.59</td><td>0.238</td><td>0.946</td><td>0.928</td><td>0.138</td><td>0.020</td></tr>
<tr><td colspan='2'>Qwen2.5-VL-7B-Instruct</td><td>134.45</td><td>0.254</td><td>4.56</td><td>0.237</td><td>0.914</td><td>0.900</td><td>0.233</td><td>0.036</td></tr>
</table>
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.462, + 0.825, + 0.504 + ], + "angle": 0, + "content": "Ablation Studies on the VLM Architecture. To evaluate the effectiveness of the VLM architecture, we conducted an ablation study replacing it with alternative LLM-based architectures incorporating image encoders such as CLIP ViT-B/32 [39], VQGAN [14], and Qwen2.5-VL [1]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.51, + 0.486, + 0.551 + ], + "angle": 0, + "content": "The results in Tab. 5 show that Qwen2.5-VL consistently outperformed all alternatives under all evaluation metrics." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.558, + 0.487, + 0.737 + ], + "angle": 0, + "content": "User Study. We extract one-tenth of the samples from the evaluation dataset and conducted a user study with 15 participants to evaluate user preferences, vividness, and the alignment between text-to-SVG and image-to-SVG. Participants are asked to assess SVGs generated by different models based on 150 text descriptions and 150 image prompts, comparing the results generated using our method and baseline models. The results in Fig. 7 show that OmniSVG is widely preferred, with higher scores for vividness and superior semantic alignment with the input conditions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.757, + 0.308, + 0.772 + ], + "angle": 0, + "content": "6 Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.787, + 0.486, + 0.843 + ], + "angle": 0, + "content": "Conclusions. We introduce OmniSVG, a unified framework for multimodal SVG generation that leverages pre-trained Vision-Language Models (VLMs). By parameterizing SVG com" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.827, + 0.912 + ], + "angle": 0, + "content": "mands and coordinates as discrete tokens, OmniSVG efficiently decouples structural logic from geometry, addressing issues like \"coordinate hallucination\" while maintaining design expressiveness. Our method outperforms existing approaches in both quality and efficiency, offering high-quality, editable SVG across various design domains. Additionally, we proposed MMSVG-2M, a large-scale multimodal dataset with two million annotated SVG assets and a standardized evaluation protocol." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.522, + 0.809, + 0.793 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.798, + 0.813, + 0.813 + ], + "angle": 0, + "content": "Figure 6: Qualitative Study on Parametrization." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "Extensive experiments show that OmniSVG surpasses prior SVG generation methods in various conditional generation tasks, highlighting its potential for integration into professional SVG design workflows." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.504, + 0.278 + ], + "angle": 0, + "content": "Limitations and Future Work. During inference, OmniSVG generates tens of thousands of tokens for complex samples, which inevitably leads to a considerable generation time. OmniSVG is only bounded by vector style image prompt and fails on natural images. As for future work, recent endeavors on multi-token prediction [15, 2] and KV-cache compression [68, 3] provide a promising way to save the generation cost. 
Additionally, the auto-regressive nature of OmniSVG also unlocks future" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.136, + 0.825, + 0.151 + ], + "angle": 0, + "content": "Figure 7: User Study of OmniSVG and baselines." + }, + { + "type": "table", + "bbox": [ + 0.524, + 0.156, + 0.817, + 0.261 + ], + "angle": 0, + "content": "
<table>
<tr><td>Method</td><td>Preference↑</td><td>Vividness↑</td><td>Alignment↑</td></tr>
<tr><td>Vectorfusion [22]</td><td>35</td><td>58</td><td>76</td></tr>
<tr><td>SVGDreamer [60]</td><td>41</td><td>65</td><td>79</td></tr>
<tr><td>Chat2SVG [56]</td><td>55</td><td>61</td><td>86</td></tr>
<tr><td>IconShop [57]</td><td>79</td><td>57</td><td>75</td></tr>
<tr><td>GPT-4o [21]</td><td>38</td><td>54</td><td>80</td></tr>
<tr><td>StarVector(8B) [42]</td><td>37</td><td>81</td><td>68</td></tr>
<tr><td>DiffVG [29]</td><td>88</td><td>76</td><td>96</td></tr>
<tr><td>LIVE [34]</td><td>86</td><td>70</td><td>95</td></tr>
<tr><td>OmniSVG</td><td>96</td><td>88</td><td>98</td></tr>
</table>
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.279, + 0.825, + 0.308 + ], + "angle": 0, + "content": "opportunities for in-context learning [67, 69, 47], chain-of-thought reasoning [55, 16], and multi-turn interleaved generation [20, 31], thereby providing a more precise user control." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.326, + 0.341, + 0.344 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.356, + 0.827, + 0.4 + ], + "angle": 0, + "content": "This work is in part supported by National Key R&D Program of China (Grant No. 2022ZD0160103), National Natural Science Foundation of China (Grant No. 62276067), and National Natural Science Foundation of China (Grant No. 62472104)." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.405, + 0.808, + 0.421 + ], + "angle": 0, + "content": "The computations in this research were performed using the CFFF platform of Fudan University." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.112, + 0.826, + 0.141 + ], + "angle": 0, + "content": "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.147, + 0.826, + 0.174 + ], + "angle": 0, + "content": "[2] Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, and Tri Dao. Medusa: Simple framework for accelerating IIm generation with multiple decoding heads. Retrieved December, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.182, + 0.826, + 0.222 + ], + "angle": 0, + "content": "[3] Zefan Cai, Yichi Zhang, Bofei Gao, Yuliang Liu, Tianyu Liu, Keming Lu, Wayne Xiong, Yue Dong, Baobao Chang, Junjie Hu, et al. Pyramidkv: Dynamic kv cache compression based on pyramidal information tunneling. arXiv preprint arXiv:2406.02069, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.23, + 0.826, + 0.257 + ], + "angle": 0, + "content": "[4] Alexandre Carlier, Martin Danelljan, Alexandre Alahi, and Radu Timofte. Deepsvg: A hierarchical generative network for vector graphics animation. NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.264, + 0.826, + 0.292 + ], + "angle": 0, + "content": "[5] Sijin Chen, Xin Chen, Anqi Pang, Xianfang Zeng, Wei Cheng, Yijun Fu, Fukun Yin, Billzb Wang, Jingyi Yu, Gang Yu, et al. Meshxl: Neural coordinate field for generative 3d foundation models. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.299, + 0.826, + 0.338 + ], + "angle": 0, + "content": "[6] Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. L13da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.347, + 0.826, + 0.374 + ], + "angle": 0, + "content": "[7] Zehao Chen and Rong Pan. Svgbuilder: Component-based colored graphic generation with text-guided autoregressive transformers. arXiv preprint arXiv:2412.10488, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.381, + 0.826, + 0.421 + ], + "angle": 0, + "content": "[8] Wei Cheng, Ruixiang Chen, Siming Fan, Wanqi Yin, Keyu Chen, Zhongang Cai, Jingbo Wang, Yang Gao, Zhengming Yu, Zhengyu Lin, et al. Dna-rendering: A diverse neural actor repository for high-fidelity human-centric rendering. In ICCV, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.429, + 0.826, + 0.468 + ], + "angle": 0, + "content": "[9] Wei Cheng, Su Xu, Jingtan Piao, Chen Qian, Wayne Wu, Kwan-Yee Lin, and Hongsheng Li. Generalizable neural performer: Learning robust radiance fields for human novel view synthesis. arXiv preprint arXiv:2204.11798, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.476, + 0.826, + 0.515 + ], + "angle": 0, + "content": "[10] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. JMLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.523, + 0.826, + 0.551 + ], + "angle": 0, + "content": "[11] Louis Clouatre and Marc Demers. Figr: Few-shot image generation with reptile. arXiv preprint arXiv:1901.02199, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.558, + 0.708, + 0.573 + ], + "angle": 0, + "content": "[12] Vision Cortex. Vtracer. https://www.visioncortex.org/vtracer-docs, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.581, + 0.826, + 0.607 + ], + "angle": 0, + "content": "[13] Nyanko Devs. Danbooru2023: A large-scale crowdsourced and tagged anime illustration dataset. Hugging Face, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.616, + 0.826, + 0.643 + ], + "angle": 0, + "content": "[14] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.651, + 0.826, + 0.678 + ], + "angle": 0, + "content": "[15] Fabian Gloeckle, Badr Youbi Idrissi, Baptiste Rozière, David Lopez-Paz, and Gabriel Synnaeve. Better & faster large language models via multi-token prediction. arXiv preprint arXiv:2404.19737, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.685, + 0.826, + 0.725 + ], + "angle": 0, + "content": "[16] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.733, + 0.826, + 0.761 + ], + "angle": 0, + "content": "[17] Han Guo, Songlin Yang, Tarushii Goel, Eric P Xing, Tri Dao, and Yoon Kim. Log-linear attention. arXiv preprint arXiv:2506.04761, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.768, + 0.726, + 0.783 + ], + "angle": 0, + "content": "[18] David Ha and Douglas Eck. A neural representation of sketch drawings. In ICLR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.79, + 0.826, + 0.817 + ], + "angle": 0, + "content": "[19] Teng Hu, Ran Yi, Baihong Qian, Jiangning Zhang, Paul L Rosin, and Yu-Kun Lai. Supersvg: Superpixel-based scalable vector graphics synthesis. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.825, + 0.826, + 0.865 + ], + "angle": 0, + "content": "[20] Minbin Huang, Yanxin Long, Xinchi Deng, Ruihang Chu, Jiangfeng Xiong, Xiaodan Liang, Hong Cheng, Qinglin Lu, and Wei Liu. 
Dialoggen: Multi-modal interactive dialogue system for multi-turn text-to-image generation. arXiv preprint arXiv:2403.08857, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.873, + 0.826, + 0.911 + ], + "angle": 0, + "content": "[21] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.112, + 0.826, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.12 + ], + "angle": 0, + "content": "[22] Ajay Jain, Amber Xie, and Pieter Abbeel. Vectorfusion: Text-to-sv by abstracting pixel-based diffusion models. In CVPR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.128, + 0.826, + 0.167 + ], + "angle": 0, + "content": "[23] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention. In International conference on machine learning, pages 5156-5165. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.176, + 0.826, + 0.217 + ], + "angle": 0, + "content": "[24] Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Muñoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, et al. The stack: 3 tb of permissively licensed source code. arXiv preprint arXiv:2211.15533, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.225, + 0.52, + 0.24 + ], + "angle": 0, + "content": "[25] Kozea. Cairosvg. https://cairosvg.org/, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.248, + 0.718, + 0.263 + ], + "angle": 0, + "content": "[26] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.271, + 0.827, + 0.298 + ], + "angle": 0, + "content": "[27] Black Forest Labs. Flux.1Redux-dev. https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.307, + 0.825, + 0.334 + ], + "angle": 0, + "content": "[28] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.343, + 0.824, + 0.371 + ], + "angle": 0, + "content": "[29] Tzu-Mao Li, Michal Lukáč, Gharbi Michael, and Jonathan Ragan-Kelley. Differentiable vector graphics rasterization for editing and learning. SIGGRAPH Asia, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.379, + 0.826, + 0.394 + ], + "angle": 0, + "content": "[30] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.402, + 0.826, + 0.442 + ], + "angle": 0, + "content": "[31] Ziyu Liu, Tao Chu, Yuhang Zang, Xilin Wei, Xiaoyi Dong, Pan Zhang, Zijian Liang, Yuanjun Xiong, Yu Qiao, Dahua Lin, et al. Mmdu: A multi-turn multi-image dialog understanding benchmark and instruction-tuning dataset for lvlms. arXiv preprint arXiv:2406.11833, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.451, + 0.824, + 0.478 + ], + "angle": 0, + "content": "[32] Raphael Gontijo Lopes, David Ha, Douglas Eck, and Jonathon Shlens. A learned representation for scalable vector graphics. In CVPR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.486, + 0.824, + 0.513 + ], + "angle": 0, + "content": "[33] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.522, + 0.826, + 0.55 + ], + "angle": 0, + "content": "[34] Xu Ma, Yuqian Zhou, Xingqian Xu, Bin Sun, Valerii Filev, Nikita Orlov, Yun Fu, and Humphrey Shi. Towards layer-wise image vectorization. In CVPR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.558, + 0.826, + 0.598 + ], + "angle": 0, + "content": "[35] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.606, + 0.826, + 0.646 + ], + "angle": 0, + "content": "[36] Dongwei Pan, Long Zhuo, Jingtan Piao, Huiwen Luo, Wei Cheng, Yuxin Wang, Siming Fan, Shengqi Liu, Lei Yang, Bo Dai, et al. Renderme-360: a large digital asset library and benchmarks towards high-fidelity head avatars. NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.655, + 0.826, + 0.682 + ], + "angle": 0, + "content": "[37] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.691, + 0.826, + 0.731 + ], + "angle": 0, + "content": "[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.74, + 0.826, + 0.779 + ], + "angle": 0, + "content": "[39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.788, + 0.826, + 0.828 + ], + "angle": 0, + "content": "[40] Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. Zero: Memory optimizations toward training trillion parameter models. In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis. IEEE, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.837, + 0.826, + 0.863 + ], + "angle": 0, + "content": "[41] Pradyumna Reddy, Michael Gharbi, Michal Lukac, and Niloy J Mitra. Im2vec: Synthesizing vector graphics without vector supervision. In CVPR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.873, + 0.826, + 0.911 + ], + "angle": 0, + "content": "[42] Juan A Rodriguez, Shubham Agarwal, Issam H Laradji, Pau Rodriguez, David Vazquez, Christopher Pal, and Marco Pedersoli. Starvector: Generating scalable vector graphics code from images. arXiv preprint arXiv:2312.11556, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.12 + ], + "angle": 0, + "content": "[43] Christoph Schuhmann. Improved aesthetic predictor. https://github.com/christophschuhmann/improved-aesthetic-predictor, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.126, + 0.827, + 0.154 + ], + "angle": 0, + "content": "[44] I-Chao Shen and Bing-Yu Chen. Clipgen: A deep generative model for clipart vectorization and synthesis. TVCG, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.161, + 0.827, + 0.189 + ], + "angle": 0, + "content": "[45] Yiren Song, Xuning Shao, Kang Chen, Weidong Zhang, Zhongliang Jing, and Minzhe Li. Clipvg: Text-guided image manipulation using differentiable vector graphics. In AAAI, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.197, + 0.825, + 0.224 + ], + "angle": 0, + "content": "[46] Hao Su, Xuefeng Liu, Jianwei Niu, Jiahe Cui, Ji Wan, Xinghao Wu, and Nana Wang. Marvel: Raster gray-level manga vectorization via primitive-wise deep reinforcement learning. TCSVT, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.231, + 0.826, + 0.27 + ], + "angle": 0, + "content": "[47] Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.279, + 0.825, + 0.318 + ], + "angle": 0, + "content": "[48] Zecheng Tang, Chenfei Wu, Zekai Zhang, Mingheng Ni, Shengming Yin, Yu Liu, Zhengyuan Yang, Lijuan Wang, Zicheng Liu, Juntao Li, et al. Strokenuwa: Tokenizing strokes for vector graphic synthesis. arXiv preprint arXiv:2401.17093, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.326, + 0.825, + 0.366 + ], + "angle": 0, + "content": "[49] Zecheng Tang, Chenfei Wu, Zekai Zhang, Mingheng Ni, Shengming Yin, Yu Liu, Zhengyuan Yang, Lijuan Wang, Zicheng Liu, Juntao Li, et al. Strokenuwa: Tokenizing strokes for vector graphic synthesis. arXiv preprint arXiv:2401.17093, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.373, + 0.827, + 0.401 + ], + "angle": 0, + "content": "[50] Lucas Theis, Aäron van den Oord, and Matthias Bethge. A note on the evaluation of generative models. arXiv preprint arXiv:1511.01844, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.408, + 0.825, + 0.435 + ], + "angle": 0, + "content": "[51] Yingtao Tian and David Ha. Modern evolution strategies for creativity: Fitting concrete images and abstract concepts. In Artificial Intelligence in Music, Sound, Art and Design, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.443, + 0.825, + 0.482 + ], + "angle": 0, + "content": "[52] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.49, + 0.825, + 0.517 + ], + "angle": 0, + "content": "[53] Yizhi Wang and Zhouhui Lian. Deepvecfont: synthesizing high-quality vector fonts via dual-modality learning. TOG, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.525, + 0.825, + 0.552 + ], + "angle": 0, + "content": "[54] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.56, + 0.827, + 0.587 + ], + "angle": 0, + "content": "[55] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. NeurIPS, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.595, + 0.825, + 0.622 + ], + "angle": 0, + "content": "[56] Ronghuan Wu, Wanchao Su, and Jing Liao. Chat2svg: Vector graphics generation with large language models and image diffusion models. arXiv preprint arXiv:2411.16602, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.629, + 0.825, + 0.656 + ], + "angle": 0, + "content": "[57] Ronghuan Wu, Wanchao Su, Kede Ma, and Jing Liao. Iconshop: Text-guided vector icon synthesis with autoregressive transformers. TOG, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.664, + 0.825, + 0.691 + ], + "angle": 0, + "content": "[58] Xiaoshi Wu, Keqiang Sun, Feng Zhu, Rui Zhao, and Hongsheng Li. Human preference score: Better aligning text-to-image models with human preference. In ICCV, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.699, + 0.825, + 0.726 + ], + "angle": 0, + "content": "[59] Ximing Xing, Juncheng Hu, Guotao Liang, Jing Zhang, Dong Xu, and Qian Yu. Empowering llms to understand and generate complex vector graphics. arXiv preprint arXiv:2412.11102, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.734, + 0.825, + 0.761 + ], + "angle": 0, + "content": "[60] Ximing Xing, Haitao Zhou, Chuang Wang, Jing Zhang, Dong Xu, and Qian Yu. SVGdreamer: Text guided. \nsvg generation with diffusion model. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.769, + 0.825, + 0.796 + ], + "angle": 0, + "content": "[61] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.803, + 0.825, + 0.83 + ], + "angle": 0, + "content": "[62] Yiying Yang, Fukun Yin, Wen Liu, Jiayuan Fan, Xin Chen, Gang Yu, and Tao Chen. Pm-inr: Prior-rich multi-modal implicit large-scale scene neural representation. In AAAI, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.838, + 0.825, + 0.865 + ], + "angle": 0, + "content": "[63] Fukun Yin, Xin Chen, Chi Zhang, Biao Jiang, Zibo Zhao, Wen Liu, Gang Yu, and Tao Chen. Shapept: 3d shape generation with a unified multi-modal language model. TMM, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.873, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[64] Alex Young, Bei Chen, Chao Li, Chengen Huang, Ge Zhang, Guanwei Zhang, Heng Li, Jiangcheng Zhu, Jianqun Chen, Jing Chang, et al. Yi: Open foundation models by 01. ai. arXiv preprint arXiv:2403.04652, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.119 + ], + "angle": 0, + "content": "[65] Zhengming Yu, Wei Cheng, Xian Liu, Wayne Wu, and Kwan-Yee Lin. Monohuman: Animatable human neural field from monocular video. In CVPR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.127, + 0.825, + 0.155 + ], + "angle": 0, + "content": "[66] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.163, + 0.825, + 0.189 + ], + "angle": 0, + "content": "[67] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. What makes good examples for visual in-context learning? NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.198, + 0.825, + 0.236 + ], + "angle": 0, + "content": "[68] Xiabin Zhou, Wenbin Wang, Minyan Zeng, Jiaxian Guo, Xuebo Liu, Li Shen, Min Zhang, and Liang Ding. Dynamicky: Task-aware adaptive kv cache compression for long context llms. arXiv preprint arXiv:2412.14838, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.245, + 0.825, + 0.274 + ], + "angle": 0, + "content": "[69] Yucheng Zhou, Xiang Li, Qianning Wang, and Jianbing Shen. Visual in-context learning for large vision-language models. arXiv preprint arXiv:2402.11574, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.281, + 0.825, + 0.309 + ], + "angle": 0, + "content": "[70] Bocheng Zou, Mu Cai, Jianrui Zhang, and Yong Jae Lee. Vgbench: A comprehensive benchmark of vector graphics understanding and generation for large language models. In EMNLP, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.171, + 0.089, + 0.296, + 0.113 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.13, + 0.566, + 0.147 + ], + "angle": 0, + "content": "A Additional Details of MMSVG-2M dataset" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.162, + 0.448, + 0.178 + ], + "angle": 0, + "content": "A.1 Samples of MMSVG-2M Dataset" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.187, + 0.827, + 0.273 + ], + "angle": 0, + "content": "We visualize samples of our MMSVG-2M dataset in Fig. 8. In our MMSVG-2M dataset, \\(55\\%\\) of the SVG samples belongs to the MMSVG-Icon, \\(25\\%\\) belongs to the MMSVG-Illustration, and the rest \\(20\\%\\) belongs to the MMSVG-Character. Among the SVG samples within the MMSVG-Character category, half of them comes from Freepik, while another half is generated by our data creation pipeline. We also collect image-SVG pairs for the character-reference SVG generation tasks during the generation process." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.284, + 0.825, + 0.31 + ], + "angle": 0, + "content": "Table 6: Data Statistics for MMSVG-2M. Our MMSVG-2M consists of 1.1 million SVG icons, 0.5 million SVG illustrations, and 0.4 million SVG anime characters." + }, + { + "type": "table", + "bbox": [ + 0.208, + 0.311, + 0.787, + 0.378 + ], + "angle": 0, + "content": "
DatasetTrainValTotalSourceToken Length
MMSVG-Icon990k110k1,100kIconfont2.2k ± 0.9k
MMSVG-Illustration450k50k500kIconScout8.1k ± 3.3k
MMSVG-Character350k50k400kFreepik & generated28k ± 7.3k
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.402, + 0.471, + 0.417 + ], + "angle": 0, + "content": "A.2 SVG-Image-Text Pairs Construction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.428, + 0.825, + 0.485 + ], + "angle": 0, + "content": "Our MMSVG-2M dataset comprises two million SVG samples with the corresponding rasterized images. We generate captions on the rasterized images with BLIP-2 [28], thereby providing textual descriptions that enable us to fine-tune our model to follow these instructions. We use CairoSVG [25] for rasterization and remove samples that produced completely white images." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.489, + 0.827, + 0.547 + ], + "angle": 0, + "content": "Annotation. We employ an off-the-shelf VLM, specifically BLIP-2 [28], to generate SVG captions with the prompt below. To reduce hallucinations, we drop the samples with CLIP scores less than 30. We also visualize the distribution annotated keywords of MMSVG-2M dataset in Fig. 10 with word cloud format. And the instruction template for annotation is shown in Tab. 7." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.551, + 0.828, + 0.609 + ], + "angle": 0, + "content": "Instruction templates. MMSVGBench provides three tasks, including text-to-SVG task, image-to-SVG task and character-reference SVG generation task. Each task needs different instruction templates. For the text and image conditioning SVG generation, we provide the input text or image with VLM architecture. For character-reference SVG generation, we provide the natural character" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.637, + 0.424, + 0.651 + ], + "angle": 0, + "content": "Instructions for Different Tasks" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.661, + 0.801, + 0.719 + ], + "angle": 0, + "content": "- Employed BLIP2 for SVG Captioning: You are a helpful assistant. Your task is to describe this image in a single sentence, including the object, its color, and its overall arrangement. For example: \"Yellow cheers with glasses of alcohol drinks.\" / \"Heart emojis represent love on Valentine's Day.\"" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.73, + 0.801, + 0.76 + ], + "angle": 0, + "content": "- Text-to-SVG: You are a helpful SVG Generation assistant, designed to generate SVG. We provide the text description as input, generate SVG based on the text." + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.771, + 0.801, + 0.801 + ], + "angle": 0, + "content": "- Image-to-SVG: You are a helpful SVG Generation assistant, designed to generate SVG. We provide an image as input, generate SVG for this image." + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.812, + 0.802, + 0.857 + ], + "angle": 0, + "content": "- Character-Reference SVG Generation: You are a helpful SVG Generation assistant, designed to generate SVG. We provide a natural image as input, please generate the simplified character SVG based on the reference input image." + }, + { + "type": "list", + "bbox": [ + 0.195, + 0.661, + 0.802, + 0.857 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.881, + 0.825, + 0.909 + ], + "angle": 0, + "content": "Table 7: Instructions for Different Tasks. Instructions including annotation, text-to-SVG, image-to-SVG and character-reference SVG generation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.191, + 0.088, + 0.805, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.444 + ], + "angle": 0, + "content": "Figure 8: Samples from MMSVG-2M Dataset. The proposed MMSVG-2M dataset can be separated into three subset, namely Icon, Illustration and Character. Samples from Icon, Illustration and part of Character subsets are downloaded from Internet. Another part of Character subset is generated by our data creation pipeline, which can provide image and SVG pairs for image prompting task." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.453, + 0.825, + 0.481 + ], + "angle": 0, + "content": "reference image and the original image with the VLM architecture. The list of instruction templates for different tasks are shown in Tab. 7." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.5, + 0.462, + 0.515 + ], + "angle": 0, + "content": "A.3 Character-SVG Pairs Construction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.527, + 0.827, + 0.734 + ], + "angle": 0, + "content": "As illustrated in the Fig. 6, part of our proposed MMSVG-2M-Character subset is constructed using a generative pipeline. As shown in the pipeline diagram in Fig. 2, we employ a FLUX [26]-based generative model enhanced with a vector-style LoRA to enable the generation of SVG-style data. For image-based conditioning, we adopt FLUX-Redux [27], which injects image features via a SigLIP encoder and projects them into image embeddings. These embeddings are then concatenated with the text tokens as conditioning inputs for FLUX [26]. However, in practice, the original Redux [27] conditioning proves to be overly strong. To address this, we adopt a community-implemented variant of Redux that downsamples the image embeddings in 2D space. As observed in our experiments shown in Fig. 9, a downsampling factor between \\(2 \\times\\) and \\(3 \\times\\) yields the most reasonable SVG-style character references. Finally, we employ VTracer [12] to perform near-instant vectorization of the generated images. To construct the MMSVG-2M-Character subset, we first filter \\(103k\\) character instances from the Danbooru [13] dataset and apply the aforementioned pipeline with motion and expression keywords like previous works [8, 9, 36, 65]. We compare the raw FLUX [26] outputs and their vectorized counterparts, retaining only those samples with PSNR and SSIM scores above a certain threshold as valid data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.756, + 0.363, + 0.772 + ], + "angle": 0, + "content": "B Additional Details" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.788, + 0.294, + 0.804 + ], + "angle": 0, + "content": "B.1 Scaling Up" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.825, + 0.872 + ], + "angle": 0, + "content": "To study the effectiveness of scaling up multimodal SVG generation, we scale up OmniSVG from 4B to 8B parameters. We present training perplexity in Fig. 11, where both models are trained from scratch on 250 billion tokens. We show that, as the size of the model grows, the model achieves a lower validation perplexity, indicating a higher probability of producing the validation data." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.889, + 0.381, + 0.904 + ], + "angle": 0, + "content": "B.2 Implementation Details" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.488, + 0.246 + ], + "angle": 0, + "content": "We train our models in bfloat16 with the ZeRO-2 strategy [40] for memory-efficient training. We also adopt the AdamW [33] optimizer with a learning rate decaying from \\( 3 \\times 10^{-4} \\) to \\( 3 \\times 10^{-6} \\) and a weight decay of 0.1 to train our model. In practice, we load the pre-trained weights from the Qwen2.5-VL [1] model and initialize the SVG embeddings from scratch. Without further specification, we generate SVGs with the top-k and top-p sampling strategy with \\( k = 50 \\) and \\( p = 0.95 \\) for diversity." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.27, + 0.367, + 0.287 + ], + "angle": 0, + "content": "C Additional Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.304, + 0.487, + 0.429 + ], + "angle": 0, + "content": "As list in full comparisons in Tab. 2, including all the baselines mentioned in Sec. 5. For the text-to-SVG task, we compare our method with language-based (LLM-based) methods, including VectorFusion [22], SVGDreamer [60], Chat2SVG [56] and IconShop [57]. For image-to-SVG task, we compare our method with baseline methods across image vectorization and Multimodal Large Language Modeling ap" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.429, + 0.828, + 0.651 + ], + "angle": 0, + "content": "proaches, including LIVE [34], DiffVG [29], StarVector [42] and GPT-4o [21] using the official implementations with the hyperparameters proposed by the authors, and apply their pre- and post-processing code as required. Specifically, for the text-to-SVG task, the optimization-based method SVGDreamer excels in enhancing editability by employing a semantic-driven image vectorization process that effectively separates foreground objects from the background, while failing to handle complex scenes. Another optimization-based work, VectorFusion, stands out for generating SVG-exportable vector graphics without relying on large captioned datasets. However, Vectorfusion is also unable to handle complex scenarios and diverse styles. The significant problem with these optimization-based works is that the optimization time is too long. Generating an SVG usually takes more than ten minutes, which is too expensive. For the LLM-based method, Chat2SVG integrates Large Language Models (LLMs) with image diffusion models to create semantically rich SVG templates. However, Chat2SVG still needs to optimize the output SVG script from LLM, which introduces increased computational complexity and poses challenges during model training. In comparison, IconShop utilizes a transformer-based architecture to autoregressively model SVG path sequences, demonstrating exceptional performance in simplified icon SVGs, which offers effective solutions for text-to-SVG generation. It can only generate black simple Icon SVGs." + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.078, + 0.822, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.335, + 0.828, + 0.399 + ], + "angle": 0, + "content": "Figure 10: Word Cloud Visualization of Label Distribution in the MMSVG-2M Dataset. The size of each label corresponds to its frequency of occurrence. 
The larger the label, the more frequently it appears in the dataset." + }, + { + "type": "image", + "bbox": [ + 0.201, + 0.675, + 0.81, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.869, + 0.825, + 0.91 + ], + "angle": 0, + "content": "Figure 9: Image Prompting Dataset Creation of MMSVG-2M Character. By utilizing FLUX-Redux and SVG vectorization tools, image prompting data pairs can be generated. We adipot FLUX-Redux downsampling scale with 2, 3 in practice by trading-off the character similarity and complexity of generated SVG." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.093, + 0.481, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.262, + 0.432, + 0.277 + ], + "angle": 0, + "content": "(a) Training PPL for our models." + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.09, + 0.816, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.561, + 0.263, + 0.766, + 0.277 + ], + "angle": 0, + "content": "(b) Validation PPL for our models." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.285, + 0.825, + 0.313 + ], + "angle": 0, + "content": "Figure 11: Training and Validation Perplexity (PPL) for OmniSVG Models. We train all the models from scratch on 250 billion tokens. We observe that the performance grows with model sizes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.828, + 0.535 + ], + "angle": 0, + "content": "For the image-to-SVG task, we compare our method with the image vectorization methods. LIVE allows progressive and efficient generation of SVGs, optimizing closed vector paths under raster image supervision with shape complexity control. However, LIVE needs to optimize for a long time when generating complex SVGs. DiffVG enables end-to-end differentiability in vector graphics rasterization, improving optimization through anti-aliasing and gradient-based methods while also is computationally expensive due to the complexity of the forward-backward rasterization process. Recently, the Multimodal Large Language Model (MLLM) based method StarVector leverages the visual understanding to apply accurate SVG primitive to the LLM architecture, which also can generate SVGs from both text and image inputs. However, it still fails to generate complex SVGs. Since Starvector [42] has not yet opened up its text-to-SVG model weights, our MMSVGBench does not evaluate Starvector's text-to-SVG capabilities. MMSVG-Bench also evaluates our methods with VLM methods, GPT-4o, to conduct a comprehensive assessment. We compare our method with these baselines on our MMSVG-2M dataset, from simple MMSVG-Icon dataset, a bit complex MMSVG-illustration dataset, to the very complex MMSVG-Character dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.556, + 0.449, + 0.572 + ], + "angle": 0, + "content": "D More details of the baselines" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.589, + 0.343, + 0.603 + ], + "angle": 0, + "content": "D.1 Text-to-SVG Task" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.615, + 0.825, + 0.7 + ], + "angle": 0, + "content": "SVGDreamer [60] uses a semantic-driven image vectorization (SIVE) process to separate foreground objects and background, improving editability. 
The SIVE process utilizes attention-based primitive control and an attention-mask loss function to manipulate individual elements effectively. To address issues in existing text-to-SVG generation methods, the proposed Vectorized Particle-based Score Distillation (VPSD) approach models SVGs as distributions of control points and colors, improving shape, color diversity, and convergence speed." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.825, + 0.789 + ], + "angle": 0, + "content": "VectorFusion [22] leverages a text-conditioned diffusion model trained on pixel representations to generate SVG exportable vector graphics without needing large captioned SVG datasets. By optimizing a differentiable vector graphics rasterizer, it distills semantic knowledge from a pretrained diffusion model and uses Score Distillation Sampling to generate an SVG consistent with a caption. Experiments show that VectorFusion improves both quality and fidelity, offering a variety of styles such as pixel art and sketches." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.825, + 0.866 + ], + "angle": 0, + "content": "Chat2SVG [56] proposes a hybrid framework that combines the strengths of Large Language Models (LLMs) and image diffusion models for text-to-SVG generation. The approach first uses an LLM to create semantically meaningful SVG templates from basic geometric primitives. A dual-stage optimization pipeline, guided by image diffusion models, refines paths in latent space and adjusts point coordinates to enhance geometric complexity." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "IconShop [57] uses a transformer-based architecture to encode path commands and learn to model SVG path sequences autoregressively. It has shown excellent results in simplified icon scenarios and provides a good solution to Text-to-SVG generation by extending the FIGR-8-SVG dataset with" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.207, + 0.148, + 0.788, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.83, + 0.731, + 0.846 + ], + "angle": 0, + "content": "Figure 12: Illustration of the SVG Generation Capabilities of OmniSVG." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": " captions. We have access to their dataset and original splits and have trained our model on that data using a pre-trained checkpoint (trained on OmniVG dataset). We have extracted the results from IconShop and included them here to compare our method." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.225 + ], + "angle": 0, + "content": "LLM4SVG [59] is a framework that leverages Large Language Models (LLMs) to understand and generate Scalable Vector Graphics (SVGs). It employs a structured SVG encoding approach, utilizing learnable semantic tokens to accurately represent SVG components and their properties. This design enables LLMs to produce SVGs that are both semantically aligned with textual descriptions and visually coherent. However, LLM4SVG also has a maximum token length of 2048, limiting its ability to generate highly complex SVGs that require longer sequences." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.24, + 0.356, + 0.255 + ], + "angle": 0, + "content": "D.2 Image-to-SVG Task" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.265, + 0.825, + 0.349 + ], + "angle": 0, + "content": "LIVE (Layer-wise Image Vectorization) [34] is a method for progressively generating SVGs that closely fit a given raster image by recursively adding and optimizing closed vector paths. Using a differentiable renderer (based on DiffVG [29]), LIVE enables direct optimization of paths under raster image supervision while controlling shape complexity by adjusting the number of path segments. It introduces component-wise path initialization, identifying key visual components to ensure efficient topology extraction and minimize redundant shapes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.355, + 0.826, + 0.439 + ], + "angle": 0, + "content": "DiffVG [29] is a landmark in vector graphics research, pioneering deep learning-based methods with the first differentiable vector graphics rasterization pipeline. By leveraging a combination of anti-aliasing techniques and gradient-based optimization, DiffVG ensures differentiability. Unlike methods relying on non-differentiable curve-to-mesh conversions, DiffVG employs a forward-backward rasterization process, where the forward pass generates antialiased images and the backward pass computes gradients with respect to vector graphic parameters." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.825, + 0.543 + ], + "angle": 0, + "content": "StarVector [42] works directly in the SVG code space, leveraging visual understanding to apply accurate SVG primitives. StarVector employs a transformer-based architecture that integrates an image encoder with a language model, enabling it to process visual inputs and produce precise SVG code. StarVector effectively handles diverse SVG types, including icons, logos, and complex diagrams, demonstrating robust generalization across various vectorization tasks. However, with a 16k token context window, StarVector may struggle to process highly complex SVGs that require longer sequences." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.827, + 0.605 + ], + "angle": 0, + "content": "Vtracer [12] is an image processing algorithm designed to convert raster images into SVGs. The algorithm follows a three-step pipeline, which involves the hierarchical clustering of images for vectorization. Initially, the pixels are transformed into paths, which are subsequently simplified into polygons. In the final step, these polygons are smoothed and approximated using a Bezier curve fitter." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "20" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_origin.pdf b/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..099030ec126ee202caade3baa0b2d25159feb37a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62a7f23c272b8fa5c9c6b0ef33e2daca5ce0be98bfd460737ec283524c02d936 +size 7735328 diff --git a/data/2025/2504_06xxx/2504.06263/full.md b/data/2025/2504_06xxx/2504.06263/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5fdcb848a45dadbbb904299bff3ab1f752d408bb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/full.md @@ -0,0 +1,413 @@ +# OmniSVG: A Unified Scalable Vector Graphics Generation Model + +Yiying Yang $^{1,2*}$ Wei Cheng $^{2*}$ Sijin Chen $^{1}$ Xianfang Zeng $^{2}$ Fukun Yin $^{1,2}$ +Jiaxu Zhang $^{2}$ Liao Wang $^{2}$ Gang Yu $^{2\ddagger}$ Xingjun Ma $^{1\ddagger}$ Yu-Gang Jiang $^{1}$ $^{1}$ Fudan University $^{2}$ StepFun + +![](images/ba725e00ce2f094419da2329d07c4a29c4c5b39cf31f5c5cab46aca8243a4b94.jpg) + +Project Page + +![](images/923416a225130c97c601eacd176d61f636bb8514d9daa38229bd075edd1cde88.jpg) + +MMSVG-2M + +![](images/7d2fbfce65c3b19406e9aca6bef44f843f580a523a02d36a2e58d59d410b190b.jpg) + +MMSVGBench + +![](images/1f92246b71d2b71347d70f2d6d09408e5ac28af07385da8375387d51730ec9e0.jpg) + +Models + +![](images/64a4597fc022169aa9518e8f8138e7fed6d0e631485be9646c2900262d8721e4.jpg) + +Code + +# Icon + +![](images/159b5270617b8ba36e1cfeaf7311e08671a5b1b0eb436704a4b5cee27646eaeb.jpg) +Figure 1: OmniSVG is capable of autoregressively generating high-quality Scalable Vector Graphs (SVG) across a wide spectrum of complexity, from simple icons to intricate anime characters. OmniSVG demonstrates remarkable versatility in generating high-quality SVGs adhering to multimodal instructions, covering tasks like Text-to-SVG, Image-to-SVG, and Character-Reference SVG, making it a powerful and flexible solution for diverse creative tasks. + +# Illustration + +![](images/4279ac5b0b7bd39e0fbef10bbf6251dfdc8dd6f9b0ef61548e401177ef229a96.jpg) + +# Character + +![](images/aa3c3ece9f9bd13fb92b24dc92df6642857ae3e037a781c4949e294265ef189e.jpg) + +# Text-to-SVG + +![](images/22c950090cdb82fa973bb44474a61e96feba1d2d5b4006804615b7cc6584a3b0.jpg) + +![](images/d6ddb92fedfc27029a0187a1301eb25f0cd201b3585a66c8e60afa541a6c5379.jpg) + +# Image-to-SVG + +![](images/47003e91f94681858c0acae88d53a55388f807b9d3e547fe579b6f9e179f7526.jpg) + +![](images/4c37cb908f4690ee150ed69ea6d33667b65dc7b961a9774fc264effd63f957d6.jpg) + +Samples Generated by OmniSVG with Wide Complexity Range + +Versatility + +# Abstract + +Scalable Vector Graphics (SVG) is an important image format widely adopted in graphic design because of their resolution independence and editability. The development of autonomous SVG generation workflows is continuously drawing attention from both designers and researchers in the AIGC community. However, existing methods either produce unstructured outputs at huge computational cost or are limited to generating monochrome icons of over-simplified structures. 
To produce high-quality and complex SVG adhering to multi-modal instructions, we propose OmniSVG, a unified SVG generation framework that inherits knowledge from a pre-trained Vision-Language Model (VLM). By parameterizing SVG commands and coordinates into discrete token sequences, the auto-regressive nature enables us to seamlessly adapt modern VLMs to the direct SVG generation. To further advance the development of SVG synthesis, we introduce MMSVG-2M, a multimodal dataset with two million richly annotated SVG assets, along with a standardized evaluation protocol for conditional SVG generation tasks. Extensive experiments show that OmniSVG outperforms existing methods and demonstrates its potential for integration into professional SVG design workflows. + +# 1 Introduction + +Scalable Vector Graphics (SVG) have become a cornerstone of modern digital design because of their resolution independence, compact file size, and inherent editability. Widely adopted in professional workflows from UI/UX design to industrial CAD systems, SVG enables precise manipulation of geometric primitives (e.g., Bezier curves, polygons) while maintaining high precision and consistent visual quality across varying resolutions. However, creating high-quality SVG content remains challenging for non-experts, requiring mastery of specialized tools or intricate XML syntax. + +Existing methods adopt either optimization-based methods or auto-regressive approaches to generate SVG contents. + +The optimization-based methods [34, 12, 29] iteratively refine the SVG parameters by minimizing the differences between the input image and the raster image created by differentiable vector graphics rasterizers. Though these methods are sufficient for reconstructing SVG icons, they suffer from significant computational overhead when scaling up to more intricate samples and produce unstructured outputs with redundant anchor points, harming the editability of the reconstructed SVG samples. In contrast, auto-regressive methods build transformer models or adapt pre-trained Large Language Models (LLMs) to directly generate XML parameters [59] or codes [56, 42] representing SVGs. Benefiting from the end-to-end learning pipeline, the auto-regressive method is a more scalable approach [5] as it can learn directly from a large collection of SVG samples. However, existing auto-regressive approaches are limited to basic SVG contents [11, 24, 53] because of the limited context length and the scarcity of complex SVG data. + +In this paper, we propose OmniSVG that harnesses native VLMs [1] for various end-to-end multimodal SVG generation tasks. By parameterizing SVG coordinates and commands into discrete tokens, OmniSVG decouples structural logic from low-level geometry, mitigating the "coordinate hallucination" problem prevalent in code-based LLMs, and produces vivid and colorful SVG results. Additionally, the next token prediction training objective enables OmniSVG to complete SVGs with diverse generation results given some partial observations. Compared to traditional auto-regressive SVG generation methods, OmniSVG is able to parameterize SVGs exceeding $30k$ tokens, facilitating the generation of detailed and complex SVG contents. Building upon pre-trained VLMs, our method natively integrates the ability to reason upon visual and textual instructions to synthesize editable, high-fidelity SVGs across diverse domains, from icons to intricate illustrations and anime characters. 
+ +To advance the development of SVG synthesis, we introduce MMSVG-2M, a multi-modal SVG synthesis dataset with two million richly annotated assets, encompassing icons, illustrations, and anime designs. + +We also establish a standardized evaluation protocol, MMSVG-Bench, for "Text-to-SVG" and "Image-to-SVG" generation. Extensive experiments show that OmniSVG can produce highly detailed and complex SVG contents, surpassing prior art both quantitatively and qualitatively. + +To summarize, our key contributions include: + +- We introduce OmniSVG, a family of end-to-end multimodal SVG generators that leverage native VLMs for generating complex and detailed SVGs, from simple icons to intricate anime characters. +- We present MMSVG-2M, a large-scale dataset comprising two million SVG assets, along with a standardized evaluation protocol for various multi-modal SVG generation tasks providing a comprehensive resource for future research. +- Extensive experiments show that OmniSVG surpasses prior SVG generation methods both qualitatively and quantitatively, highlighting its potential for integration into professional SVG design workflows. + +# 2 Related Works + +SVG Generation. Early attempts to generating SVGs directly utilize architectures like RNNs [18, 41, 19, 44, 45], VAEs [4, 32, 48, 46, 51], and Transformers [4, 57] to compress SVG commands into latent representations. Meanwhile, DeepSVG [4] further parameterizes SVGs using a dual transformer architecture but struggles with geometric consistency. Recently, the advent of large language models + +(LLMs) [30, 64, 52, 61, 5, 6, 63, 62, 49] unleashes the potential of generating SVGs via XML code synthesis [59, 56, 42]. However, the limited context length of existing LLM-based SVG generation methods [56, 42, 59] poses significant challenges in handling complex SVGs that exceed $10k$ tokens. In this paper, we explore the potential of native Vision-Language Models (VLMs) in multi-modal SVG generation. By combining pre-trained VLMs with SVG command parameterization, we validate that OmniSVG is able to follow multi-modal instructions and generate vivid and complex SVGs. + +Image Vectorization. Recent advancements in vectorization harness diffusion models paired with differentiable rasterizers, using techniques like score distillation sampling [37, 22, 7] and specialized regularizers [29, 34] to convert raster images into SVG paths. While these methods achieve remarkable results, they face limitations such as over-smoothing, color over-saturation, and lack of editability, often producing tangled paths that fail to capture hierarchical structures inherent in professional SVG designs. In this paper, we present an end-to-end approach that follows multi-modal instructions to generate high-quality SVGs with improved path clarity and editability. + +SVG Datasets and Benchmarks. The lack of suitable datasets for complex SVG structures presents a significant challenge. Existing datasets [11, 24, 53] primarily focus on simplified path-based SVGs or monochrome icons, overlooking the intricate layered structures and rich color semantics found in real-world designs. For example, FIGR-8-SVG [11] focuses on monochromatic icons, while StarVector [42] proposes categorized datasets, including illustrations, icons, emojis, and fonts. Therefore, existing datasets only present simple SVG samples that do not exceed $8.2k$ tokens, failing to capture the complexities of layered structures and rich color semantics. 
Benchmark evaluations, such as VGBench [70], further highlight gaps in multi-format testing and the absence of comprehensive coverage for illustrative SVGs. To this end, we introduce MMSVG-2M, a multimodal SVG synthesis dataset comprising two million richly annotated assets, including icons, illustrations, and complex anime designs. We also present a standardized evaluation protocol, MMSVG-Bench, to evaluate diverse multi-modal SVG generation tasks with varying complexity. + +# 3 OmniSVG Dataset + +We present MMSVG-2M, a large-scale SVG dataset with two million SVG samples covering website icons, illustrations, graphic designs, anime characters, and etc (Sec. 3.1). To promote the downstream development of SVG generation methods, we also introduce MMSVG-Bench, a standardized evaluation protocol for a series of multi-modal instruction following tasks for conditional SVG generation (Sec. 3.2). + +# 3.1 MMSVG-2M + +Data Source. With increasing visual complexity, MMSVG-2M consists of three subsets, 1) the icon subset MMSVG-Icon collected from Iconfont, 2) the illustration subset MMSVG-Illustration sourced from IconSount, and 3) the complex anime character subset MMSVG-Character both curated from Freepik and created by our data creation pipeline as shown in Fig. 2. All these websites are online platforms where users can publish and share SVGs, encompassing a broad variety of categories. Specifically, our collection of MMSVG-2M contains 1.1 million icons, 0.5 million illustrations, and 0.4 million anime characters as shown in Tab. 6. + +Data Curation. We extract SVG samples with a comprehensive dedduplication process based on filenames, SVG code, and metadata. We first fit the collected SVGs within a viewbox of $200 \times 200$ . Then, we employ an off-the-shelf VLM, specifically BLIP-2 [28], to generate captions for the SVGs. Please find more samples from the MMSVG-2M dataset in Fig. 8, and instruction templates in Sec. A.2. + +SVG Simplification is an essential procedure in SVG data cleansing, since the over-complicated XML grammars in the crawled SVG data will lead to ambiguities while representing basic shapes. To standardize training and evaluation, we simplify all SVG commands with atomic commands as shown in Tab. 1. Inspired by FIGR-8-SVG [11] and IconShop [57], we remove all attributes and simplify each SVG with five basic commands, including "Move To" (M), "Line To" (L), "Cubic Bezier" (C), "Elliptical Arc" (A), "ClosePath" (Z). The introduction of atomic commands further removes the ambiguities, as complex XML grammars can be approximated with the combination of several atomic commands. To efficiently produce a unified and less complex data structure, we utilize + +Table 1: SVG Draw Commands. Draw commands used in this work along with their arguments and a visualization are listed. The start-position $(x_{1},y_{1})$ is implicitly defined as the end-position of the preceding command. + +
| Command | Arguments | Description | Visualization |
| --- | --- | --- | --- |
| `<SOP>` | ∅ | 'Start-of-Path' token. | ∅ |
| M (MoveTo) | x2, y2 | Move the cursor to the end-point (x2, y2) without drawing anything. | (x2, y2) |
| L (LineTo) | x2, y2 | Draw a line to the point (x2, y2). | (x1, y1), (x2, y2) |
| C (Cubic Bézier) | qx1, qy1, qx2, qy2, x2, y2 | Draw a cubic Bézier curve with control points (qx1, qy1), (qx2, qy2) and end-point (x2, y2). | (x1, y1), (qx1, qy1), (qx2, qy2), (x2, y2) |
| A (Elliptical Arc) | rx, ry, φ, fA, fS, x2, y2 | Draw an elliptical arc with radii rx and ry (semi-major and semi-minor axes), rotated by angle φ to the x-axis, and end-point (x2, y2). | (x1, y1), (x2, y2), fA = 1, fS = 1 |
| Z (ClosePath) | ∅ | Close the path by moving the cursor back to the path's starting position (x0, y0). | (x0, y0), (x1, y1) |
| F (Fill) | fill | Draw the fill attribute of the path. | ∅ |
| `<EOS>` | ∅ | 'End-of-SVG' token. | ∅ |
+ +picosvg to remove grammars like "group" and "transform", and simplify the complex commands to atomic path commands. It is worth noting that atomic path commands are sufficient to represent complex SVGs shown in Fig. 1. + +# 3.2 MMSVG-Bench + +To compensate for the vacancy of standardized and open evaluation for SVG generation, we introduce MMSVG-Bench, a comprehensive benchmark for multi-modal SVG generation. We require the corresponding benchmark to be a sufficient verification whether a model is practically useful in real-world scenarios, and avoid the excessive similarity between the benchmark input data and training data as in traditional train/test splits. Therefore, we opt to generate the benchmark inputs with GPT-4o. Specifically, for Text-to-SVG task, we synthesize 150 textual prompts for each SVG subset (i.e. Icon and Illustration). For Image-to-SVG task, we synthesize extra 150 textual descriptions, and prompt GPT-4o to generate vector-style images with transparent backgrounds based on the above texts as the ground truth visual samples. We focus on both the visual quality and semantics of the generation results. + +Text-to-SVG requires a model to generate SVGs from text instructions. We measure the visual quality with Frechet Inception Distance (FID) [50], aesthetic appeal with Aesthetic score [43], text-SVG alignment with CLIP score [38], and Human Preference Scores (HPS) [58]. + +Image-to-SVG evaluates a model's ability to convert images into SVGs. To quantify the distance between the input and output SVG, we calculate the cosine similarity of DinoV2 features (DinoScore) [35], Structural Similarity Index (SSIM) [54], Learned Perceptual Image Patch Similarity (LPIPS) [66], and Mean Squared Error (MSE). + +Character-Reference SVG Generation evaluates a model's ability to generate novel SVGs while keeping the profile of the characters depicted in the input image. Different from image-to-SVG, the model does not reconstruct, but generates a specific character SVG for the input image (see + +![](images/a091a9687594e4c4fa9988fcbe29d8f537e1b6f79b8e6716b558e58af1afa32c.jpg) +Figure 2: Overview of OmniSVG. OmniSVG is built on a pre-trained vision-language model Qwen2.5-VL and incorporates an SVG tokenizer. The model tokenizes both text and image inputs as prefix tokens, while the SVG tokenizer encodes vector graphics commands into a unified representation space. +Fig. 5). We evaluate the alignment between input character images and generated SVGs by prompting GPT-4o [21] to generate a score ranging from 1 to 10, the higher the better. [15, 23, 17] + +# 4 OmniSVG + +To support end-to-end training for multi-modal SVG generation, OmniSVG parameterizes a series of atomic SVG path commands into a sequence before feeding into a pre-trained VLM with multi-modal instructions. + +SVG Tokenizer. As illustrated in Sec. 3, our MMSVG-2M dataset simplifies an SVG by removing all attributes and using five basic path commands (see Tab. 1). After the simplification, an SVG script $G$ is represented as the combination of $M$ paths, $G = \{P_i\}_{i=1}^M$ . Here, $P_i$ is the $i$ -th path containing $N_i$ commands, $P_i = \{C_i^j\}_{j=1}^{N_i}$ , where $C_i^j$ is the $j$ -th command in the $i$ -th path. Each command is represented as $C_i^j = (U_i^j, V_i^j)$ , containing both the command type identifier $U_i^j \in \{\mathrm{M}, \mathrm{L}, \mathrm{C}, \mathrm{A}, \mathrm{Z}\}$ and the corresponding location argument $V_i^j$ . 
+ +To generate colored SVG contents, we assign special tokens for hex values to control the "Fill" (F) attribute, distinguishing it from the original SVG commands and coordinates. To this end, we are able to use a total six types of commands $U_{i}^{j} \in \{\mathrm{M}, \mathrm{L}, \mathrm{C}, \mathrm{A}, \mathrm{Z}, \mathrm{F}\}$ to parameterize a colored SVG parameterization. + +Specifically, our SVG tokenizer transforms SVG scripts $X_{s}$ into an ordered SVG token sequence within the same representation space as the pre-trained VLM. Following IconShop [57], we flatten the layered structure of the SVG script by concatenating different paths into a single command sequence, where each path begins with the drawing commands followed by point coordinates. Therefore, each SVG sequence could be represented as a flattened sequence. As the generation identifier, we apply special tokens like $<\mathrm{SOP}>$ and $<\mathrm{EOS}>$ to the two ends of a SVG sequence, identifying the beginning and ending of a SVG sequence. We assign special tokens for each command type, i.e. $\{\mathrm{M}, \mathrm{L}, \mathrm{C}, \mathrm{A}, \bar{\mathrm{Z}}, \mathrm{F}\}$ . To shorten the length of the SVG sequence, we further merge the 2D point coordinates into one token with a mapping function: $ \rightarrow x \times w + y$ , where $w$ is the width of the image. The SVG sequence are then lifted into the same embedding space as the pre-trained VLM with a learnable embedding layer. + +Model Architecture. OmniSVG adopts Qwen2.5-VL [1], an open-sourced VLM that excels at understanding intricate vision-text inputs, as its backbone (Fig. 2) to produce precise and compact SVG outputs. OmniSVG is trained to predict the SVG suffix tokens $(x_{s})$ conditioned on the multi-modal instruction prefix tokens $(x_{c})$ with the standard next-token prediction objective. + +$$ +\theta^ {*} = \arg \max _ {\theta} \prod_ {i = 1} ^ {L} P \left(x _ {s, i} \mid x _ {s, < i}, x _ {c}\right) \tag {1} +$$ + +Table 2: Quantitative Evaluations. Quantitative results between OmniSVG and current state-of-the-art text-to-SVG and image-to-SVG baseline methods. The bold numbers and underlined numbers represent the best and second best performance respectively. Our OmniSVG model demonstrates superior performance compared SOTA SVG generation baselines. + +
| Evaluation Dataset | Methods | # Tokens | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| MMSVG-Icon | Vectorfusion [22] | 66.2k | 250.77 | 0.240 | 4.76 | 0.237 | - | - | - | - |
| | SVGDreamer [60] | 132.0k | 308.94 | 0.207 | 4.26 | 0.221 | - | - | - | - |
| | Chat2SVG [56] | 0.6k | 190.87 | 0.299 | 4.41 | 0.247 | - | - | - | - |
| | IconShop [57] | 2.0k | 213.28 | 0.288 | 4.55 | 0.244 | - | - | - | - |
| | LIVE [34] | 52.5k | - | - | - | - | 0.932 | 0.943 | 0.106 | 0.011 |
| | DiffVG [29] | 322.0k | - | - | - | - | 0.940 | 0.954 | 0.066 | 0.002 |
| | GPT-4o [21] | 0.3k | - | - | - | - | 0.860 | 0.792 | 0.403 | 0.124 |
| | StarVector(8B) [42] | 2.0k | - | - | - | - | 0.895 | 0.881 | 0.231 | 0.059 |
| | Vtracer | 52.4k | - | - | - | - | 0.993 | 0.966 | 0.039 | 0.002 |
| | OmniSVG(4B) | 3.8k | 137.40 | 0.275 | 4.62 | 0.244 | 0.993 | 0.950 | 0.050 | 0.006 |
| | OmniSVG-L(8B) | 5.7k | 130.56 | 0.276 | 4.60 | 0.242 | 0.922 | 0.893 | 0.235 | 0.040 |
| MMSVG-Illustration | Vectorfusion [22] | 66.1k | 253.94 | 0.185 | 4.94 | 0.226 | - | - | - | - |
| | SVGDreamer [60] | 132.0k | 419.70 | 0.201 | 4.37 | 0.221 | - | - | - | - |
| | Chat2SVG [56] | 1.0k | 210.03 | 0.283 | 4.45 | 0.250 | - | - | - | - |
| | IconShop [57] | 2.6k | 107.93 | 0.233 | 4.46 | 0.224 | - | - | - | - |
| | LIVE [34] | 52.2k | - | - | - | - | 0.935 | 0.950 | 0.111 | 0.008 |
| | DiffVG [29] | 322.0k | - | - | - | - | 0.945 | 0.955 | 0.065 | 0.001 |
| | GPT-4o [21] | 0.4k | - | - | - | - | 0.875 | 0.854 | 0.373 | 0.077 |
| | StarVector(8B) [42] | 2.6k | - | - | - | - | 0.877 | 0.900 | 0.238 | 0.046 |
| | Vtracer | 57.6k | - | - | - | - | 0.994 | 0.966 | 0.035 | 0.002 |
| | OmniSVG(4B) | 5.8k | 154.37 | 0.226 | 4.56 | 0.232 | 0.899 | 0.906 | 0.237 | 0.034 |
| | OmniSVG-L(8B) | 6.9k | 138.42 | 0.231 | 4.51 | 0.232 | 0.905 | 0.907 | 0.231 | 0.031 |
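
To make the parameterization of Sec. 4 concrete, the following minimal Python sketch maps a simplified SVG command sequence to discrete tokens: each atomic command from Tab. 1 (plus the fill attribute F) becomes a special token, every 2D point $(x, y)$ is merged into a single coordinate token $x \times w + y$ for the $200 \times 200$ viewbox, and fill colors are mapped to dedicated hex-value tokens. The vocabulary layout and helper names below are illustrative assumptions for this sketch, not the released implementation.

```python
# Illustrative sketch of the SVG tokenization scheme described in Sec. 4.
# Vocabulary layout, offsets, and function names are assumptions made for
# this example, not the authors' actual code.

VIEWBOX_W = 200  # MMSVG-2M SVGs are normalized to a 200 x 200 viewbox

# Special tokens for sequence boundaries and the six atomic command types.
SPECIAL = ["<SOP>", "<EOS>", "M", "L", "C", "A", "Z", "F"]
TOKEN_ID = {tok: i for i, tok in enumerate(SPECIAL)}
COORD_OFFSET = len(SPECIAL)                           # coordinate tokens follow the specials
COLOR_OFFSET = COORD_OFFSET + VIEWBOX_W * VIEWBOX_W   # then one token per hex color

def coord_token(x, y):
    """Merge a 2D point into a single token: (x, y) -> x * w + y."""
    return COORD_OFFSET + x * VIEWBOX_W + y

def color_token(hex_color):
    """Map a fill color such as '#ff8800' to a dedicated hex-value token."""
    return COLOR_OFFSET + int(hex_color.lstrip("#"), 16)

def tokenize_svg(commands):
    """Flatten a simplified command sequence (command type + points / fill) into tokens."""
    tokens = [TOKEN_ID["<SOP>"]]
    for cmd, args in commands:
        tokens.append(TOKEN_ID[cmd])
        if cmd == "F":                 # fill attribute carries a hex color
            tokens.append(color_token(args))
        else:                          # M, L, C, A carry 2D points; Z carries none
            for (x, y) in args:
                tokens.append(coord_token(x, y))
    tokens.append(TOKEN_ID["<EOS>"])
    return tokens

# Example: move to (10, 20), draw a line to (50, 60), close the path, fill red.
print(tokenize_svg([("M", [(10, 20)]), ("L", [(50, 60)]), ("Z", []), ("F", "#ff0000")]))
```

Merging each point into one token, rather than emitting x and y (or their individual digits) separately, is the main lever that keeps even complex samples within the model's token budget; the exact savings depend on the path statistics of the sample.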
+ +# 5 Experiments + +To validate the effectiveness of our method, we first introduce the baselines (Sec. 5.1). Then, we make quantitative comparisons with prior arts (Secs. 5.2 and 5.3) and conduct ablations (Sec. 5.4) to study the effectiveness of our design. + +# 5.1Baselines + +For the text-to-SVG task, we compare our method with language-based (LLM-based) methods, including VectorFusion [22], SVGDreamer [60], Chat2SVG [56] and IconShop [57]. For image-to-SVG task, we compare our method with baseline methods across image vectorization and Multimodal Large Language Modeling approaches, including LIVE [34], DiffVG [29], StarVector [42], Vtracer [12] and GPT-4o [21] using the official implementations with the hyperparameters proposed by the authors, and apply their pre- and post-processing code as required. + +# 5.2 Quantitative Comparisons + +We compare our OmniSVG with other baseline methods on the "text-to-SVG" and "image-to-SVG" tasks in our MMSVG-Bench. In addition to the metrics mentioned in Sec. 3, we also report the average token length (# tokens) of a generated SVG sample utilizing the Qwen2.5-VL [1] tokenizer. + +As shown in Tab. 2, OmniSVG demonstrates strong performance compared to state-of-the-art baselines in text-to-SVG generation, achieving superior FID scores and competitive CLIP score, aesthetic quality, and HPS. For image-to-SVG, OmniSVG also achieves competitive results with traditional vectorization methods, i.e. LIVE [34], DiffVG [29], and VTracer [12], but with a much shorter sequence length. When comparing to auto-regressive methods, i.e. GPT-4o [21] and StarVector [42], OmniSVG also achieves a superior performance across all metrics. The above results indicate that OmniSVG effectively balances the generation cost and the visual quality when generating SVGs according to multi-modal conditions. + +![](images/0b0a38efbb695a95d71553a06e7819b3d49df273d684e1a78144d9c2d90c71b6.jpg) +Figure 3: Qualitative Comparison with SOTA Methods on Text-to-SVG Task. We compare the propose method with SOTA Text-to-SVG methods on our evaluation benchmarks, namely Icon and Illustration. + +# 5.3 Qualitative Evaluations + +Text-to-SVG task. We compare our method with baseline approaches using seven distinct text prompts for the text-to-SVG task, as shown in Fig. 4. Optimization-based methods like SVGDreamer [60] and VectorFusion [22] require significant computation time due to their iterative optimization processes, which, while effective for refining SVG details, are computationally expensive. Auto-regressive methods, such as IconShop [57] and Chat2SVG [56], generate SVGs more quickly by leveraging pre-trained models but have notable limitations. IconShop produces monochrome SVGs, restricting its applicability, while Chat2SVG, though flexible, generates less detailed and semantically consistent SVGs in its first stage. Our OmniSVG consistently outperforms all baselines across various text prompts in generating high-fidelity SVGs with rich color, geometric accuracy, and the ability to handle complex visual cues. + +Image-to-SVG Task. We compare our method with classical image vectorization approaches, including DiffVG [29], LIVE [34], and VLM-based methods GPT-4o [21], StarVector [42] and Vtracer [12] As shown in Fig. 4, our method outperforms these baselines in both quality and efficiency. Optimization-based methods like DiffVG and LIVE perform well on simple icons but struggle with complex images, often generating visual artifacts. 
The GPT-4o model, while capable of generating SVGs for complex images, is limited to icon-level outputs and cannot handle detailed illustrations. StarVector excels at simple icons but fails to produce accurate SVGs for more intricate images, highlighting its limited generalization capability. Vtracer is an image processing algorithm designed to convert raster images into SVGs. In contrast, OmniSVG effi + +![](images/9cce668291ae3db38c36dcf3597d0b593fc88ffc1dd7c1c70776e983a717947f.jpg) +Figure 5: Generated SVG with Character-Reference (CRef) by OmniSVG. + +![](images/1848b53d51571e79abf00b6041fd54b00a12f7df7b9c077a30c48675555e314e.jpg) +Figure 4: Qualitative Comparison with SOTA Methods on Image-to-SVG Task. We compare the propose method with SOTA Image-to-SVG methods on our evaluation benchmarks. + +ciently converts a wide range of images, from icons to complex illustrations and character images, into high-quality, editable SVGs. This superior performance in handling diverse visual cues distinguishes OmniSVG from traditional vectorization methods. Additional visual results can be found in Fig. 12. We provide more detailed discussions with existing methods, particularly the recent works LLM4SVG [59] and StarVector [42], in the Sec. D. + +Character-Reference SVG generation task. As shown in Fig. 5, by training on MMSVG-Character with natural character image and SVG pair data, OmniSVG is capable of generating character SVGs through image references. + +# 5.4 Ablation studies + +Effectiveness of SVG Parameterization. We present a comprehensive comparison among different SVG parameterization strategy with the traditional non-parameterized methods for SVG representation in large language models. We ablates on the parameterization on both coordinate and color attributes of the SVG. + +The results, shown in Tab. 3 and Fig. 6 demonstrate that parameterizing both coordinate and color attributes yields a better generation results under all metrics with the shortest token length. It further validates that the efficient token representation allows our method to generate complex SVGs with fewer computational resources. Additionally, qualitative results show that our method outperforms + +Table 3: Quantitative Study on SVG Parameterization. Ablation studies on color parametrization (abbreviated as color param.) and coordinate parameterization (abbreviated as coord param.) are conducted. + +
| Methods | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓ | # Tokens |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| w/o param. | 218.76 | 0.185 | 3.43 | 0.138 | 0.741 | 0.718 | 0.315 | 0.182 | 18.5k |
| w/o coordinate param. | 193.42 | 0.216 | 3.90 | 0.169 | 0.826 | 0.809 | 0.248 | 0.119 | 10.2k |
| w/o color param. | 167.28 | 0.269 | 4.31 | 0.211 | 0.895 | 0.879 | 0.179 | 0.053 | 6.3k |
| OmniSVG(4B) | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020 | 4.8k |
+ +Table 4: Ablation of the Model Size. As the model size grows, the generated samples are of higher quality. + +
| Methods | Input | Size | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| FLAN-T5-Base [10] | Text | 223M | 198.48 | 0.158 | 3.38 | 0.085 | - | - | - | - |
| FLAN-T5-Large [10] | Text | 770M | 175.24 | 0.208 | 3.92 | 0.142 | - | - | - | - |
| FLAN-T5-xl [10] | Text | 3B | 160.28 | 0.258 | 4.31 | 0.192 | - | - | - | - |
| blip2-flan-t5-xl [28] | Text/Image | 3.94B | 152.11 | 0.235 | 4.48 | 0.215 | 0.898 | 0.891 | 0.255 | 0.041 |
| OmniSVG(4B) | Text/Image | 3.7B | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020 |
+ +others, particularly as SVG complexity increases. The non-parameterization method fails to generate SVGs for complex images. These findings underscore the effectiveness of our full parameterization strategy in balancing performance and resource efficiency for SVG generation tasks. + +Ablation studies on model size. To analyze whether training a larger model benefits SVG generation, we evaluate OmniSVG base models with different sizes on the MMSVG-2M dataset in Tab. 4. We evaluate OmniSVG with base models of varying sizes on the MMSVG-2M dataset in Tab. 4 by progressively scaling up the model size. The results show that as the model size grows, we can generate SVG samples with a better quality. + +Table 5: Ablation on VLM architecture. + +
| Vision Model | Language Model | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| CLIP | Qwen2.5 | 185.31 | 0.249 | 4.52 | 0.215 | 0.867 | 0.856 | 0.267 | 0.058 |
| VQGAN | Qwen2.5 | 198.74 | 0.234 | 4.49 | 0.203 | 0.839 | 0.828 | 0.295 | 0.071 |
| Qwen2.5-VL-3B-Instruct | | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020 |
| Qwen2.5-VL-7B-Instruct | | 134.45 | 0.254 | 4.56 | 0.237 | 0.914 | 0.900 | 0.233 | 0.036 |
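
Independently of which backbone in Tab. 5 is used, training follows the suffix-only next-token objective of Eq. (1): the loss is computed over the SVG tokens $x_s$ while the multimodal instruction prefix $x_c$ is masked out. The PyTorch sketch below illustrates this masking; tensor names and shapes are assumptions made for the example, not the authors' code.

```python
# Minimal sketch of the objective in Eq. (1): next-token prediction where only
# the SVG suffix tokens contribute to the loss. Names and shapes are assumed.
import torch
import torch.nn.functional as F

def suffix_nll(logits, tokens, prefix_len):
    """logits: (T, V) next-token logits; tokens: (T,) full sequence [x_c, x_s]."""
    # Standard causal shift: position t predicts token t + 1.
    pred, target = logits[:-1], tokens[1:].clone()
    # Ignore every position whose target still belongs to the prefix x_c.
    target[: prefix_len - 1] = -100
    return F.cross_entropy(pred, target, ignore_index=-100)

# Toy example: 4 instruction (prefix) tokens followed by 6 SVG (suffix) tokens.
T, V, prefix_len = 10, 32, 4
loss = suffix_nll(torch.randn(T, V), torch.randint(0, V, (T,)), prefix_len)
print(float(loss))
```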
+ +Ablation Studies on the VLM Architecture. To evaluate the effectiveness of the VLM architecture, we conducted an ablation study replacing it with alternative LLM-based architectures incorporating image encoders such as CLIP ViT-B/32 [39], VQGAN [14], and Qwen2.5-VL [1]. + +The results in Tab. 5 show that Qwen2.5-VL consistently outperformed all alternatives under all evaluation metrics. + +User Study. We extract one-tenth of the samples from the evaluation dataset and conducted a user study with 15 participants to evaluate user preferences, vividness, and the alignment between text-to-SVG and image-to-SVG. Participants are asked to assess SVGs generated by different models based on 150 text descriptions and 150 image prompts, comparing the results generated using our method and baseline models. The results in Fig. 7 show that OmniSVG is widely preferred, with higher scores for vividness and superior semantic alignment with the input conditions. + +# 6 Conclusions + +Conclusions. We introduce OmniSVG, a unified framework for multimodal SVG generation that leverages pre-trained Vision-Language Models (VLMs). By parameterizing SVG com + +mands and coordinates as discrete tokens, OmniSVG efficiently decouples structural logic from geometry, addressing issues like "coordinate hallucination" while maintaining design expressiveness. Our method outperforms existing approaches in both quality and efficiency, offering high-quality, editable SVG across various design domains. Additionally, we proposed MMSVG-2M, a large-scale multimodal dataset with two million annotated SVG assets and a standardized evaluation protocol. + +![](images/f4018d8e6b84d4441104ddcc05ee39760d0faf2a95c81a23d8f047c80b19142b.jpg) +Figure 6: Qualitative Study on Parametrization. + +Extensive experiments show that OmniSVG surpasses prior SVG generation methods in various conditional generation tasks, highlighting its potential for integration into professional SVG design workflows. + +Limitations and Future Work. During inference, OmniSVG generates tens of thousands of tokens for complex samples, which inevitably leads to a considerable generation time. OmniSVG is only bounded by vector style image prompt and fails on natural images. As for future work, recent endeavors on multi-token prediction [15, 2] and KV-cache compression [68, 3] provide a promising way to save the generation cost. Additionally, the auto-regressive nature of OmniSVG also unlocks future + +Figure 7: User Study of OmniSVG and baselines. + +
| Method | Preference↑ | Vividness↑ | Alignment↑ |
| --- | --- | --- | --- |
| Vectorfusion [22] | 35 | 58 | 76 |
| SVGDreamer [60] | 41 | 65 | 79 |
| Chat2SVG [56] | 55 | 61 | 86 |
| IconShop [57] | 79 | 57 | 75 |
| GPT-4o [21] | 38 | 54 | 80 |
| StarVector(8B) [42] | 37 | 81 | 68 |
| DiffVG [29] | 88 | 76 | 96 |
| LIVE [34] | 86 | 70 | 95 |
| OmniSVG | 96 | 88 | 98 |
+ +opportunities for in-context learning [67, 69, 47], chain-of-thought reasoning [55, 16], and multi-turn interleaved generation [20, 31], thereby providing a more precise user control. + +# Acknowledgements + +This work is in part supported by National Key R&D Program of China (Grant No. 2022ZD0160103), National Natural Science Foundation of China (Grant No. 62276067), and National Natural Science Foundation of China (Grant No. 62472104). + +The computations in this research were performed using the CFFF platform of Fudan University. + +# References + +[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. +[2] Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, and Tri Dao. Medusa: Simple framework for accelerating IIm generation with multiple decoding heads. Retrieved December, 2023. +[3] Zefan Cai, Yichi Zhang, Bofei Gao, Yuliang Liu, Tianyu Liu, Keming Lu, Wayne Xiong, Yue Dong, Baobao Chang, Junjie Hu, et al. Pyramidkv: Dynamic kv cache compression based on pyramidal information tunneling. arXiv preprint arXiv:2406.02069, 2024. +[4] Alexandre Carlier, Martin Danelljan, Alexandre Alahi, and Radu Timofte. Deepsvg: A hierarchical generative network for vector graphics animation. NeurIPS, 2020. +[5] Sijin Chen, Xin Chen, Anqi Pang, Xianfang Zeng, Wei Cheng, Yijun Fu, Fukun Yin, Billzb Wang, Jingyi Yu, Gang Yu, et al. Meshxl: Neural coordinate field for generative 3d foundation models. NeurIPS, 2024. +[6] Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. L13da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In CVPR, 2024. +[7] Zehao Chen and Rong Pan. Svgbuilder: Component-based colored graphic generation with text-guided autoregressive transformers. arXiv preprint arXiv:2412.10488, 2024. +[8] Wei Cheng, Ruixiang Chen, Siming Fan, Wanqi Yin, Keyu Chen, Zhongang Cai, Jingbo Wang, Yang Gao, Zhengming Yu, Zhengyu Lin, et al. Dna-rendering: A diverse neural actor repository for high-fidelity human-centric rendering. In ICCV, 2023. +[9] Wei Cheng, Su Xu, Jingtan Piao, Chen Qian, Wayne Wu, Kwan-Yee Lin, and Hongsheng Li. Generalizable neural performer: Learning robust radiance fields for human novel view synthesis. arXiv preprint arXiv:2204.11798, 2022. +[10] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. JMLR, 2024. +[11] Louis Clouatre and Marc Demers. Figr: Few-shot image generation with reptile. arXiv preprint arXiv:1901.02199, 2019. +[12] Vision Cortex. Vtracer. https://www.visioncortex.org/vtracer-docs, 2023. +[13] Nyanko Devs. Danbooru2023: A large-scale crowdsourced and tagged anime illustration dataset. Hugging Face, 2023. +[14] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In CVPR, 2021. +[15] Fabian Gloeckle, Badr Youbi Idrissi, Baptiste Rozière, David Lopez-Paz, and Gabriel Synnaeve. Better & faster large language models via multi-token prediction. arXiv preprint arXiv:2404.19737, 2024. +[16] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 
+[17] Han Guo, Songlin Yang, Tarushii Goel, Eric P Xing, Tri Dao, and Yoon Kim. Log-linear attention. arXiv preprint arXiv:2506.04761, 2025. +[18] David Ha and Douglas Eck. A neural representation of sketch drawings. In ICLR, 2018. +[19] Teng Hu, Ran Yi, Baihong Qian, Jiangning Zhang, Paul L Rosin, and Yu-Kun Lai. Supersvg: Superpixel-based scalable vector graphics synthesis. In CVPR, 2024. +[20] Minbin Huang, Yanxin Long, Xinchi Deng, Ruihang Chu, Jiangfeng Xiong, Xiaodan Liang, Hong Cheng, Qinglin Lu, and Wei Liu. Dialoggen: Multi-modal interactive dialogue system for multi-turn text-to-image generation. arXiv preprint arXiv:2403.08857, 2024. +[21] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. + +[22] Ajay Jain, Amber Xie, and Pieter Abbeel. Vectorfusion: Text-to-sv by abstracting pixel-based diffusion models. In CVPR, 2023. +[23] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention. In International conference on machine learning, pages 5156-5165. PMLR, 2020. +[24] Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Muñoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, et al. The stack: 3 tb of permissively licensed source code. arXiv preprint arXiv:2211.15533, 2022. +[25] Kozea. Cairosvg. https://cairosvg.org/, 2023. +[26] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024. +[27] Black Forest Labs. Flux.1Redux-dev. https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev, 2024. +[28] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023. +[29] Tzu-Mao Li, Michal Lukáč, Gharbi Michael, and Jonathan Ragan-Kelley. Differentiable vector graphics rasterization for editing and learning. SIGGRAPH Asia, 2020. +[30] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023. +[31] Ziyu Liu, Tao Chu, Yuhang Zang, Xilin Wei, Xiaoyi Dong, Pan Zhang, Zijian Liang, Yuanjun Xiong, Yu Qiao, Dahua Lin, et al. Mmdu: A multi-turn multi-image dialog understanding benchmark and instruction-tuning dataset for lvlms. arXiv preprint arXiv:2406.11833, 2024. +[32] Raphael Gontijo Lopes, David Ha, Douglas Eck, and Jonathon Shlens. A learned representation for scalable vector graphics. In CVPR, 2019. +[33] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. +[34] Xu Ma, Yuqian Zhou, Xingqian Xu, Bin Sun, Valerii Filev, Nikita Orlov, Yun Fu, and Humphrey Shi. Towards layer-wise image vectorization. In CVPR, 2022. +[35] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. +[36] Dongwei Pan, Long Zhuo, Jingtan Piao, Huiwen Luo, Wei Cheng, Yuxin Wang, Siming Fan, Shengqi Liu, Lei Yang, Bo Dai, et al. Renderme-360: a large digital asset library and benchmarks towards high-fidelity head avatars. NeurIPS, 2023. +[37] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR, 2023. 
+[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. +[39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. +[40] Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. Zero: Memory optimizations toward training trillion parameter models. In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis. IEEE, 2020. +[41] Pradyumna Reddy, Michael Gharbi, Michal Lukac, and Niloy J Mitra. Im2vec: Synthesizing vector graphics without vector supervision. In CVPR, 2021. +[42] Juan A Rodriguez, Shubham Agarwal, Issam H Laradji, Pau Rodriguez, David Vazquez, Christopher Pal, and Marco Pedersoli. Starvector: Generating scalable vector graphics code from images. arXiv preprint arXiv:2312.11556, 2023. + +[43] Christoph Schuhmann. Improved aesthetic predictor. https://github.com/christophschuhmann/improved-aesthetic-predictor, 2022. +[44] I-Chao Shen and Bing-Yu Chen. Clipgen: A deep generative model for clipart vectorization and synthesis. TVCG, 2022. +[45] Yiren Song, Xuning Shao, Kang Chen, Weidong Zhang, Zhongliang Jing, and Minzhe Li. Clipvg: Text-guided image manipulation using differentiable vector graphics. In AAAI, 2023. +[46] Hao Su, Xuefeng Liu, Jianwei Niu, Jiahe Cui, Ji Wan, Xinghao Wu, and Nana Wang. Marvel: Raster gray-level manga vectorization via primitive-wise deep reinforcement learning. TCSVT, 2023. +[47] Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In CVPR, 2024. +[48] Zecheng Tang, Chenfei Wu, Zekai Zhang, Mingheng Ni, Shengming Yin, Yu Liu, Zhengyuan Yang, Lijuan Wang, Zicheng Liu, Juntao Li, et al. Strokenuwa: Tokenizing strokes for vector graphic synthesis. arXiv preprint arXiv:2401.17093, 2024. +[49] Zecheng Tang, Chenfei Wu, Zekai Zhang, Mingheng Ni, Shengming Yin, Yu Liu, Zhengyuan Yang, Lijuan Wang, Zicheng Liu, Juntao Li, et al. Strokenuwa: Tokenizing strokes for vector graphic synthesis. arXiv preprint arXiv:2401.17093, 2024. +[50] Lucas Theis, Aäron van den Oord, and Matthias Bethge. A note on the evaluation of generative models. arXiv preprint arXiv:1511.01844, 2015. +[51] Yingtao Tian and David Ha. Modern evolution strategies for creativity: Fitting concrete images and abstract concepts. In Artificial Intelligence in Music, Sound, Art and Design, 2022. +[52] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. +[53] Yizhi Wang and Zhouhui Lian. Deepvecfont: synthesizing high-quality vector fonts via dual-modality learning. TOG, 2021. +[54] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004. +[55] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. NeurIPS, 2022. 
+
[56] Ronghuan Wu, Wanchao Su, and Jing Liao. Chat2svg: Vector graphics generation with large language models and image diffusion models. arXiv preprint arXiv:2411.16602, 2024.
+[57] Ronghuan Wu, Wanchao Su, Kede Ma, and Jing Liao. Iconshop: Text-guided vector icon synthesis with autoregressive transformers. TOG, 2023.
+[58] Xiaoshi Wu, Keqiang Sun, Feng Zhu, Rui Zhao, and Hongsheng Li. Human preference score: Better aligning text-to-image models with human preference. In ICCV, 2023.
+[59] Ximing Xing, Juncheng Hu, Guotao Liang, Jing Zhang, Dong Xu, and Qian Yu. Empowering llms to understand and generate complex vector graphics. arXiv preprint arXiv:2412.11102, 2024.
+[60] Ximing Xing, Haitao Zhou, Chuang Wang, Jing Zhang, Dong Xu, and Qian Yu. SVGdreamer: Text guided svg generation with diffusion model. In CVPR, 2024.
+[61] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.
+[62] Yiying Yang, Fukun Yin, Wen Liu, Jiayuan Fan, Xin Chen, Gang Yu, and Tao Chen. Pm-inr: Prior-rich multi-modal implicit large-scale scene neural representation. In AAAI, 2024.
+[63] Fukun Yin, Xin Chen, Chi Zhang, Biao Jiang, Zibo Zhao, Wen Liu, Gang Yu, and Tao Chen. Shapegpt: 3d shape generation with a unified multi-modal language model. TMM, 2025.
+[64] Alex Young, Bei Chen, Chao Li, Chengen Huang, Ge Zhang, Guanwei Zhang, Heng Li, Jiangcheng Zhu, Jianqun Chen, Jing Chang, et al. Yi: Open foundation models by 01.ai. arXiv preprint arXiv:2403.04652, 2024.
+
+[65] Zhengming Yu, Wei Cheng, Xian Liu, Wayne Wu, and Kwan-Yee Lin. Monohuman: Animatable human neural field from monocular video. In CVPR, 2023.
+[66] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018.
+[67] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. What makes good examples for visual in-context learning? NeurIPS, 2023.
+[68] Xiabin Zhou, Wenbin Wang, Minyan Zeng, Jiaxian Guo, Xuebo Liu, Li Shen, Min Zhang, and Liang Ding. Dynamickv: Task-aware adaptive kv cache compression for long context llms. arXiv preprint arXiv:2412.14838, 2024.
+[69] Yucheng Zhou, Xiang Li, Qianning Wang, and Jianbing Shen. Visual in-context learning for large vision-language models. arXiv preprint arXiv:2402.11574, 2024.
+[70] Bocheng Zou, Mu Cai, Jianrui Zhang, and Yong Jae Lee. Vgbench: A comprehensive benchmark of vector graphics understanding and generation for large language models. In EMNLP, 2024.
+
+# Appendix
+
+# A Additional Details of MMSVG-2M Dataset
+
+# A.1 Samples of MMSVG-2M Dataset
+
+We visualize samples of our MMSVG-2M dataset in Fig. 8. In our MMSVG-2M dataset, $55\%$ of the SVG samples belong to MMSVG-Icon, $25\%$ belong to MMSVG-Illustration, and the remaining $20\%$ belong to MMSVG-Character. Among the SVG samples within the MMSVG-Character category, half of them come from Freepik, while the other half is generated by our data creation pipeline. We also collect image-SVG pairs for the character-reference SVG generation task during the generation process.
+
+Table 6: Data Statistics for MMSVG-2M. Our MMSVG-2M consists of 1.1 million SVG icons, 0.5 million SVG illustrations, and 0.4 million SVG anime characters.
+
| Dataset | Train | Val | Total | Source | Token Length |
| --- | --- | --- | --- | --- | --- |
| MMSVG-Icon | 990k | 110k | 1,100k | Iconfont | 2.2k ± 0.9k |
| MMSVG-Illustration | 450k | 50k | 500k | IconScout | 8.1k ± 3.3k |
| MMSVG-Character | 350k | 50k | 400k | Freepik & generated | 28k ± 7.3k |
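The Token Length column in Table 6 reports the mean ± standard deviation of the tokenized SVG sequence length per subset. The snippet below is a hypothetical sketch of how such statistics could be gathered; `tokenize_svg` and the directory layout are illustrative assumptions, not part of any released tooling.

```python
# Hypothetical sketch of computing per-subset token-length statistics such as
# "2.2k ± 0.9k" in Table 6. `tokenize_svg` is a placeholder for the SVG
# tokenizer described in the paper; here it simply splits on whitespace.
from pathlib import Path
from statistics import mean, stdev

def tokenize_svg(svg_text: str) -> list[str]:
    # Placeholder tokenizer: a real one would map SVG commands and
    # coordinates to discrete tokens.
    return svg_text.split()

def token_length_stats(svg_dir: str) -> tuple[float, float]:
    """Return (mean, std) of token counts over all *.svg files in a folder."""
    lengths = [len(tokenize_svg(p.read_text())) for p in Path(svg_dir).glob("*.svg")]
    return mean(lengths), stdev(lengths)

for subset in ["icon", "illustration", "character"]:   # assumed folder names
    mu, sigma = token_length_stats(f"mmsvg2m/{subset}")
    print(f"{subset}: {mu:.0f} ± {sigma:.0f} tokens")
```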
+
+# A.2 SVG-Image-Text Pairs Construction
+
+Our MMSVG-2M dataset comprises two million SVG samples with the corresponding rasterized images. We generate captions on the rasterized images with BLIP-2 [28], thereby providing textual descriptions that enable us to fine-tune our model to follow these instructions. We use CairoSVG [25] for rasterization and remove samples that produced completely white images.
+
+Annotation. We employ an off-the-shelf VLM, specifically BLIP-2 [28], to generate SVG captions with the prompt below. To reduce hallucinations, we drop the samples with CLIP scores less than 30. We also visualize the distribution of annotated keywords in the MMSVG-2M dataset as a word cloud in Fig. 10, and the instruction template for annotation is shown in Tab. 7. An illustrative sketch of this rasterization and filtering step is shown below.
+
+Instruction templates. MMSVG-Bench provides three tasks: the text-to-SVG task, the image-to-SVG task, and the character-reference SVG generation task. Each task requires a different instruction template. For text- and image-conditioned SVG generation, we provide the input text or image to the VLM architecture. For character-reference SVG generation, we provide the natural character
+
+# Instructions for Different Tasks
+
+- Employed BLIP-2 for SVG Captioning: You are a helpful assistant. Your task is to describe this image in a single sentence, including the object, its color, and its overall arrangement. For example: "Yellow cheers with glasses of alcohol drinks." / "Heart emojis represent love on Valentine's Day."
+- Text-to-SVG: You are a helpful SVG Generation assistant, designed to generate SVG. We provide the text description as input, generate SVG based on the text.
+- Image-to-SVG: You are a helpful SVG Generation assistant, designed to generate SVG. We provide an image as input, generate SVG for this image.
+- Character-Reference SVG Generation: You are a helpful SVG Generation assistant, designed to generate SVG. We provide a natural image as input, please generate the simplified character SVG based on the reference input image.
+
+Table 7: Instructions for Different Tasks. Instructions include annotation, text-to-SVG, image-to-SVG, and character-reference SVG generation.
+
+![](images/c5d2637f15e3c2c9618d7e718252bc140df50e4b622c6efe968a8714b6abd547.jpg)
+Figure 8: Samples from MMSVG-2M Dataset. The proposed MMSVG-2M dataset can be separated into three subsets, namely Icon, Illustration, and Character. Samples from the Icon and Illustration subsets and part of the Character subset are downloaded from the Internet. The other part of the Character subset is generated by our data creation pipeline, which provides image-SVG pairs for the image prompting task.
+
+reference image and the original image with the VLM architecture. The list of instruction templates for different tasks is shown in Tab. 7.
+
+# A.3 Character-SVG Pairs Construction
+
+As illustrated in Fig. 6, part of our proposed MMSVG-2M-Character subset is constructed using a generative pipeline. As shown in the pipeline diagram in Fig. 2, we employ a FLUX [26]-based generative model enhanced with a vector-style LoRA to enable the generation of SVG-style data. For image-based conditioning, we adopt FLUX-Redux [27], which injects image features via a SigLIP encoder and projects them into image embeddings. These embeddings are then concatenated with the text tokens as conditioning inputs for FLUX [26]. However, in practice, the original Redux [27] conditioning proves to be overly strong.
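The SVG-image-text pair construction above reduces to a small rasterize-then-filter loop. The sketch below is illustrative rather than the authors' released code: `cairosvg` and `PIL` are the libraries named in the text, while `caption_fn` and `clip_score_fn` are assumed stand-ins for a BLIP-2 captioner and a CLIP similarity scorer, and the white-image tolerance is an arbitrary choice.

```python
# Illustrative sketch (not the authors' code) of Sec. A.2: rasterize an SVG
# with CairoSVG, drop all-white renders, caption the image, and drop pairs
# whose CLIP score falls below 30.
import io
import cairosvg
from PIL import Image

def rasterize_svg(svg_path: str, size: int = 448) -> Image.Image:
    """Render an SVG file to an RGB PIL image on a white background."""
    png_bytes = cairosvg.svg2png(url=svg_path, output_width=size,
                                 output_height=size, background_color="white")
    return Image.open(io.BytesIO(png_bytes)).convert("RGB")

def is_blank(img: Image.Image, tol: int = 250) -> bool:
    """True if the rendered image is (almost) completely white."""
    darkest, _ = img.convert("L").getextrema()
    return darkest >= tol

def build_pair(svg_path: str, caption_fn, clip_score_fn, min_clip: float = 30.0):
    """Return an (image, caption) pair, or None if the sample is filtered out."""
    img = rasterize_svg(svg_path)
    if is_blank(img):
        return None                        # white render -> discard
    caption = caption_fn(img)              # e.g., a BLIP-2 captioner
    if clip_score_fn(img, caption) < min_clip:
        return None                        # likely hallucinated caption
    return img, caption
```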
To address this overly strong conditioning, we adopt a community-implemented variant of Redux that downsamples the image embeddings in 2D space. As observed in our experiments shown in Fig. 9, a downsampling factor between $2 \times$ and $3 \times$ yields the most reasonable SVG-style character references. Finally, we employ VTracer [12] to perform near-instant vectorization of the generated images. To construct the MMSVG-2M-Character subset, we first filter $103k$ character instances from the Danbooru [13] dataset and apply the aforementioned pipeline with motion and expression keywords, following previous works [8, 9, 36, 65]. We compare the raw FLUX [26] outputs and their vectorized counterparts, retaining only those samples with PSNR and SSIM scores above a certain threshold as valid data.
+
+# B Additional Details
+
+# B.1 Scaling Up
+
+To study the effectiveness of scaling up multimodal SVG generation, we scale up OmniSVG from 4B to 8B parameters. We present training perplexity in Fig. 11, where both models are trained from scratch on 250 billion tokens. We show that, as the size of the model grows, the model achieves a lower validation perplexity, indicating a higher probability of producing the validation data.
+
+# B.2 Implementation Details
+
+We train our models in bfloat16 with the ZeRO-2 strategy [40] for memory-efficient training. We also adopt the AdamW [33] optimizer with a learning rate decaying from $3 \times 10^{-4}$ to $3 \times 10^{-6}$ and a weight decay of 0.1 to train our model. In practice, we load the pre-trained weights from the Qwen2.5-VL [1] model and initialize the SVG embeddings from scratch. Without further specification, we generate SVGs with the top-k and top-p sampling strategy with $k = 50$ and $p = 0.95$ for diversity. An illustrative sketch of this sampling configuration is given below.
+
+# C Additional Results
+
+We list the full comparisons in Tab. 2, including all the baselines mentioned in Sec. 5. For the text-to-SVG task, we compare our method with language-based (LLM-based) methods, including VectorFusion [22], SVGDreamer [60], Chat2SVG [56] and IconShop [57]. For the image-to-SVG task, we compare our method with baseline methods across image vectorization and Multimodal Large Language Modeling approaches, including LIVE [34], DiffVG [29], StarVector [42] and GPT-4o [21], using the official implementations with the hyperparameters proposed by the authors and applying their pre- and post-processing code as required. Specifically, for the text-to-SVG task, the optimization-based method SVGDreamer excels in enhancing editability by employing a semantic-driven image vectorization process that effectively separates foreground objects from the background, but it fails to handle complex scenes. Another optimization-based work, VectorFusion, stands out for generating SVG-exportable vector graphics without relying on large captioned datasets. However, VectorFusion is also unable to handle complex scenarios and diverse styles. The major drawback of these optimization-based works is their long optimization time: generating a single SVG usually takes more than ten minutes, which is prohibitively expensive. For the LLM-based method, Chat2SVG integrates Large Language Models (LLMs) with image diffusion models to create semantically rich SVG templates. However, Chat2SVG still needs to optimize the output SVG script from the LLM, which introduces increased computational complexity and poses challenges during model training.
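As a concrete illustration of the decoding setup in Sec. B.2 above, the sketch below configures top-k = 50 and top-p = 0.95 sampling with the Hugging Face transformers API. The checkpoint path is a placeholder, and the plain causal-LM interface is a simplification of the actual vision-language model; only the sampling hyperparameters and the bfloat16 precision come from the text.

```python
# Illustrative decoding sketch (not the released OmniSVG code): sample SVG
# token sequences with top-k = 50 and top-p = 0.95, as reported in Sec. B.2.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

MODEL_ID = "path/to/omnisvg-checkpoint"   # hypothetical local checkpoint path

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)

gen_cfg = GenerationConfig(
    do_sample=True,        # stochastic decoding for output diversity
    top_k=50,              # keep the 50 most likely tokens at each step
    top_p=0.95,            # nucleus sampling over 95% of the probability mass
    max_new_tokens=30000,  # complex SVGs may exceed 30k tokens
)

prompt = "Generate an SVG of a smiling sun icon."
inputs = tokenizer(prompt, return_tensors="pt")
output_ids = model.generate(**inputs, generation_config=gen_cfg)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```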
In comparison with these text-to-SVG baselines, IconShop utilizes a transformer-based architecture to autoregressively model SVG path sequences and demonstrates exceptional performance on simplified icon SVGs, offering an effective solution for text-to-SVG generation. However, it can only generate simple monochrome icon SVGs.
+
+![](images/06747bc02f04585aabd024e7ccafb4f8d7839e47fc78f613ad60da4b3238485c.jpg)
+Figure 10: Word Cloud Visualization of Label Distribution in the MMSVG-2M Dataset. The size of each label corresponds to its frequency of occurrence. The larger the label, the more frequently it appears in the dataset.
+
+![](images/dbd2c1184a18cb173aa8a27178432adf7f48ac0b537a06ddb31d90e52a5f32cd.jpg)
+Figure 9: Image Prompting Dataset Creation of MMSVG-2M Character. By utilizing FLUX-Redux and SVG vectorization tools, image prompting data pairs can be generated. In practice, we adopt FLUX-Redux downsampling scales of 2 and 3, trading off character similarity against the complexity of the generated SVG.
+
+![](images/b122d2212096046f8e937b7fe80bc42797f0338f419bcb3492121a56f357ccd3.jpg)
+(a) Training PPL for our models.
+
+![](images/f2a3bf5441468ccd44b23b048b566fd1b98834e333baba08f597b0619c99d3b5.jpg)
+(b) Validation PPL for our models.
+Figure 11: Training and Validation Perplexity (PPL) for OmniSVG Models. We train all the models from scratch on 250 billion tokens. We observe that performance improves as the model size grows.
+
+For the image-to-SVG task, we compare our method with the image vectorization methods. LIVE allows progressive and efficient generation of SVGs, optimizing closed vector paths under raster image supervision with shape complexity control. However, LIVE needs to optimize for a long time when generating complex SVGs. DiffVG enables end-to-end differentiability in vector graphics rasterization, improving optimization through anti-aliasing and gradient-based methods, while also being computationally expensive due to the complexity of the forward-backward rasterization process. Recently, the Multimodal Large Language Model (MLLM) based method StarVector leverages visual understanding to produce accurate SVG primitives within an LLM architecture and can generate SVGs from both text and image inputs. However, it still fails to generate complex SVGs. Since StarVector [42] has not yet released its text-to-SVG model weights, our MMSVG-Bench does not evaluate StarVector's text-to-SVG capabilities. MMSVG-Bench also evaluates our method against VLM-based methods such as GPT-4o to conduct a comprehensive assessment. We compare our method with these baselines on our MMSVG-2M dataset, from the simple MMSVG-Icon subset, through the more complex MMSVG-Illustration subset, to the highly complex MMSVG-Character subset.
+
+# D More Details of the Baselines
+
+# D.1 Text-to-SVG Task
+
+SVGDreamer [60] uses a semantic-driven image vectorization (SIVE) process to separate foreground objects and background, improving editability. The SIVE process utilizes attention-based primitive control and an attention-mask loss function to manipulate individual elements effectively. To address issues in existing text-to-SVG generation methods, the proposed Vectorized Particle-based Score Distillation (VPSD) approach models SVGs as distributions of control points and colors, improving shape, color diversity, and convergence speed.
+
+VectorFusion [22] leverages a text-conditioned diffusion model trained on pixel representations to generate SVG-exportable vector graphics without needing large captioned SVG datasets.
By optimizing a differentiable vector graphics rasterizer, it distills semantic knowledge from a pretrained diffusion model and uses Score Distillation Sampling to generate an SVG consistent with a caption. Experiments show that VectorFusion improves both quality and fidelity, offering a variety of styles such as pixel art and sketches. + +Chat2SVG [56] proposes a hybrid framework that combines the strengths of Large Language Models (LLMs) and image diffusion models for text-to-SVG generation. The approach first uses an LLM to create semantically meaningful SVG templates from basic geometric primitives. A dual-stage optimization pipeline, guided by image diffusion models, refines paths in latent space and adjusts point coordinates to enhance geometric complexity. + +IconShop [57] uses a transformer-based architecture to encode path commands and learn to model SVG path sequences autoregressively. It has shown excellent results in simplified icon scenarios and provides a good solution to Text-to-SVG generation by extending the FIGR-8-SVG dataset with + +![](images/1aa916043462691eacdf1ec6504ad4cdb68fdafec3028a2b4e705575becadf71.jpg) +Figure 12: Illustration of the SVG Generation Capabilities of OmniSVG. + +captions. We have access to their dataset and original splits and have trained our model on that data using a pre-trained checkpoint (trained on OmniVG dataset). We have extracted the results from IconShop and included them here to compare our method. + +LLM4SVG [59] is a framework that leverages Large Language Models (LLMs) to understand and generate Scalable Vector Graphics (SVGs). It employs a structured SVG encoding approach, utilizing learnable semantic tokens to accurately represent SVG components and their properties. This design enables LLMs to produce SVGs that are both semantically aligned with textual descriptions and visually coherent. However, LLM4SVG also has a maximum token length of 2048, limiting its ability to generate highly complex SVGs that require longer sequences. + +# D.2 Image-to-SVG Task + +LIVE (Layer-wise Image Vectorization) [34] is a method for progressively generating SVGs that closely fit a given raster image by recursively adding and optimizing closed vector paths. Using a differentiable renderer (based on DiffVG [29]), LIVE enables direct optimization of paths under raster image supervision while controlling shape complexity by adjusting the number of path segments. It introduces component-wise path initialization, identifying key visual components to ensure efficient topology extraction and minimize redundant shapes. + +DiffVG [29] is a landmark in vector graphics research, pioneering deep learning-based methods with the first differentiable vector graphics rasterization pipeline. By leveraging a combination of anti-aliasing techniques and gradient-based optimization, DiffVG ensures differentiability. Unlike methods relying on non-differentiable curve-to-mesh conversions, DiffVG employs a forward-backward rasterization process, where the forward pass generates antialiased images and the backward pass computes gradients with respect to vector graphic parameters. + +StarVector [42] works directly in the SVG code space, leveraging visual understanding to apply accurate SVG primitives. StarVector employs a transformer-based architecture that integrates an image encoder with a language model, enabling it to process visual inputs and produce precise SVG code. 
StarVector effectively handles diverse SVG types, including icons, logos, and complex diagrams, demonstrating robust generalization across various vectorization tasks. However, with a 16k token context window, StarVector may struggle to process highly complex SVGs that require longer sequences. + +Vtracer [12] is an image processing algorithm designed to convert raster images into SVGs. The algorithm follows a three-step pipeline, which involves the hierarchical clustering of images for vectorization. Initially, the pixels are transformed into paths, which are subsequently simplified into polygons. In the final step, these polygons are smoothed and approximated using a Bezier curve fitter. \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06263/images/041793a88ae441883cee99e8fcd32d85151503f775b754e305d890cf08cd57b0.jpg b/data/2025/2504_06xxx/2504.06263/images/041793a88ae441883cee99e8fcd32d85151503f775b754e305d890cf08cd57b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45ea24fd110e6d1bc0ad261342818c5569972b5b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/041793a88ae441883cee99e8fcd32d85151503f775b754e305d890cf08cd57b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f763c7909aa4d4f0fd37f22edd5dc73fbb4a1c606b0fcee49242f035e2053b85 +size 32895 diff --git a/data/2025/2504_06xxx/2504.06263/images/06747bc02f04585aabd024e7ccafb4f8d7839e47fc78f613ad60da4b3238485c.jpg b/data/2025/2504_06xxx/2504.06263/images/06747bc02f04585aabd024e7ccafb4f8d7839e47fc78f613ad60da4b3238485c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0e3100d165eb5349d1b82ddf467d64b8d26035f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/06747bc02f04585aabd024e7ccafb4f8d7839e47fc78f613ad60da4b3238485c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e8eeb228eb64171f025ad9489966bc3c44db2ab1d770f699ae28dff43a49bb2 +size 77308 diff --git a/data/2025/2504_06xxx/2504.06263/images/0b0a38efbb695a95d71553a06e7819b3d49df273d684e1a78144d9c2d90c71b6.jpg b/data/2025/2504_06xxx/2504.06263/images/0b0a38efbb695a95d71553a06e7819b3d49df273d684e1a78144d9c2d90c71b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b6ed05355f1b9ab2971696332969950c2c888e75 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/0b0a38efbb695a95d71553a06e7819b3d49df273d684e1a78144d9c2d90c71b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95a6f68d5298b332114886f15f19245b148d081bd6dd0ecaa0570121375b82ab +size 127722 diff --git a/data/2025/2504_06xxx/2504.06263/images/159b5270617b8ba36e1cfeaf7311e08671a5b1b0eb436704a4b5cee27646eaeb.jpg b/data/2025/2504_06xxx/2504.06263/images/159b5270617b8ba36e1cfeaf7311e08671a5b1b0eb436704a4b5cee27646eaeb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b78202ae07b716542b6a9d70e767652b6525eb56 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/159b5270617b8ba36e1cfeaf7311e08671a5b1b0eb436704a4b5cee27646eaeb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b2a8b9f9eca978dd33304df7d45c088dee44b736461d686ad260569e8f2ec98 +size 12721 diff --git a/data/2025/2504_06xxx/2504.06263/images/1848b53d51571e79abf00b6041fd54b00a12f7df7b9c077a30c48675555e314e.jpg b/data/2025/2504_06xxx/2504.06263/images/1848b53d51571e79abf00b6041fd54b00a12f7df7b9c077a30c48675555e314e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37574da948e762e1bae90d793232bd223fcfacda --- /dev/null 
+++ b/data/2025/2504_06xxx/2504.06263/images/1848b53d51571e79abf00b6041fd54b00a12f7df7b9c077a30c48675555e314e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72b0a4ffaeadd34dde080c4dd33aed99dc21d65676956850fd3739e3c33fcebe +size 107864 diff --git a/data/2025/2504_06xxx/2504.06263/images/1aa916043462691eacdf1ec6504ad4cdb68fdafec3028a2b4e705575becadf71.jpg b/data/2025/2504_06xxx/2504.06263/images/1aa916043462691eacdf1ec6504ad4cdb68fdafec3028a2b4e705575becadf71.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20a4a43bd1b329f9b673d551dac05a4ee20bbf4b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/1aa916043462691eacdf1ec6504ad4cdb68fdafec3028a2b4e705575becadf71.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89c22ffe4d63b67e0ffe05ab370d8a4680a3e338d5b1294c362838d7b4f94a38 +size 197031 diff --git a/data/2025/2504_06xxx/2504.06263/images/1f92246b71d2b71347d70f2d6d09408e5ac28af07385da8375387d51730ec9e0.jpg b/data/2025/2504_06xxx/2504.06263/images/1f92246b71d2b71347d70f2d6d09408e5ac28af07385da8375387d51730ec9e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75a139e973d1e4ad6820cb8801156d781b494925 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/1f92246b71d2b71347d70f2d6d09408e5ac28af07385da8375387d51730ec9e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:871a2ec8846b549775563a411ea645b006fa3a26ec6b56268948d12d7a23d378 +size 1266 diff --git a/data/2025/2504_06xxx/2504.06263/images/22c950090cdb82fa973bb44474a61e96feba1d2d5b4006804615b7cc6584a3b0.jpg b/data/2025/2504_06xxx/2504.06263/images/22c950090cdb82fa973bb44474a61e96feba1d2d5b4006804615b7cc6584a3b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..553deca5f92daa6bcd09afe767854581cc778ac4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/22c950090cdb82fa973bb44474a61e96feba1d2d5b4006804615b7cc6584a3b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa41e61df7597d89762103a4e56ac5c39f2e1b824a0b31d83508a993f65752c2 +size 3875 diff --git a/data/2025/2504_06xxx/2504.06263/images/4279ac5b0b7bd39e0fbef10bbf6251dfdc8dd6f9b0ef61548e401177ef229a96.jpg b/data/2025/2504_06xxx/2504.06263/images/4279ac5b0b7bd39e0fbef10bbf6251dfdc8dd6f9b0ef61548e401177ef229a96.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cedb638ce367e4cb296214e4605ac5ac02befc64 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/4279ac5b0b7bd39e0fbef10bbf6251dfdc8dd6f9b0ef61548e401177ef229a96.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b5d192dd7238ac93969869b4212ffe31f7bf717e48e12a41dd1337f0230ffdd +size 16120 diff --git a/data/2025/2504_06xxx/2504.06263/images/47003e91f94681858c0acae88d53a55388f807b9d3e547fe579b6f9e179f7526.jpg b/data/2025/2504_06xxx/2504.06263/images/47003e91f94681858c0acae88d53a55388f807b9d3e547fe579b6f9e179f7526.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b50f0c319ee37d69200f5023c3303102be92755 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/47003e91f94681858c0acae88d53a55388f807b9d3e547fe579b6f9e179f7526.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b71ddd304c36d12d7c6c430243bbf46b8b40430dd9a682d385c207df08aa8407 +size 4238 diff --git a/data/2025/2504_06xxx/2504.06263/images/4c37cb908f4690ee150ed69ea6d33667b65dc7b961a9774fc264effd63f957d6.jpg 
b/data/2025/2504_06xxx/2504.06263/images/4c37cb908f4690ee150ed69ea6d33667b65dc7b961a9774fc264effd63f957d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13651f43a1afa7201720164ddcd7c1430b4ff0ad --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/4c37cb908f4690ee150ed69ea6d33667b65dc7b961a9774fc264effd63f957d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adfafb1f2adf54ec9fbf6e83012c3a191fd7a4434dcf600ab3fe1a306e2c9e2d +size 4984 diff --git a/data/2025/2504_06xxx/2504.06263/images/5016e0dda01fc72e711a0637f464fa6dca63956af554e7623e81300c9c91050a.jpg b/data/2025/2504_06xxx/2504.06263/images/5016e0dda01fc72e711a0637f464fa6dca63956af554e7623e81300c9c91050a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0d2d799c19400dffd6bf9e5ef448b5972c82e49 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/5016e0dda01fc72e711a0637f464fa6dca63956af554e7623e81300c9c91050a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc3fee4d3004be0550c10490cedaaa254145ff15c486d52f84704b58a6c34db9 +size 46496 diff --git a/data/2025/2504_06xxx/2504.06263/images/5ba0a3aa19f972201f2521d49b389b0fd36c034b5c010cc63336a808ef94e72e.jpg b/data/2025/2504_06xxx/2504.06263/images/5ba0a3aa19f972201f2521d49b389b0fd36c034b5c010cc63336a808ef94e72e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f688cffd51edf53f3d0e17beb94129d70a2d5369 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/5ba0a3aa19f972201f2521d49b389b0fd36c034b5c010cc63336a808ef94e72e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97f626b80dbecfdf7277bcbb94b3c49d3475b4a626a982d936ae614745e4eed8 +size 107184 diff --git a/data/2025/2504_06xxx/2504.06263/images/64a4597fc022169aa9518e8f8138e7fed6d0e631485be9646c2900262d8721e4.jpg b/data/2025/2504_06xxx/2504.06263/images/64a4597fc022169aa9518e8f8138e7fed6d0e631485be9646c2900262d8721e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71550fbdcdd53def904980bfdf2870b84f741546 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/64a4597fc022169aa9518e8f8138e7fed6d0e631485be9646c2900262d8721e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae8c744dabff752af6bacfbdcd6a6583f1f013a666fc24746af5c162213c0357 +size 1304 diff --git a/data/2025/2504_06xxx/2504.06263/images/7d2fbfce65c3b19406e9aca6bef44f843f580a523a02d36a2e58d59d410b190b.jpg b/data/2025/2504_06xxx/2504.06263/images/7d2fbfce65c3b19406e9aca6bef44f843f580a523a02d36a2e58d59d410b190b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f753b9684c706942002feb200a7ea56e68c36bc3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/7d2fbfce65c3b19406e9aca6bef44f843f580a523a02d36a2e58d59d410b190b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8db75f052352260f8f04117ddb0033f9fefbc3e78bc571b7dad9a04be97313d9 +size 1233 diff --git a/data/2025/2504_06xxx/2504.06263/images/923416a225130c97c601eacd176d61f636bb8514d9daa38229bd075edd1cde88.jpg b/data/2025/2504_06xxx/2504.06263/images/923416a225130c97c601eacd176d61f636bb8514d9daa38229bd075edd1cde88.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75f57451d203d00d752dcd409ef1cb7acb86af4b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/923416a225130c97c601eacd176d61f636bb8514d9daa38229bd075edd1cde88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:2462183d4d56562a94693bb0ffefbe0a612377427747f30ca4b39487e25f8c54 +size 1290 diff --git a/data/2025/2504_06xxx/2504.06263/images/9cce668291ae3db38c36dcf3597d0b593fc88ffc1dd7c1c70776e983a717947f.jpg b/data/2025/2504_06xxx/2504.06263/images/9cce668291ae3db38c36dcf3597d0b593fc88ffc1dd7c1c70776e983a717947f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70cd35ab22eedcc1d734401cd2ac08faa0f6a51e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/9cce668291ae3db38c36dcf3597d0b593fc88ffc1dd7c1c70776e983a717947f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f506a2bf5f8e8781b823f89c2231c9e7889a18ba5290a0c0e2c4c7d2ec311094 +size 35369 diff --git a/data/2025/2504_06xxx/2504.06263/images/9d402d8c09bf0bd876bc6d4063630a82a87d9bd5d4ba548e776d476dfc8b87fc.jpg b/data/2025/2504_06xxx/2504.06263/images/9d402d8c09bf0bd876bc6d4063630a82a87d9bd5d4ba548e776d476dfc8b87fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9d42f52fcbd02b6bb81464d3707237f9bdd2554 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/9d402d8c09bf0bd876bc6d4063630a82a87d9bd5d4ba548e776d476dfc8b87fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:684b1b47965778ffdc55e2683a00a1e43b4c6e766304591b77a86772af0d9a0a +size 21259 diff --git a/data/2025/2504_06xxx/2504.06263/images/a091a9687594e4c4fa9988fcbe29d8f537e1b6f79b8e6716b558e58af1afa32c.jpg b/data/2025/2504_06xxx/2504.06263/images/a091a9687594e4c4fa9988fcbe29d8f537e1b6f79b8e6716b558e58af1afa32c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..63c573c3b952d9de99f6a503bb7bcc4a72366a76 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/a091a9687594e4c4fa9988fcbe29d8f537e1b6f79b8e6716b558e58af1afa32c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a4a9d80d4dac6c8a97ff8bda723e15e0a1200469a3f3d74676c3b8e90e96821 +size 93497 diff --git a/data/2025/2504_06xxx/2504.06263/images/aa3c3ece9f9bd13fb92b24dc92df6642857ae3e037a781c4949e294265ef189e.jpg b/data/2025/2504_06xxx/2504.06263/images/aa3c3ece9f9bd13fb92b24dc92df6642857ae3e037a781c4949e294265ef189e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..836a42deb75a2f2b09f0201b8fc3169d81ed7155 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/aa3c3ece9f9bd13fb92b24dc92df6642857ae3e037a781c4949e294265ef189e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eccdfad5f82843d9d97677f42a3b47b920831877f3de0b61029502c6c4305926 +size 27732 diff --git a/data/2025/2504_06xxx/2504.06263/images/afdb539875426e118405d4db2d308c5bb36d4e628c45078b40a4de7fcddde2a5.jpg b/data/2025/2504_06xxx/2504.06263/images/afdb539875426e118405d4db2d308c5bb36d4e628c45078b40a4de7fcddde2a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..822403ee8518a937de26b602e4f41e854bfbb5c1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/afdb539875426e118405d4db2d308c5bb36d4e628c45078b40a4de7fcddde2a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e708065291fc41553edf7a192215653c0ef512dd440dc2934b8ce3c661d7d8de +size 41805 diff --git a/data/2025/2504_06xxx/2504.06263/images/b122d2212096046f8e937b7fe80bc42797f0338f419bcb3492121a56f357ccd3.jpg b/data/2025/2504_06xxx/2504.06263/images/b122d2212096046f8e937b7fe80bc42797f0338f419bcb3492121a56f357ccd3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d50a8e2ee36aed14117535a535da22d06ba1d71 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06263/images/b122d2212096046f8e937b7fe80bc42797f0338f419bcb3492121a56f357ccd3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5b10b32a6049a805585b63722d50db75c1635005cee729d451f183781dd3d53 +size 27379 diff --git a/data/2025/2504_06xxx/2504.06263/images/ba725e00ce2f094419da2329d07c4a29c4c5b39cf31f5c5cab46aca8243a4b94.jpg b/data/2025/2504_06xxx/2504.06263/images/ba725e00ce2f094419da2329d07c4a29c4c5b39cf31f5c5cab46aca8243a4b94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea96d0cb0ad0eef545d4d54e379cd67aa5482bc0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/ba725e00ce2f094419da2329d07c4a29c4c5b39cf31f5c5cab46aca8243a4b94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a8dea27d02adfc723879435e1dd727721c99b4e7fa2efcc53e6106a53363971 +size 1260 diff --git a/data/2025/2504_06xxx/2504.06263/images/c4f306ac16842311f85921e1c2a3d115725cdb762eb9aa886166cb927204f427.jpg b/data/2025/2504_06xxx/2504.06263/images/c4f306ac16842311f85921e1c2a3d115725cdb762eb9aa886166cb927204f427.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c1b6b5577593a1b0e6d3f3c859e4f9223b4c52a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/c4f306ac16842311f85921e1c2a3d115725cdb762eb9aa886166cb927204f427.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:918fe608feef3970e2cd23c584e199a88b823df6718f8df9e86ddea3b8e8a650 +size 109811 diff --git a/data/2025/2504_06xxx/2504.06263/images/c5d2637f15e3c2c9618d7e718252bc140df50e4b622c6efe968a8714b6abd547.jpg b/data/2025/2504_06xxx/2504.06263/images/c5d2637f15e3c2c9618d7e718252bc140df50e4b622c6efe968a8714b6abd547.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5845b258466e75248c6a9167706bc0c03f17a0c6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/c5d2637f15e3c2c9618d7e718252bc140df50e4b622c6efe968a8714b6abd547.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14a58cb5cb6018c7204b74151ba02e9a0fafbe3c4a657d6c93bf9ad4b97e05fa +size 114939 diff --git a/data/2025/2504_06xxx/2504.06263/images/cbd46bc6376976fecf43d9880323bc6e5b2f57aa205285f95fbabc54caac45f6.jpg b/data/2025/2504_06xxx/2504.06263/images/cbd46bc6376976fecf43d9880323bc6e5b2f57aa205285f95fbabc54caac45f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d26c2347b849b6ca3de11cd3e3faa6592a03b2c1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/cbd46bc6376976fecf43d9880323bc6e5b2f57aa205285f95fbabc54caac45f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ca94dbaed28af416072f430355204a4e023b88c748d2f221d2efd2799435371 +size 42078 diff --git a/data/2025/2504_06xxx/2504.06263/images/d6ddb92fedfc27029a0187a1301eb25f0cd201b3585a66c8e60afa541a6c5379.jpg b/data/2025/2504_06xxx/2504.06263/images/d6ddb92fedfc27029a0187a1301eb25f0cd201b3585a66c8e60afa541a6c5379.jpg new file mode 100644 index 0000000000000000000000000000000000000000..306b2f3e37d9867aad4f681910f04c6baa7630cf --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/d6ddb92fedfc27029a0187a1301eb25f0cd201b3585a66c8e60afa541a6c5379.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1f60b261903cf4df5a0fda3e306c6d8aeac030bd26d95e44716b51b79505ce2 +size 1667 diff --git a/data/2025/2504_06xxx/2504.06263/images/dbd2c1184a18cb173aa8a27178432adf7f48ac0b537a06ddb31d90e52a5f32cd.jpg 
b/data/2025/2504_06xxx/2504.06263/images/dbd2c1184a18cb173aa8a27178432adf7f48ac0b537a06ddb31d90e52a5f32cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2cd9cb8f168dd9f8e870dfd9bff2c4075211b1a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/dbd2c1184a18cb173aa8a27178432adf7f48ac0b537a06ddb31d90e52a5f32cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76d4cbb0c91ed74d466899c45c08efbc94efcc4b9377198a1f2a3266e687329e +size 55573 diff --git a/data/2025/2504_06xxx/2504.06263/images/f2a3bf5441468ccd44b23b048b566fd1b98834e333baba08f597b0619c99d3b5.jpg b/data/2025/2504_06xxx/2504.06263/images/f2a3bf5441468ccd44b23b048b566fd1b98834e333baba08f597b0619c99d3b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d94533a33b262ba34bb0d5de51ff96b0178cde51 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/f2a3bf5441468ccd44b23b048b566fd1b98834e333baba08f597b0619c99d3b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72def42f57644e1995aecdd9758ae1e8a6e83c845d22487ffd961d8292af6c54 +size 29343 diff --git a/data/2025/2504_06xxx/2504.06263/images/f4018d8e6b84d4441104ddcc05ee39760d0faf2a95c81a23d8f047c80b19142b.jpg b/data/2025/2504_06xxx/2504.06263/images/f4018d8e6b84d4441104ddcc05ee39760d0faf2a95c81a23d8f047c80b19142b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3dad7a732c17c3d5944f197ed5ecc3758dff6d76 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/f4018d8e6b84d4441104ddcc05ee39760d0faf2a95c81a23d8f047c80b19142b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96468164cd8cfcfccf34c6d1d62bfc60f856a8c8507a065983f3b818bcffaaeb +size 42266 diff --git a/data/2025/2504_06xxx/2504.06263/images/f8339bb823ff6b5c65739fe1d241b3c850f32158ec08e4048b05299bf9b4bd5c.jpg b/data/2025/2504_06xxx/2504.06263/images/f8339bb823ff6b5c65739fe1d241b3c850f32158ec08e4048b05299bf9b4bd5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30568197f95a4af9a891289b4fea57302a3ca807 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/images/f8339bb823ff6b5c65739fe1d241b3c850f32158ec08e4048b05299bf9b4bd5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79f6217ec9540f76732416dbd96c0cdd21c56e9f4448cef7c2d19a39e3e5c7ff +size 6160 diff --git a/data/2025/2504_06xxx/2504.06263/layout.json b/data/2025/2504_06xxx/2504.06263/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a354f1382a3872fac2801cd5f2fd5354d1d4a414 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06263/layout.json @@ -0,0 +1,10403 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 130, + 97, + 479, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 97, + 479, + 135 + ], + "spans": [ + { + "bbox": [ + 130, + 97, + 479, + 135 + ], + "type": "text", + "content": "OmniSVG: A Unified Scalable Vector Graphics Generation Model" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "spans": [ + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": "Yiying Yang" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Wei Cheng" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + 
"content": "^{2*}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Sijin Chen" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Xianfang Zeng" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Fukun Yin" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " \nJiaxu Zhang" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Liao Wang" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Gang Yu" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{2\\ddagger}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Xingjun Ma" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{1\\ddagger}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Yu-Gang Jiang" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " Fudan University " + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 173, + 479, + 217 + ], + "type": "text", + "content": " StepFun" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 137, + 221, + 153, + 236 + ], + "blocks": [ + { + "bbox": [ + 137, + 221, + 153, + 236 + ], + "lines": [ + { + "bbox": [ + 137, + 221, + 153, + 236 + ], + "spans": [ + { + "bbox": [ + 137, + 221, + 153, + 236 + ], + "type": "image", + "image_path": "ba725e00ce2f094419da2329d07c4a29c4c5b39cf31f5c5cab46aca8243a4b94.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 154, + 224, + 203, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 224, + 203, + 234 + ], + "spans": [ + { + "bbox": [ + 154, + 224, + 203, + 234 + ], + "type": "text", + "content": "Project Page" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 213, + 222, + 228, + 235 + ], + "blocks": [ + { + "bbox": [ + 213, + 222, + 228, + 235 + ], + "lines": [ + { + "bbox": [ + 213, + 222, + 228, + 235 + ], + "spans": [ + { + "bbox": [ + 213, + 222, + 228, + 235 + ], + "type": "image", + "image_path": "923416a225130c97c601eacd176d61f636bb8514d9daa38229bd075edd1cde88.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 230, + 224, + 281, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 224, + 281, + 233 + ], + "spans": [ + { + "bbox": [ + 230, + 224, + 281, + 233 + ], + "type": "text", + "content": "MMSVG-2M" + } + ] + } + ], + "index": 6 + }, + { + 
"type": "image", + "bbox": [ + 292, + 221, + 306, + 235 + ], + "blocks": [ + { + "bbox": [ + 292, + 221, + 306, + 235 + ], + "lines": [ + { + "bbox": [ + 292, + 221, + 306, + 235 + ], + "spans": [ + { + "bbox": [ + 292, + 221, + 306, + 235 + ], + "type": "image", + "image_path": "7d2fbfce65c3b19406e9aca6bef44f843f580a523a02d36a2e58d59d410b190b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 224, + 368, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 224, + 368, + 233 + ], + "spans": [ + { + "bbox": [ + 307, + 224, + 368, + 233 + ], + "type": "text", + "content": "MMSVGBench" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 378, + 221, + 394, + 235 + ], + "blocks": [ + { + "bbox": [ + 378, + 221, + 394, + 235 + ], + "lines": [ + { + "bbox": [ + 378, + 221, + 394, + 235 + ], + "spans": [ + { + "bbox": [ + 378, + 221, + 394, + 235 + ], + "type": "image", + "image_path": "1f92246b71d2b71347d70f2d6d09408e5ac28af07385da8375387d51730ec9e0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 395, + 224, + 424, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 224, + 424, + 232 + ], + "spans": [ + { + "bbox": [ + 395, + 224, + 424, + 232 + ], + "type": "text", + "content": "Models" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 435, + 221, + 456, + 235 + ], + "blocks": [ + { + "bbox": [ + 435, + 221, + 456, + 235 + ], + "lines": [ + { + "bbox": [ + 435, + 221, + 456, + 235 + ], + "spans": [ + { + "bbox": [ + 435, + 221, + 456, + 235 + ], + "type": "image", + "image_path": "64a4597fc022169aa9518e8f8138e7fed6d0e631485be9646c2900262d8721e4.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 451, + 224, + 471, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 451, + 224, + 471, + 232 + ], + "spans": [ + { + "bbox": [ + 451, + 224, + 471, + 232 + ], + "type": "text", + "content": "Code" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 137, + 260, + 155, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 260, + 155, + 268 + ], + "spans": [ + { + "bbox": [ + 137, + 260, + 155, + 268 + ], + "type": "text", + "content": "Icon" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 113, + 270, + 181, + 373 + ], + "blocks": [ + { + "bbox": [ + 113, + 270, + 181, + 373 + ], + "lines": [ + { + "bbox": [ + 113, + 270, + 181, + 373 + ], + "spans": [ + { + "bbox": [ + 113, + 270, + 181, + 373 + ], + "type": "image", + "image_path": "159b5270617b8ba36e1cfeaf7311e08671a5b1b0eb436704a4b5cee27646eaeb.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 403, + 504, + 453 + ], + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 453 + ], + "type": "text", + "content": "Figure 1: OmniSVG is capable of autoregressively generating high-quality Scalable Vector Graphs (SVG) across a wide spectrum of complexity, from simple icons to intricate anime characters. OmniSVG demonstrates remarkable versatility in generating high-quality SVGs adhering to multimodal instructions, covering tasks like Text-to-SVG, Image-to-SVG, and Character-Reference SVG, making it a powerful and flexible solution for diverse creative tasks." 
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 213, + 260, + 257, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 260, + 257, + 268 + ], + "spans": [ + { + "bbox": [ + 213, + 260, + 257, + 268 + ], + "type": "text", + "content": "Illustration" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 189, + 271, + 275, + 373 + ], + "blocks": [ + { + "bbox": [ + 189, + 271, + 275, + 373 + ], + "lines": [ + { + "bbox": [ + 189, + 271, + 275, + 373 + ], + "spans": [ + { + "bbox": [ + 189, + 271, + 275, + 373 + ], + "type": "image", + "image_path": "4279ac5b0b7bd39e0fbef10bbf6251dfdc8dd6f9b0ef61548e401177ef229a96.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 326, + 259, + 367, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 259, + 367, + 268 + ], + "spans": [ + { + "bbox": [ + 326, + 259, + 367, + 268 + ], + "type": "text", + "content": "Character" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 279, + 271, + 419, + 373 + ], + "blocks": [ + { + "bbox": [ + 279, + 271, + 419, + 373 + ], + "lines": [ + { + "bbox": [ + 279, + 271, + 419, + 373 + ], + "spans": [ + { + "bbox": [ + 279, + 271, + 419, + 373 + ], + "type": "image", + "image_path": "aa3c3ece9f9bd13fb92b24dc92df6642857ae3e037a781c4949e294265ef189e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 437, + 252, + 477, + 260 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 437, + 252, + 477, + 260 + ], + "spans": [ + { + "bbox": [ + 437, + 252, + 477, + 260 + ], + "type": "text", + "content": "Text-to-SVG" + } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 433, + 262, + 483, + 285 + ], + "blocks": [ + { + "bbox": [ + 433, + 262, + 483, + 285 + ], + "lines": [ + { + "bbox": [ + 433, + 262, + 483, + 285 + ], + "spans": [ + { + "bbox": [ + 433, + 262, + 483, + 285 + ], + "type": "image", + "image_path": "22c950090cdb82fa973bb44474a61e96feba1d2d5b4006804615b7cc6584a3b0.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 484, + 262, + 499, + 285 + ], + "blocks": [ + { + "bbox": [ + 484, + 262, + 499, + 285 + ], + "lines": [ + { + "bbox": [ + 484, + 262, + 499, + 285 + ], + "spans": [ + { + "bbox": [ + 484, + 262, + 499, + 285 + ], + "type": "image", + "image_path": "d6ddb92fedfc27029a0187a1301eb25f0cd201b3585a66c8e60afa541a6c5379.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 437, + 297, + 482, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 437, + 297, + 482, + 304 + ], + "spans": [ + { + "bbox": [ + 437, + 297, + 482, + 304 + ], + "type": "text", + "content": "Image-to-SVG" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 437, + 304, + 493, + 335 + ], + "blocks": [ + { + "bbox": [ + 437, + 304, + 493, + 335 + ], + "lines": [ + { + "bbox": [ + 437, + 304, + 493, + 335 + ], + "spans": [ + { + "bbox": [ + 437, + 304, + 493, + 335 + ], + "type": "image", + "image_path": "47003e91f94681858c0acae88d53a55388f807b9d3e547fe579b6f9e179f7526.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 435, + 340, 
+ 497, + 372 + ], + "blocks": [ + { + "bbox": [ + 435, + 340, + 497, + 372 + ], + "lines": [ + { + "bbox": [ + 435, + 340, + 497, + 372 + ], + "spans": [ + { + "bbox": [ + 435, + 340, + 497, + 372 + ], + "type": "image", + "image_path": "4c37cb908f4690ee150ed69ea6d33667b65dc7b961a9774fc264effd63f957d6.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 159, + 383, + 372, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 383, + 372, + 393 + ], + "spans": [ + { + "bbox": [ + 159, + 383, + 372, + 393 + ], + "type": "text", + "content": "Samples Generated by OmniSVG with Wide Complexity Range" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 447, + 384, + 484, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 447, + 384, + 484, + 392 + ], + "spans": [ + { + "bbox": [ + 447, + 384, + 484, + 392 + ], + "type": "text", + "content": "Versatility" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 281, + 467, + 329, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 467, + 329, + 479 + ], + "spans": [ + { + "bbox": [ + 281, + 467, + 329, + 479 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 140, + 491, + 469, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 491, + 469, + 668 + ], + "spans": [ + { + "bbox": [ + 140, + 491, + 469, + 668 + ], + "type": "text", + "content": "Scalable Vector Graphics (SVG) is an important image format widely adopted in graphic design because of their resolution independence and editability. The development of autonomous SVG generation workflows is continuously drawing attention from both designers and researchers in the AIGC community. However, existing methods either produce unstructured outputs at huge computational cost or are limited to generating monochrome icons of over-simplified structures. To produce high-quality and complex SVG adhering to multi-modal instructions, we propose OmniSVG, a unified SVG generation framework that inherits knowledge from a pre-trained Vision-Language Model (VLM). By parameterizing SVG commands and coordinates into discrete token sequences, the auto-regressive nature enables us to seamlessly adapt modern VLMs to the direct SVG generation. To further advance the development of SVG synthesis, we introduce MMSVG-2M, a multimodal dataset with two million richly annotated SVG assets, along with a standardized evaluation protocol for conditional SVG generation tasks. Extensive experiments show that OmniSVG outperforms existing methods and demonstrates its potential for integration into professional SVG design workflows." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 224, + 35, + 567 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 224, + 35, + 567 + ], + "spans": [ + { + "bbox": [ + 14, + 224, + 35, + 567 + ], + "type": "text", + "content": "arXiv:2504.06263v3 [cs.CV] 1 Dec 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 692, + 352, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 692, + 352, + 703 + ], + "spans": [ + { + "bbox": [ + 121, + 692, + 352, + 703 + ], + "type": "text", + "content": "* Yiying Yang and Wei Cheng contributed equally to this work." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 123, + 703, + 217, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 703, + 217, + 713 + ], + "spans": [ + { + "bbox": [ + 123, + 703, + 217, + 713 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 123, + 703, + 217, + 713 + ], + "type": "text", + "content": " Corresponding Authors." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 731, + 385, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 385, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 385, + 742 + ], + "type": "text", + "content": "39th Conference on Neural Information Processing Systems (NeurIPS 2025)." + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 504, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 504, + 162 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 504, + 162 + ], + "type": "text", + "content": "Scalable Vector Graphics (SVG) have become a cornerstone of modern digital design because of their resolution independence, compact file size, and inherent editability. Widely adopted in professional workflows from UI/UX design to industrial CAD systems, SVG enables precise manipulation of geometric primitives (e.g., Bezier curves, polygons) while maintaining high precision and consistent visual quality across varying resolutions. However, creating high-quality SVG content remains challenging for non-experts, requiring mastery of specialized tools or intricate XML syntax." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 167, + 504, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 504, + 189 + ], + "type": "text", + "content": "Existing methods adopt either optimization-based methods or auto-regressive approaches to generate SVG contents." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 194, + 506, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 506, + 315 + ], + "type": "text", + "content": "The optimization-based methods [34, 12, 29] iteratively refine the SVG parameters by minimizing the differences between the input image and the raster image created by differentiable vector graphics rasterizers. Though these methods are sufficient for reconstructing SVG icons, they suffer from significant computational overhead when scaling up to more intricate samples and produce unstructured outputs with redundant anchor points, harming the editability of the reconstructed SVG samples. In contrast, auto-regressive methods build transformer models or adapt pre-trained Large Language Models (LLMs) to directly generate XML parameters [59] or codes [56, 42] representing SVGs. Benefiting from the end-to-end learning pipeline, the auto-regressive method is a more scalable approach [5] as it can learn directly from a large collection of SVG samples. 
However, existing auto-regressive approaches are limited to basic SVG contents [11, 24, 53] because of the limited context length and the scarcity of complex SVG data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 319, + 506, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 506, + 430 + ], + "type": "text", + "content": "In this paper, we propose OmniSVG that harnesses native VLMs [1] for various end-to-end multimodal SVG generation tasks. By parameterizing SVG coordinates and commands into discrete tokens, OmniSVG decouples structural logic from low-level geometry, mitigating the \"coordinate hallucination\" problem prevalent in code-based LLMs, and produces vivid and colorful SVG results. Additionally, the next token prediction training objective enables OmniSVG to complete SVGs with diverse generation results given some partial observations. Compared to traditional auto-regressive SVG generation methods, OmniSVG is able to parameterize SVGs exceeding " + }, + { + "bbox": [ + 104, + 319, + 506, + 430 + ], + "type": "inline_equation", + "content": "30k" + }, + { + "bbox": [ + 104, + 319, + 506, + 430 + ], + "type": "text", + "content": " tokens, facilitating the generation of detailed and complex SVG contents. Building upon pre-trained VLMs, our method natively integrates the ability to reason upon visual and textual instructions to synthesize editable, high-fidelity SVGs across diverse domains, from icons to intricate illustrations and anime characters." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 434, + 504, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 468 + ], + "type": "text", + "content": "To advance the development of SVG synthesis, we introduce MMSVG-2M, a multi-modal SVG synthesis dataset with two million richly annotated assets, encompassing icons, illustrations, and anime designs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 506 + ], + "type": "text", + "content": "We also establish a standardized evaluation protocol, MMSVG-Bench, for \"Text-to-SVG\" and \"Image-to-SVG\" generation. Extensive experiments show that OmniSVG can produce highly detailed and complex SVG contents, surpassing prior art both quantitatively and qualitatively." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 510, + 289, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 289, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 289, + 521 + ], + "type": "text", + "content": "To summarize, our key contributions include:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 531, + 503, + 635 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 132, + 531, + 503, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 531, + 503, + 563 + ], + "spans": [ + { + "bbox": [ + 132, + 531, + 503, + 563 + ], + "type": "text", + "content": "- We introduce OmniSVG, a family of end-to-end multimodal SVG generators that leverage native VLMs for generating complex and detailed SVGs, from simple icons to intricate anime characters." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 568, + 503, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 568, + 503, + 599 + ], + "spans": [ + { + "bbox": [ + 132, + 568, + 503, + 599 + ], + "type": "text", + "content": "- We present MMSVG-2M, a large-scale dataset comprising two million SVG assets, along with a standardized evaluation protocol for various multi-modal SVG generation tasks providing a comprehensive resource for future research." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 605, + 503, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 605, + 503, + 635 + ], + "spans": [ + { + "bbox": [ + 132, + 605, + 503, + 635 + ], + "type": "text", + "content": "- Extensive experiments show that OmniSVG surpasses prior SVG generation methods both qualitatively and quantitatively, highlighting its potential for integration into professional SVG design workflows." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 653, + 203, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 653, + 203, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 203, + 666 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "SVG Generation. Early attempts to generating SVGs directly utilize architectures like RNNs [18, 41, 19, 44, 45], VAEs [4, 32, 48, 46, 51], and Transformers [4, 57] to compress SVG commands into latent representations. Meanwhile, DeepSVG [4] further parameterizes SVGs using a dual transformer architecture but struggles with geometric consistency. Recently, the advent of large language models" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "(LLMs) [30, 64, 52, 61, 5, 6, 63, 62, 49] unleashes the potential of generating SVGs via XML code synthesis [59, 56, 42]. However, the limited context length of existing LLM-based SVG generation methods [56, 42, 59] poses significant challenges in handling complex SVGs that exceed " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "10k" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": " tokens. In this paper, we explore the potential of native Vision-Language Models (VLMs) in multi-modal SVG generation. By combining pre-trained VLMs with SVG command parameterization, we validate that OmniSVG is able to follow multi-modal instructions and generate vivid and complex SVGs." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 144, + 506, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 506, + 222 + ], + "type": "text", + "content": "Image Vectorization. Recent advancements in vectorization harness diffusion models paired with differentiable rasterizers, using techniques like score distillation sampling [37, 22, 7] and specialized regularizers [29, 34] to convert raster images into SVG paths. While these methods achieve remarkable results, they face limitations such as over-smoothing, color over-saturation, and lack of editability, often producing tangled paths that fail to capture hierarchical structures inherent in professional SVG designs. In this paper, we present an end-to-end approach that follows multi-modal instructions to generate high-quality SVGs with improved path clarity and editability." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 225, + 506, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 225, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 225, + 506, + 357 + ], + "type": "text", + "content": "SVG Datasets and Benchmarks. The lack of suitable datasets for complex SVG structures presents a significant challenge. Existing datasets [11, 24, 53] primarily focus on simplified path-based SVGs or monochrome icons, overlooking the intricate layered structures and rich color semantics found in real-world designs. For example, FIGR-8-SVG [11] focuses on monochromatic icons, while StarVector [42] proposes categorized datasets, including illustrations, icons, emojis, and fonts. Therefore, existing datasets only present simple SVG samples that do not exceed " + }, + { + "bbox": [ + 104, + 225, + 506, + 357 + ], + "type": "inline_equation", + "content": "8.2k" + }, + { + "bbox": [ + 104, + 225, + 506, + 357 + ], + "type": "text", + "content": " tokens, failing to capture the complexities of layered structures and rich color semantics. Benchmark evaluations, such as VGBench [70], further highlight gaps in multi-format testing and the absence of comprehensive coverage for illustrative SVGs. To this end, we introduce MMSVG-2M, a multimodal SVG synthesis dataset comprising two million richly annotated assets, including icons, illustrations, and complex anime designs. We also present a standardized evaluation protocol, MMSVG-Bench, to evaluate diverse multi-modal SVG generation tasks with varying complexity." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 373, + 222, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 222, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 222, + 385 + ], + "type": "text", + "content": "3 OmniSVG Dataset" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 399, + 504, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 504, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 504, + 455 + ], + "type": "text", + "content": "We present MMSVG-2M, a large-scale SVG dataset with two million SVG samples covering website icons, illustrations, graphic designs, anime characters, and etc (Sec. 3.1). To promote the downstream development of SVG generation methods, we also introduce MMSVG-Bench, a standardized evaluation protocol for a series of multi-modal instruction following tasks for conditional SVG generation (Sec. 3.2)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 468, + 188, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 188, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 188, + 479 + ], + "type": "text", + "content": "3.1 MMSVG-2M" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 490, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 504, + 567 + ], + "type": "text", + "content": "Data Source. With increasing visual complexity, MMSVG-2M consists of three subsets, 1) the icon subset MMSVG-Icon collected from Iconfont, 2) the illustration subset MMSVG-Illustration sourced from IconSount, and 3) the complex anime character subset MMSVG-Character both curated from Freepik and created by our data creation pipeline as shown in Fig. 2. All these websites are online platforms where users can publish and share SVGs, encompassing a broad variety of categories. Specifically, our collection of MMSVG-2M contains 1.1 million icons, 0.5 million illustrations, and 0.4 million anime characters as shown in Tab. 6." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 573, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 573, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 573, + 506, + 628 + ], + "type": "text", + "content": "Data Curation. We extract SVG samples with a comprehensive dedduplication process based on filenames, SVG code, and metadata. We first fit the collected SVGs within a viewbox of " + }, + { + "bbox": [ + 104, + 573, + 506, + 628 + ], + "type": "inline_equation", + "content": "200 \\times 200" + }, + { + "bbox": [ + 104, + 573, + 506, + 628 + ], + "type": "text", + "content": ". Then, we employ an off-the-shelf VLM, specifically BLIP-2 [28], to generate captions for the SVGs. Please find more samples from the MMSVG-2M dataset in Fig. 8, and instruction templates in Sec. A.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "SVG Simplification is an essential procedure in SVG data cleansing, since the over-complicated XML grammars in the crawled SVG data will lead to ambiguities while representing basic shapes. To standardize training and evaluation, we simplify all SVG commands with atomic commands as shown in Tab. 1. Inspired by FIGR-8-SVG [11] and IconShop [57], we remove all attributes and simplify each SVG with five basic commands, including \"Move To\" (M), \"Line To\" (L), \"Cubic Bezier\" (C), \"Elliptical Arc\" (A), \"ClosePath\" (Z). The introduction of atomic commands further removes the ambiguities, as complex XML grammars can be approximated with the combination of several atomic commands. 
To efficiently produce a unified and less complex data structure, we utilize" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 123, + 118, + 491, + 381 + ], + "blocks": [ + { + "bbox": [ + 104, + 78, + 504, + 107 + ], + "lines": [ + { + "bbox": [ + 104, + 78, + 504, + 107 + ], + "spans": [ + { + "bbox": [ + 104, + 78, + 504, + 107 + ], + "type": "text", + "content": "Table 1: SVG Draw Commands. Draw commands used in this work along with their arguments and a visualization are listed. The start-position " + }, + { + "bbox": [ + 104, + 78, + 504, + 107 + ], + "type": "inline_equation", + "content": "(x_{1},y_{1})" + }, + { + "bbox": [ + 104, + 78, + 504, + 107 + ], + "type": "text", + "content": " is implicitly defined as the end-position of the preceding command." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 123, + 118, + 491, + 381 + ], + "lines": [ + { + "bbox": [ + 123, + 118, + 491, + 381 + ], + "spans": [ + { + "bbox": [ + 123, + 118, + 491, + 381 + ], + "type": "table", + "html": "
Command | Arguments | Description | Visualization
<SOP> | — | ‘Start-of-Path’ token. | —
M (MoveTo) | x2, y2 | Move the cursor to the end-point (x2, y2) without drawing anything. | (x2, y2)
L (LineTo) | x2, y2 | Draw a line to the point (x2, y2). | (x1, y1) (x2, y2)
C (Cubic Bézier) | qx1, qy1, qx2, qy2, x2, y2 | Draw a cubic Bézier curve with control points (qx1, qy1), (qx2, qy2) and end-point (x2, y2). | (x1, y1) (qx1, qy1) (qx2, qy2) (x2, y2)
A (Elliptical Arc) | rx, ry, φ, fA, fS, x2, y2 | Draw an elliptical arc with radii rx and ry (semi-major and semi-minor axes), rotated by angle φ to the x-axis, and end-point (x2, y2). | (x1, y1) (x2, y2)
Z (ClosePath) | — | Close the path by moving the cursor back to the path's starting position (x0, y0). | (x0, y0) (x1, y1)
F (Fill) | fill | Draw the fill attribute of the path. | —
<EOS> | — | ‘End-of-SVG’ token. | —
", + "image_path": "5ba0a3aa19f972201f2521d49b389b0fd36c034b5c010cc63336a808ef94e72e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 393, + 504, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 393, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 393, + 504, + 427 + ], + "type": "text", + "content": "picosvg to remove grammars like \"group\" and \"transform\", and simplify the complex commands to atomic path commands. It is worth noting that atomic path commands are sufficient to represent complex SVGs shown in Fig. 1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 441, + 200, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 200, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 200, + 452 + ], + "type": "text", + "content": "3.2 MMSVG-Bench" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 462, + 504, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 504, + 572 + ], + "type": "text", + "content": "To compensate for the vacancy of standardized and open evaluation for SVG generation, we introduce MMSVG-Bench, a comprehensive benchmark for multi-modal SVG generation. We require the corresponding benchmark to be a sufficient verification whether a model is practically useful in real-world scenarios, and avoid the excessive similarity between the benchmark input data and training data as in traditional train/test splits. Therefore, we opt to generate the benchmark inputs with GPT-4o. Specifically, for Text-to-SVG task, we synthesize 150 textual prompts for each SVG subset (i.e. Icon and Illustration). For Image-to-SVG task, we synthesize extra 150 textual descriptions, and prompt GPT-4o to generate vector-style images with transparent backgrounds based on the above texts as the ground truth visual samples. We focus on both the visual quality and semantics of the generation results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 578, + 504, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 504, + 612 + ], + "type": "text", + "content": "Text-to-SVG requires a model to generate SVGs from text instructions. We measure the visual quality with Frechet Inception Distance (FID) [50], aesthetic appeal with Aesthetic score [43], text-SVG alignment with CLIP score [38], and Human Preference Scores (HPS) [58]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 617, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 506, + 662 + ], + "type": "text", + "content": "Image-to-SVG evaluates a model's ability to convert images into SVGs. To quantify the distance between the input and output SVG, we calculate the cosine similarity of DinoV2 features (DinoScore) [35], Structural Similarity Index (SSIM) [54], Learned Perceptual Image Patch Similarity (LPIPS) [66], and Mean Squared Error (MSE)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 667, + 506, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 506, + 701 + ], + "type": "text", + "content": "Character-Reference SVG Generation evaluates a model's ability to generate novel SVGs while keeping the profile of the characters depicted in the input image. Different from image-to-SVG, the model does not reconstruct, but generates a specific character SVG for the input image (see" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 121, + 712, + 303, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 712, + 303, + 723 + ], + "spans": [ + { + "bbox": [ + 121, + 712, + 303, + 723 + ], + "type": "text", + "content": "https://github.com/googlefonts/picosvg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 71, + 504, + 232 + ], + "blocks": [ + { + "bbox": [ + 107, + 71, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 107, + 71, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 107, + 71, + 504, + 232 + ], + "type": "image", + "image_path": "a091a9687594e4c4fa9988fcbe29d8f537e1b6f79b8e6716b558e58af1afa32c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 237, + 504, + 270 + ], + "lines": [ + { + "bbox": [ + 104, + 237, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 237, + 504, + 270 + ], + "type": "text", + "content": "Figure 2: Overview of OmniSVG. OmniSVG is built on a pre-trained vision-language model Qwen2.5-VL and incorporates an SVG tokenizer. The model tokenizes both text and image inputs as prefix tokens, while the SVG tokenizer encodes vector graphics commands into a unified representation space." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "type": "text", + "content": "Fig. 5). We evaluate the alignment between input character images and generated SVGs by prompting GPT-4o [21] to generate a score ranging from 1 to 10, the higher the better. [15, 23, 17]" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 315, + 179, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 315, + 179, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 179, + 327 + ], + "type": "text", + "content": "4 OmniSVG" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 340, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 504, + 373 + ], + "type": "text", + "content": "To support end-to-end training for multi-modal SVG generation, OmniSVG parameterizes a series of atomic SVG path commands into a sequence before feeding into a pre-trained VLM with multi-modal instructions." 
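To make this parameterization concrete before the formal notation below, here is a hedged toy example (coordinates, color, and helper name are illustrative, not from the paper's codebase) of a single filled shape written as the kind of flat atomic-command sequence OmniSVG operates on, using only the commands of Tab. 1.

```python
# Illustrative toy example: a filled triangle inside the 200x200 viewbox as a flat
# sequence of the atomic commands from Tab. 1 (F for the fill attribute, then M/L/Z).
triangle = [
    ("F", ("#ff0000",)),   # fill attribute of the path
    ("M", (100, 30)),      # move to the apex without drawing
    ("L", (170, 160)),     # line to the bottom-right corner
    ("L", (30, 160)),      # line to the bottom-left corner
    ("Z", ()),             # close the path back to (100, 30)
]


def to_svg_path(commands) -> str:
    """Render the flat command sequence back into an SVG <path> element."""
    fill = next(args[0] for cmd, args in commands if cmd == "F")
    d = " ".join(" ".join([cmd, *map(str, args)]) for cmd, args in commands if cmd != "F")
    return f'<path d="{d}" fill="{fill}"/>'


print(to_svg_path(triangle))
# <path d="M 100 30 L 170 160 L 30 160 Z" fill="#ff0000"/>
```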
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": "SVG Tokenizer. As illustrated in Sec. 3, our MMSVG-2M dataset simplifies an SVG by removing all attributes and using five basic path commands (see Tab. 1). After the simplification, an SVG script " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": " is represented as the combination of " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": " paths, " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "G = \\{P_i\\}_{i=1}^M" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "P_i" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": "-th path containing " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "N_i" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": " commands, " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "P_i = \\{C_i^j\\}_{j=1}^{N_i}" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "C_i^j" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": "-th command in the " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": "-th path. Each command is represented as " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "C_i^j = (U_i^j, V_i^j)" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": ", containing both the command type identifier " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "U_i^j \\in \\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\mathrm{Z}\\}" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": " and the corresponding location argument " + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "inline_equation", + "content": "V_i^j" + }, + { + "bbox": [ + 104, + 378, + 504, + 453 + ], + "type": "text", + "content": "." 
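The hierarchy above maps naturally onto a nested data structure; the following sketch (illustrative names, not the released implementation) mirrors G, P_i, and C_i^j = (U_i^j, V_i^j) directly.

```python
# Sketch of the notation above: an SVG G is a list of paths P_i,
# each path a list of commands C_i^j = (U_i^j, V_i^j).
from dataclasses import dataclass
from typing import List, Tuple


@dataclass
class Command:               # C_i^j
    kind: str                # U_i^j in {M, L, C, A, Z} (plus F once colors are added)
    args: Tuple[float, ...]  # V_i^j, the location arguments


@dataclass
class Path:                  # P_i = {C_i^j}
    commands: List[Command]


@dataclass
class SVG:                   # G = {P_i}
    paths: List[Path]


# A one-path SVG with two commands: move to (10, 20), then draw a line to (150, 80).
g = SVG(paths=[Path(commands=[Command("M", (10.0, 20.0)), Command("L", (150.0, 80.0))])])
```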
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "text", + "content": "To generate colored SVG contents, we assign special tokens for hex values to control the \"Fill\" (F) attribute, distinguishing it from the original SVG commands and coordinates. To this end, we are able to use a total six types of commands " + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "inline_equation", + "content": "U_{i}^{j} \\in \\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\mathrm{Z}, \\mathrm{F}\\}" + }, + { + "bbox": [ + 104, + 457, + 504, + 503 + ], + "type": "text", + "content": " to parameterize a colored SVG parameterization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "text", + "content": "Specifically, our SVG tokenizer transforms SVG scripts " + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "inline_equation", + "content": "X_{s}" + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "text", + "content": " into an ordered SVG token sequence within the same representation space as the pre-trained VLM. Following IconShop [57], we flatten the layered structure of the SVG script by concatenating different paths into a single command sequence, where each path begins with the drawing commands followed by point coordinates. Therefore, each SVG sequence could be represented as a flattened sequence. As the generation identifier, we apply special tokens like " + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "inline_equation", + "content": "<\\mathrm{SOP}>" + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "inline_equation", + "content": "<\\mathrm{EOS}>" + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "text", + "content": " to the two ends of a SVG sequence, identifying the beginning and ending of a SVG sequence. We assign special tokens for each command type, i.e. " + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "inline_equation", + "content": "\\{\\mathrm{M}, \\mathrm{L}, \\mathrm{C}, \\mathrm{A}, \\bar{\\mathrm{Z}}, \\mathrm{F}\\}" + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "text", + "content": ". To shorten the length of the SVG sequence, we further merge the 2D point coordinates into one token with a mapping function: " + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "inline_equation", + "content": " \\rightarrow x \\times w + y" + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 508, + 506, + 628 + ], + "type": "text", + "content": " is the width of the image. The SVG sequence are then lifted into the same embedding space as the pre-trained VLM with a learnable embedding layer." 
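A hedged sketch of this tokenization scheme follows, assuming the 200×200 canvas from Sec. 3.1: each command type and hex fill gets a dedicated symbol, every 2D point collapses to a single integer token via x × w + y, and <SOP>/<EOS> bracket the flattened sequence. The function names and vocabulary layout are illustrative; the actual tokenizer maps these tokens into the VLM's embedding space with a learnable embedding layer.

```python
# Hedged tokenizer sketch (names are ours, not the released code): flatten the paths
# of an SVG into one <SOP> ... <EOS> sequence, with a symbol per command type and per
# hex fill, and each 2D point merged into a single integer token via (x, y) -> x*w + y.
W = 200                                   # viewbox width used in MMSVG-2M


def point_token(x: int, y: int, w: int = W) -> int:
    return x * w + y                      # merge a 2D coordinate into one token


def point_from_token(t: int, w: int = W) -> tuple:
    return divmod(t, w)                   # inverse mapping: (x, y) = (t // w, t % w)


def tokenize(paths) -> list:
    """paths: list of paths, each a list of (command, args) pairs."""
    seq = ["<SOP>"]
    for path in paths:                    # layered paths are simply concatenated
        for cmd, args in path:
            seq.append(cmd)               # command-type token, in {M, L, C, A, Z, F}
            if cmd == "F":
                seq.append(args[0])       # hex color handled as its own special token
            else:
                seq.extend(point_token(x, y) for x, y in args)
    seq.append("<EOS>")
    return seq


# Red segment from (10, 20) to (150, 80) as a single path.
print(tokenize([[("F", ["#ff0000"]), ("M", [(10, 20)]), ("L", [(150, 80)]), ("Z", [])]]))
# ['<SOP>', 'F', '#ff0000', 'M', 2020, 'L', 30080, 'Z', '<EOS>']
```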
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 633, + 506, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 506, + 679 + ], + "type": "text", + "content": "Model Architecture. OmniSVG adopts Qwen2.5-VL [1], an open-sourced VLM that excels at understanding intricate vision-text inputs, as its backbone (Fig. 2) to produce precise and compact SVG outputs. OmniSVG is trained to predict the SVG suffix tokens " + }, + { + "bbox": [ + 104, + 633, + 506, + 679 + ], + "type": "inline_equation", + "content": "(x_{s})" + }, + { + "bbox": [ + 104, + 633, + 506, + 679 + ], + "type": "text", + "content": " conditioned on the multi-modal instruction prefix tokens " + }, + { + "bbox": [ + 104, + 633, + 506, + 679 + ], + "type": "inline_equation", + "content": "(x_{c})" + }, + { + "bbox": [ + 104, + 633, + 506, + 679 + ], + "type": "text", + "content": " with the standard next-token prediction objective." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 228, + 693, + 505, + 724 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 693, + 505, + 724 + ], + "spans": [ + { + "bbox": [ + 228, + 693, + 505, + 724 + ], + "type": "interline_equation", + "content": "\\theta^ {*} = \\arg \\max _ {\\theta} \\prod_ {i = 1} ^ {L} P \\left(x _ {s, i} \\mid x _ {s, < i}, x _ {c}\\right) \\tag {1}", + "image_path": "f8339bb823ff6b5c65739fe1d241b3c850f32158ec08e4048b05299bf9b4bd5c.jpg" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 118, + 504, + 384 + ], + "blocks": [ + { + "bbox": [ + 104, + 70, + 504, + 111 + ], + "lines": [ + { + "bbox": [ + 104, + 70, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 104, + 70, + 504, + 111 + ], + "type": "text", + "content": "Table 2: Quantitative Evaluations. Quantitative results between OmniSVG and current state-of-the-art text-to-SVG and image-to-SVG baseline methods. The bold numbers and underlined numbers represent the best and second best performance respectively. Our OmniSVG model demonstrates superior performance compared SOTA SVG generation baselines." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 118, + 504, + 384 + ], + "lines": [ + { + "bbox": [ + 107, + 118, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 107, + 118, + 504, + 384 + ], + "type": "table", + "html": "
Evaluation Dataset | Methods | # Tokens | Text-to-SVG | | | | Image-to-SVG | | |
 | | | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓
MMSVG-Icon | Vectorfusion [22] | 66.2k | 250.77 | 0.240 | 4.76 | 0.237 | - | - | - | -
 | SVGDreamer [60] | 132.0k | 308.94 | 0.207 | 4.26 | 0.221 | - | - | - | -
 | Chat2SVG [56] | 0.6k | 190.87 | 0.299 | 4.41 | 0.247 | - | - | - | -
 | IconShop [57] | 2.0k | 213.28 | 0.288 | 4.55 | 0.244 | - | - | - | -
 | LIVE [34] | 52.5k | - | - | - | - | 0.932 | 0.943 | 0.106 | 0.011
 | DiffVG [29] | 322.0k | - | - | - | - | 0.940 | 0.954 | 0.066 | 0.002
 | GPT-4o [21] | 0.3k | - | - | - | - | 0.860 | 0.792 | 0.403 | 0.124
 | StarVector(8B) [42] | 2.0k | - | - | - | - | 0.895 | 0.881 | 0.231 | 0.059
 | Vtracer | 52.4k | - | - | - | - | 0.993 | 0.966 | 0.039 | 0.002
 | OmniSVG(4B) | 3.8k | 137.40 | 0.275 | 4.62 | 0.244 | 0.993 | 0.950 | 0.050 | 0.006
 | OmniSVG-L(8B) | 5.7k | 130.56 | 0.276 | 4.60 | 0.242 | 0.922 | 0.893 | 0.235 | 0.040
MMSVG-Illustration | Vectorfusion [22] | 66.1k | 253.94 | 0.185 | 4.94 | 0.226 | - | - | - | -
 | SVGDreamer [60] | 132.0k | 419.70 | 0.201 | 4.37 | 0.221 | - | - | - | -
 | Chat2SVG [56] | 1.0k | 210.03 | 0.283 | 4.45 | 0.250 | - | - | - | -
 | IconShop [57] | 2.6k | 107.93 | 0.233 | 4.46 | 0.224 | - | - | - | -
 | LIVE [34] | 52.2k | - | - | - | - | 0.935 | 0.950 | 0.111 | 0.008
 | DiffVG [29] | 322.0k | - | - | - | - | 0.945 | 0.955 | 0.065 | 0.001
 | GPT-4o [21] | 0.4k | - | - | - | - | 0.875 | 0.854 | 0.373 | 0.077
 | StarVector(8B) [42] | 2.6k | - | - | - | - | 0.877 | 0.900 | 0.238 | 0.046
 | Vtracer | 57.6k | - | - | - | - | 0.994 | 0.966 | 0.035 | 0.002
 | OmniSVG(4B) | 5.8k | 154.37 | 0.226 | 4.56 | 0.232 | 0.899 | 0.906 | 0.237 | 0.034
 | OmniSVG-L(8B) | 6.9k | 138.42 | 0.231 | 4.51 | 0.232 | 0.905 | 0.907 | 0.231 | 0.031
", + "image_path": "c4f306ac16842311f85921e1c2a3d115725cdb762eb9aa886166cb927204f427.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 402, + 192, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 192, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 192, + 415 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 427, + 504, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 427, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 504, + 462 + ], + "type": "text", + "content": "To validate the effectiveness of our method, we first introduce the baselines (Sec. 5.1). Then, we make quantitative comparisons with prior arts (Secs. 5.2 and 5.3) and conduct ablations (Sec. 5.4) to study the effectiveness of our design." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 475, + 170, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 170, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 170, + 485 + ], + "type": "text", + "content": "5.1Baselines" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 562 + ], + "type": "text", + "content": "For the text-to-SVG task, we compare our method with language-based (LLM-based) methods, including VectorFusion [22], SVGDreamer [60], Chat2SVG [56] and IconShop [57]. For image-to-SVG task, we compare our method with baseline methods across image vectorization and Multimodal Large Language Modeling approaches, including LIVE [34], DiffVG [29], StarVector [42], Vtracer [12] and GPT-4o [21] using the official implementations with the hyperparameters proposed by the authors, and apply their pre- and post-processing code as required." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 575, + 243, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 575, + 243, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 243, + 588 + ], + "type": "text", + "content": "5.2 Quantitative Comparisons" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 596, + 506, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 506, + 631 + ], + "type": "text", + "content": "We compare our OmniSVG with other baseline methods on the \"text-to-SVG\" and \"image-to-SVG\" tasks in our MMSVG-Bench. In addition to the metrics mentioned in Sec. 3, we also report the average token length (# tokens) of a generated SVG sample utilizing the Qwen2.5-VL [1] tokenizer." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 634, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 504, + 723 + ], + "type": "text", + "content": "As shown in Tab. 2, OmniSVG demonstrates strong performance compared to state-of-the-art baselines in text-to-SVG generation, achieving superior FID scores and competitive CLIP score, aesthetic quality, and HPS. For image-to-SVG, OmniSVG also achieves competitive results with traditional vectorization methods, i.e. 
LIVE [34], DiffVG [29], and VTracer [12], but with a much shorter sequence length. When comparing to auto-regressive methods, i.e. GPT-4o [21] and StarVector [42], OmniSVG also achieves a superior performance across all metrics. The above results indicate that OmniSVG effectively balances the generation cost and the visual quality when generating SVGs according to multi-modal conditions." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 76, + 503, + 329 + ], + "blocks": [ + { + "bbox": [ + 107, + 76, + 503, + 329 + ], + "lines": [ + { + "bbox": [ + 107, + 76, + 503, + 329 + ], + "spans": [ + { + "bbox": [ + 107, + 76, + 503, + 329 + ], + "type": "image", + "image_path": "0b0a38efbb695a95d71553a06e7819b3d49df273d684e1a78144d9c2d90c71b6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 336, + 504, + 357 + ], + "lines": [ + { + "bbox": [ + 104, + 336, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 336, + 504, + 357 + ], + "type": "text", + "content": "Figure 3: Qualitative Comparison with SOTA Methods on Text-to-SVG Task. We compare the propose method with SOTA Text-to-SVG methods on our evaluation benchmarks, namely Icon and Illustration." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 385, + 231, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 385, + 231, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 231, + 396 + ], + "type": "text", + "content": "5.3 Qualitative Evaluations" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 409, + 506, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 409, + 506, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 506, + 520 + ], + "type": "text", + "content": "Text-to-SVG task. We compare our method with baseline approaches using seven distinct text prompts for the text-to-SVG task, as shown in Fig. 4. Optimization-based methods like SVGDreamer [60] and VectorFusion [22] require significant computation time due to their iterative optimization processes, which, while effective for refining SVG details, are computationally expensive. Auto-regressive methods, such as IconShop [57] and Chat2SVG [56], generate SVGs more quickly by leveraging pre-trained models but have notable limitations. IconShop produces monochrome SVGs, restricting its applicability, while Chat2SVG, though flexible, generates less detailed and semantically consistent SVGs in its first stage. Our OmniSVG consistently outperforms all baselines across various text prompts in generating high-fidelity SVGs with rich color, geometric accuracy, and the ability to handle complex visual cues." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 525, + 298, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 298, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 298, + 723 + ], + "type": "text", + "content": "Image-to-SVG Task. 
We compare our method with classical image vectorization approaches, including DiffVG [29], LIVE [34], and VLM-based methods GPT-4o [21], StarVector [42] and Vtracer [12] As shown in Fig. 4, our method outperforms these baselines in both quality and efficiency. Optimization-based methods like DiffVG and LIVE perform well on simple icons but struggle with complex images, often generating visual artifacts. The GPT-4o model, while capable of generating SVGs for complex images, is limited to icon-level outputs and cannot handle detailed illustrations. StarVector excels at simple icons but fails to produce accurate SVGs for more intricate images, highlighting its limited generalization capability. Vtracer is an image processing algorithm designed to convert raster images into SVGs. In contrast, OmniSVG effi" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 312, + 529, + 492, + 694 + ], + "blocks": [ + { + "bbox": [ + 312, + 529, + 492, + 694 + ], + "lines": [ + { + "bbox": [ + 312, + 529, + 492, + 694 + ], + "spans": [ + { + "bbox": [ + 312, + 529, + 492, + 694 + ], + "type": "image", + "image_path": "9cce668291ae3db38c36dcf3597d0b593fc88ffc1dd7c1c70776e983a717947f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 695, + 504, + 715 + ], + "lines": [ + { + "bbox": [ + 302, + 695, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 302, + 695, + 504, + 715 + ], + "type": "text", + "content": "Figure 5: Generated SVG with Character-Reference (CRef) by OmniSVG." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 76, + 500, + 327 + ], + "blocks": [ + { + "bbox": [ + 109, + 76, + 500, + 327 + ], + "lines": [ + { + "bbox": [ + 109, + 76, + 500, + 327 + ], + "spans": [ + { + "bbox": [ + 109, + 76, + 500, + 327 + ], + "type": "image", + "image_path": "1848b53d51571e79abf00b6041fd54b00a12f7df7b9c077a30c48675555e314e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 332, + 504, + 354 + ], + "lines": [ + { + "bbox": [ + 104, + 332, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 332, + 504, + 354 + ], + "type": "text", + "content": "Figure 4: Qualitative Comparison with SOTA Methods on Image-to-SVG Task. We compare the propose method with SOTA Image-to-SVG methods on our evaluation benchmarks." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 376, + 506, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 506, + 433 + ], + "type": "text", + "content": "ciently converts a wide range of images, from icons to complex illustrations and character images, into high-quality, editable SVGs. This superior performance in handling diverse visual cues distinguishes OmniSVG from traditional vectorization methods. Additional visual results can be found in Fig. 12. 
We provide more detailed discussions with existing methods, particularly the recent works LLM4SVG [59] and StarVector [42], in the Sec. D." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 504, + 472 + ], + "type": "text", + "content": "Character-Reference SVG generation task. As shown in Fig. 5, by training on MMSVG-Character with natural character image and SVG pair data, OmniSVG is capable of generating character SVGs through image references." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 486, + 200, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 486, + 200, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 200, + 497 + ], + "type": "text", + "content": "5.4 Ablation studies" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 506, + 506, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 506, + 551 + ], + "type": "text", + "content": "Effectiveness of SVG Parameterization. We present a comprehensive comparison among different SVG parameterization strategy with the traditional non-parameterized methods for SVG representation in large language models. We ablates on the parameterization on both coordinate and color attributes of the SVG." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 555, + 504, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 601 + ], + "type": "text", + "content": "The results, shown in Tab. 3 and Fig. 6 demonstrate that parameterizing both coordinate and color attributes yields a better generation results under all metrics with the shortest token length. It further validates that the efficient token representation allows our method to generate complex SVGs with fewer computational resources. Additionally, qualitative results show that our method outperforms" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 122, + 655, + 488, + 721 + ], + "blocks": [ + { + "bbox": [ + 104, + 628, + 504, + 649 + ], + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 649 + ], + "type": "text", + "content": "Table 3: Quantitative Study on SVG Parameterization. Ablation studies on color parametrization (abbreviated as color param.) and coordinate parameterization (abbreviated as coord param.) are conducted." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 122, + 655, + 488, + 721 + ], + "lines": [ + { + "bbox": [ + 122, + 655, + 488, + 721 + ], + "spans": [ + { + "bbox": [ + 122, + 655, + 488, + 721 + ], + "type": "table", + "html": "
Methods | Text-to-SVG | | | | Image-to-SVG | | | | # Tokens
 | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓ |
w/o param. | 218.76 | 0.185 | 3.43 | 0.138 | 0.741 | 0.718 | 0.315 | 0.182 | 18.5k
w/o coordinate param. | 193.42 | 0.216 | 3.90 | 0.169 | 0.826 | 0.809 | 0.248 | 0.119 | 10.2k
w/o color param. | 167.28 | 0.269 | 4.31 | 0.211 | 0.895 | 0.879 | 0.179 | 0.053 | 6.3k
OmniSVG(4B) | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020 | 4.8k
", + "image_path": "5016e0dda01fc72e711a0637f464fa6dca63956af554e7623e81300c9c91050a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 94, + 492, + 159 + ], + "blocks": [ + { + "bbox": [ + 108, + 78, + 500, + 89 + ], + "lines": [ + { + "bbox": [ + 108, + 78, + 500, + 89 + ], + "spans": [ + { + "bbox": [ + 108, + 78, + 500, + 89 + ], + "type": "text", + "content": "Table 4: Ablation of the Model Size. As the model size grows, the generated samples are of higher quality." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 115, + 94, + 492, + 159 + ], + "lines": [ + { + "bbox": [ + 115, + 94, + 492, + 159 + ], + "spans": [ + { + "bbox": [ + 115, + 94, + 492, + 159 + ], + "type": "table", + "html": "
Methods | Input | Size | Text-to-SVG | | | | Image-to-SVG | | |
 | | | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓
FLAN-T5-Base [10] | Text | 223M | 198.48 | 0.158 | 3.38 | 0.085 | - | - | - | -
FLAN-T5-Large [10] | Text | 770M | 175.24 | 0.208 | 3.92 | 0.142 | - | - | - | -
FLAN-T5-xl [10] | Text | 3B | 160.28 | 0.258 | 4.31 | 0.192 | - | - | - | -
blip2-flan-t5-xl [28] | Text/Image | 3.94B | 152.11 | 0.235 | 4.48 | 0.215 | 0.898 | 0.891 | 0.255 | 0.041
OmniSVG(4B) | Text/Image | 3.7B | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020
", + "image_path": "cbd46bc6376976fecf43d9880323bc6e5b2f57aa205285f95fbabc54caac45f6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 167, + 504, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 504, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 504, + 201 + ], + "type": "text", + "content": "others, particularly as SVG complexity increases. The non-parameterization method fails to generate SVGs for complex images. These findings underscore the effectiveness of our full parameterization strategy in balancing performance and resource efficiency for SVG generation tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "type": "text", + "content": "Ablation studies on model size. To analyze whether training a larger model benefits SVG generation, we evaluate OmniSVG base models with different sizes on the MMSVG-2M dataset in Tab. 4. We evaluate OmniSVG with base models of varying sizes on the MMSVG-2M dataset in Tab. 4 by progressively scaling up the model size. The results show that as the model size grows, we can generate SVG samples with a better quality." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 116, + 293, + 492, + 355 + ], + "blocks": [ + { + "bbox": [ + 226, + 277, + 383, + 288 + ], + "lines": [ + { + "bbox": [ + 226, + 277, + 383, + 288 + ], + "spans": [ + { + "bbox": [ + 226, + 277, + 383, + 288 + ], + "type": "text", + "content": "Table 5: Ablation on VLM architecture." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 116, + 293, + 492, + 355 + ], + "lines": [ + { + "bbox": [ + 116, + 293, + 492, + 355 + ], + "spans": [ + { + "bbox": [ + 116, + 293, + 492, + 355 + ], + "type": "table", + "html": "
Vision Model | Language Model | Text-to-SVG | | | | Image-to-SVG | | |
 | | FID↓ | CLIP↑ | Aesthetic↑ | HPS↑ | DINO↑ | SSIM↑ | LPIPS↓ | MSE↓
CLIP | Qwen2.5 | 185.31 | 0.249 | 4.52 | 0.215 | 0.867 | 0.856 | 0.267 | 0.058
VQGAN | Qwen2.5 | 198.74 | 0.234 | 4.49 | 0.203 | 0.839 | 0.828 | 0.295 | 0.071
Qwen2.5-VL-3B-Instruct | | 145.89 | 0.308 | 4.59 | 0.238 | 0.946 | 0.928 | 0.138 | 0.020
Qwen2.5-VL-7B-Instruct | | 134.45 | 0.254 | 4.56 | 0.237 | 0.914 | 0.900 | 0.233 | 0.036
", + "image_path": "afdb539875426e118405d4db2d308c5bb36d4e628c45078b40a4de7fcddde2a5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 365, + 504, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 504, + 399 + ], + "type": "text", + "content": "Ablation Studies on the VLM Architecture. To evaluate the effectiveness of the VLM architecture, we conducted an ablation study replacing it with alternative LLM-based architectures incorporating image encoders such as CLIP ViT-B/32 [39], VQGAN [14], and Qwen2.5-VL [1]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 403, + 297, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 297, + 436 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 297, + 436 + ], + "type": "text", + "content": "The results in Tab. 5 show that Qwen2.5-VL consistently outperformed all alternatives under all evaluation metrics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 441, + 298, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 298, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 298, + 583 + ], + "type": "text", + "content": "User Study. We extract one-tenth of the samples from the evaluation dataset and conducted a user study with 15 participants to evaluate user preferences, vividness, and the alignment between text-to-SVG and image-to-SVG. Participants are asked to assess SVGs generated by different models based on 150 text descriptions and 150 image prompts, comparing the results generated using our method and baseline models. The results in Fig. 7 show that OmniSVG is widely preferred, with higher scores for vividness and superior semantic alignment with the input conditions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 599, + 188, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 599, + 188, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 188, + 611 + ], + "type": "text", + "content": "6 Conclusions" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 623, + 297, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 623, + 297, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 297, + 667 + ], + "type": "text", + "content": "Conclusions. We introduce OmniSVG, a unified framework for multimodal SVG generation that leverages pre-trained Vision-Language Models (VLMs). By parameterizing SVG com" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 667, + 506, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 506, + 722 + ], + "type": "text", + "content": "mands and coordinates as discrete tokens, OmniSVG efficiently decouples structural logic from geometry, addressing issues like \"coordinate hallucination\" while maintaining design expressiveness. Our method outperforms existing approaches in both quality and efficiency, offering high-quality, editable SVG across various design domains. Additionally, we proposed MMSVG-2M, a large-scale multimodal dataset with two million annotated SVG assets and a standardized evaluation protocol." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 310, + 413, + 495, + 628 + ], + "blocks": [ + { + "bbox": [ + 310, + 413, + 495, + 628 + ], + "lines": [ + { + "bbox": [ + 310, + 413, + 495, + 628 + ], + "spans": [ + { + "bbox": [ + 310, + 413, + 495, + 628 + ], + "type": "image", + "image_path": "f4018d8e6b84d4441104ddcc05ee39760d0faf2a95c81a23d8f047c80b19142b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 632, + 497, + 643 + ], + "lines": [ + { + "bbox": [ + 309, + 632, + 497, + 643 + ], + "spans": [ + { + "bbox": [ + 309, + 632, + 497, + 643 + ], + "type": "text", + "content": "Figure 6: Qualitative Study on Parametrization." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "Extensive experiments show that OmniSVG surpasses prior SVG generation methods in various conditional generation tasks, highlighting its potential for integration into professional SVG design workflows." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 308, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 308, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 308, + 220 + ], + "type": "text", + "content": "Limitations and Future Work. During inference, OmniSVG generates tens of thousands of tokens for complex samples, which inevitably leads to a considerable generation time. OmniSVG is only bounded by vector style image prompt and fails on natural images. As for future work, recent endeavors on multi-token prediction [15, 2] and KV-cache compression [68, 3] provide a promising way to save the generation cost. Additionally, the auto-regressive nature of OmniSVG also unlocks future" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 320, + 123, + 500, + 206 + ], + "blocks": [ + { + "bbox": [ + 313, + 107, + 504, + 119 + ], + "lines": [ + { + "bbox": [ + 313, + 107, + 504, + 119 + ], + "spans": [ + { + "bbox": [ + 313, + 107, + 504, + 119 + ], + "type": "text", + "content": "Figure 7: User Study of OmniSVG and baselines." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 320, + 123, + 500, + 206 + ], + "lines": [ + { + "bbox": [ + 320, + 123, + 500, + 206 + ], + "spans": [ + { + "bbox": [ + 320, + 123, + 500, + 206 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Preference ↑</td><td>Vividity↑</td><td>Alignment↑</td></tr>
<tr><td>Vectorfusion [22]</td><td>35</td><td>58</td><td>76</td></tr>
<tr><td>SVGDreamer [60]</td><td>41</td><td>65</td><td>79</td></tr>
<tr><td>Chat2SVG [56]</td><td>55</td><td>61</td><td>86</td></tr>
<tr><td>IconShop [57]</td><td>79</td><td>57</td><td>75</td></tr>
<tr><td>GPT-4o [21]</td><td>38</td><td>54</td><td>80</td></tr>
<tr><td>StarVector(8B) [42]</td><td>37</td><td>81</td><td>68</td></tr>
<tr><td>DiffVG [29]</td><td>88</td><td>76</td><td>96</td></tr>
<tr><td>LIVE [34]</td><td>86</td><td>70</td><td>95</td></tr>
<tr><td>OmniSVG</td><td>96</td><td>88</td><td>98</td></tr></table>
", + "image_path": "9d402d8c09bf0bd876bc6d4063630a82a87d9bd5d4ba548e776d476dfc8b87fc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 243 + ], + "type": "text", + "content": "opportunities for in-context learning [67, 69, 47], chain-of-thought reasoning [55, 16], and multi-turn interleaved generation [20, 31], thereby providing a more precise user control." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 258, + 208, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 208, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 208, + 272 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "type": "text", + "content": "This work is in part supported by National Key R&D Program of China (Grant No. 2022ZD0160103), National Natural Science Foundation of China (Grant No. 62276067), and National Natural Science Foundation of China (Grant No. 62472104)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 320, + 494, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 320, + 494, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 320, + 494, + 333 + ], + "type": "text", + "content": "The computations in this research were performed using the CFFF platform of Fudan University." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 88, + 505, + 721 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 111, + 88, + 505, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 88, + 505, + 111 + ], + "spans": [ + { + "bbox": [ + 111, + 88, + 505, + 111 + ], + "type": "text", + "content": "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 116, + 505, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 116, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 111, + 116, + 505, + 137 + ], + "type": "text", + "content": "[2] Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, and Tri Dao. Medusa: Simple framework for accelerating IIm generation with multiple decoding heads. Retrieved December, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 144, + 505, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 144, + 505, + 175 + ], + "spans": [ + { + "bbox": [ + 111, + 144, + 505, + 175 + ], + "type": "text", + "content": "[3] Zefan Cai, Yichi Zhang, Bofei Gao, Yuliang Liu, Tianyu Liu, Keming Lu, Wayne Xiong, Yue Dong, Baobao Chang, Junjie Hu, et al. Pyramidkv: Dynamic kv cache compression based on pyramidal information tunneling. arXiv preprint arXiv:2406.02069, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 182, + 505, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 182, + 505, + 203 + ], + "spans": [ + { + "bbox": [ + 111, + 182, + 505, + 203 + ], + "type": "text", + "content": "[4] Alexandre Carlier, Martin Danelljan, Alexandre Alahi, and Radu Timofte. Deepsvg: A hierarchical generative network for vector graphics animation. NeurIPS, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 209, + 505, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 209, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 111, + 209, + 505, + 231 + ], + "type": "text", + "content": "[5] Sijin Chen, Xin Chen, Anqi Pang, Xianfang Zeng, Wei Cheng, Yijun Fu, Fukun Yin, Billzb Wang, Jingyi Yu, Gang Yu, et al. Meshxl: Neural coordinate field for generative 3d foundation models. NeurIPS, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 236, + 505, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 236, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 111, + 236, + 505, + 267 + ], + "type": "text", + "content": "[6] Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. L13da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In CVPR, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 274, + 505, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 274, + 505, + 296 + ], + "spans": [ + { + "bbox": [ + 111, + 274, + 505, + 296 + ], + "type": "text", + "content": "[7] Zehao Chen and Rong Pan. Svgbuilder: Component-based colored graphic generation with text-guided autoregressive transformers. arXiv preprint arXiv:2412.10488, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 301, + 505, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 301, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 111, + 301, + 505, + 333 + ], + "type": "text", + "content": "[8] Wei Cheng, Ruixiang Chen, Siming Fan, Wanqi Yin, Keyu Chen, Zhongang Cai, Jingbo Wang, Yang Gao, Zhengming Yu, Zhengyu Lin, et al. Dna-rendering: A diverse neural actor repository for high-fidelity human-centric rendering. In ICCV, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 339, + 505, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 339, + 505, + 370 + ], + "spans": [ + { + "bbox": [ + 111, + 339, + 505, + 370 + ], + "type": "text", + "content": "[9] Wei Cheng, Su Xu, Jingtan Piao, Chen Qian, Wayne Wu, Kwan-Yee Lin, and Hongsheng Li. Generalizable neural performer: Learning robust radiance fields for human novel view synthesis. arXiv preprint arXiv:2204.11798, 2022." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 376, + 505, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 376, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 505, + 407 + ], + "type": "text", + "content": "[10] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. JMLR, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 414, + 505, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 414, + 505, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 505, + 436 + ], + "type": "text", + "content": "[11] Louis Clouatre and Marc Demers. Figr: Few-shot image generation with reptile. arXiv preprint arXiv:1901.02199, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 441, + 433, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 433, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 433, + 453 + ], + "type": "text", + "content": "[12] Vision Cortex. Vtracer. https://www.visioncortex.org/vtracer-docs, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 460, + 505, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 505, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 505, + 480 + ], + "type": "text", + "content": "[13] Nyanko Devs. Danbooru2023: A large-scale crowdsourced and tagged anime illustration dataset. Hugging Face, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 487, + 505, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 505, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 505, + 509 + ], + "type": "text", + "content": "[14] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In CVPR, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 515, + 505, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 505, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 505, + 536 + ], + "type": "text", + "content": "[15] Fabian Gloeckle, Badr Youbi Idrissi, Baptiste Rozière, David Lopez-Paz, and Gabriel Synnaeve. Better & faster large language models via multi-token prediction. arXiv preprint arXiv:2404.19737, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 542, + 505, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 542, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 505, + 574 + ], + "type": "text", + "content": "[16] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 580, + 505, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 580, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 105, + 580, + 505, + 602 + ], + "type": "text", + "content": "[17] Han Guo, Songlin Yang, Tarushii Goel, Eric P Xing, Tri Dao, and Yoon Kim. Log-linear attention. arXiv preprint arXiv:2506.04761, 2025." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 608, + 444, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 608, + 444, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 444, + 620 + ], + "type": "text", + "content": "[18] David Ha and Douglas Eck. A neural representation of sketch drawings. In ICLR, 2018." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 625, + 505, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 505, + 647 + ], + "type": "text", + "content": "[19] Teng Hu, Ran Yi, Baihong Qian, Jiangning Zhang, Paul L Rosin, and Yu-Kun Lai. Supersvg: Superpixel-based scalable vector graphics synthesis. In CVPR, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 653, + 505, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 653, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 505, + 685 + ], + "type": "text", + "content": "[20] Minbin Huang, Yanxin Long, Xinchi Deng, Ruihang Chu, Jiangfeng Xiong, Xiaodan Liang, Hong Cheng, Qinglin Lu, and Wei Liu. Dialoggen: Multi-modal interactive dialogue system for multi-turn text-to-image generation. arXiv preprint arXiv:2403.08857, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 691, + 505, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 691, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 105, + 691, + 505, + 721 + ], + "type": "text", + "content": "[21] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[22] Ajay Jain, Amber Xie, and Pieter Abbeel. Vectorfusion: Text-to-sv by abstracting pixel-based diffusion models. In CVPR, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 101, + 505, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 101, + 505, + 132 + ], + "spans": [ + { + "bbox": [ + 106, + 101, + 505, + 132 + ], + "type": "text", + "content": "[23] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention. In International conference on machine learning, pages 5156-5165. PMLR, 2020." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 139, + 505, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 139, + 505, + 171 + ], + "spans": [ + { + "bbox": [ + 107, + 139, + 505, + 171 + ], + "type": "text", + "content": "[24] Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Muñoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, et al. The stack: 3 tb of permissively licensed source code. arXiv preprint arXiv:2211.15533, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 178, + 318, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 178, + 318, + 190 + ], + "spans": [ + { + "bbox": [ + 107, + 178, + 318, + 190 + ], + "type": "text", + "content": "[25] Kozea. Cairosvg. https://cairosvg.org/, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 196, + 439, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 196, + 439, + 208 + ], + "spans": [ + { + "bbox": [ + 107, + 196, + 439, + 208 + ], + "type": "text", + "content": "[26] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 214, + 506, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 214, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 107, + 214, + 506, + 236 + ], + "type": "text", + "content": "[27] Black Forest Labs. Flux.1Redux-dev. https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 243, + 504, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 243, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 107, + 243, + 504, + 264 + ], + "type": "text", + "content": "[28] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 271, + 504, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 271, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 107, + 271, + 504, + 293 + ], + "type": "text", + "content": "[29] Tzu-Mao Li, Michal Lukáč, Gharbi Michael, and Jonathan Ragan-Kelley. Differentiable vector graphics rasterization for editing and learning. SIGGRAPH Asia, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 300, + 505, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 300, + 505, + 312 + ], + "spans": [ + { + "bbox": [ + 107, + 300, + 505, + 312 + ], + "type": "text", + "content": "[30] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 318, + 505, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 318, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 107, + 318, + 505, + 350 + ], + "type": "text", + "content": "[31] Ziyu Liu, Tao Chu, Yuhang Zang, Xilin Wei, Xiaoyi Dong, Pan Zhang, Zijian Liang, Yuanjun Xiong, Yu Qiao, Dahua Lin, et al. Mmdu: A multi-turn multi-image dialog understanding benchmark and instruction-tuning dataset for lvlms. arXiv preprint arXiv:2406.11833, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 357, + 504, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 357, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 107, + 357, + 504, + 378 + ], + "type": "text", + "content": "[32] Raphael Gontijo Lopes, David Ha, Douglas Eck, and Jonathon Shlens. A learned representation for scalable vector graphics. In CVPR, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 384, + 504, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 384, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 107, + 384, + 504, + 406 + ], + "type": "text", + "content": "[33] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 413, + 505, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 413, + 505, + 435 + ], + "spans": [ + { + "bbox": [ + 107, + 413, + 505, + 435 + ], + "type": "text", + "content": "[34] Xu Ma, Yuqian Zhou, Xingqian Xu, Bin Sun, Valerii Filev, Nikita Orlov, Yun Fu, and Humphrey Shi. Towards layer-wise image vectorization. In CVPR, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 441, + 505, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 441, + 505, + 473 + ], + "spans": [ + { + "bbox": [ + 107, + 441, + 505, + 473 + ], + "type": "text", + "content": "[35] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 479, + 505, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 479, + 505, + 511 + ], + "spans": [ + { + "bbox": [ + 107, + 479, + 505, + 511 + ], + "type": "text", + "content": "[36] Dongwei Pan, Long Zhuo, Jingtan Piao, Huiwen Luo, Wei Cheng, Yuxin Wang, Siming Fan, Shengqi Liu, Lei Yang, Bo Dai, et al. Renderme-360: a large digital asset library and benchmarks towards high-fidelity head avatars. NeurIPS, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 518, + 505, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 518, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 107, + 518, + 505, + 540 + ], + "type": "text", + "content": "[37] Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 547, + 505, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 547, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 107, + 547, + 505, + 578 + ], + "type": "text", + "content": "[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 586, + 505, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 586, + 505, + 616 + ], + "spans": [ + { + "bbox": [ + 107, + 586, + 505, + 616 + ], + "type": "text", + "content": "[39] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 624, + 505, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 624, + 505, + 655 + ], + "spans": [ + { + "bbox": [ + 107, + 624, + 505, + 655 + ], + "type": "text", + "content": "[40] Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. Zero: Memory optimizations toward training trillion parameter models. In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis. IEEE, 2020." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 662, + 505, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 662, + 505, + 683 + ], + "spans": [ + { + "bbox": [ + 107, + 662, + 505, + 683 + ], + "type": "text", + "content": "[41] Pradyumna Reddy, Michael Gharbi, Michal Lukac, and Niloy J Mitra. Im2vec: Synthesizing vector graphics without vector supervision. In CVPR, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 691, + 505, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 691, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 691, + 505, + 721 + ], + "type": "text", + "content": "[42] Juan A Rodriguez, Shubham Agarwal, Issam H Laradji, Pau Rodriguez, David Vazquez, Christopher Pal, and Marco Pedersoli. Starvector: Generating scalable vector graphics code from images. arXiv preprint arXiv:2312.11556, 2023." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[43] Christoph Schuhmann. Improved aesthetic predictor. https://github.com/christophschuhmann/improved-aesthetic-predictor, 2022." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 99, + 506, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 99, + 506, + 121 + ], + "spans": [ + { + "bbox": [ + 106, + 99, + 506, + 121 + ], + "type": "text", + "content": "[44] I-Chao Shen and Bing-Yu Chen. Clipgen: A deep generative model for clipart vectorization and synthesis. TVCG, 2022." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 127, + 506, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 127, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 107, + 127, + 506, + 149 + ], + "type": "text", + "content": "[45] Yiren Song, Xuning Shao, Kang Chen, Weidong Zhang, Zhongliang Jing, and Minzhe Li. Clipvg: Text-guided image manipulation using differentiable vector graphics. In AAAI, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 156, + 504, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 156, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 107, + 156, + 504, + 177 + ], + "type": "text", + "content": "[46] Hao Su, Xuefeng Liu, Jianwei Niu, Jiahe Cui, Ji Wan, Xinghao Wu, and Nana Wang. Marvel: Raster gray-level manga vectorization via primitive-wise deep reinforcement learning. TCSVT, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 182, + 505, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 182, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 505, + 213 + ], + "type": "text", + "content": "[47] Quan Sun, Yufeng Cui, Xiaosong Zhang, Fan Zhang, Qiying Yu, Yueze Wang, Yongming Rao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Generative multimodal models are in-context learners. In CVPR, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 220, + 504, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 220, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 107, + 220, + 504, + 251 + ], + "type": "text", + "content": "[48] Zecheng Tang, Chenfei Wu, Zekai Zhang, Mingheng Ni, Shengming Yin, Yu Liu, Zhengyuan Yang, Lijuan Wang, Zicheng Liu, Juntao Li, et al. Strokenuwa: Tokenizing strokes for vector graphic synthesis. arXiv preprint arXiv:2401.17093, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 258, + 504, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 258, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 107, + 258, + 504, + 289 + ], + "type": "text", + "content": "[49] Zecheng Tang, Chenfei Wu, Zekai Zhang, Mingheng Ni, Shengming Yin, Yu Liu, Zhengyuan Yang, Lijuan Wang, Zicheng Liu, Juntao Li, et al. Strokenuwa: Tokenizing strokes for vector graphic synthesis. arXiv preprint arXiv:2401.17093, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 295, + 506, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 295, + 506, + 317 + ], + "spans": [ + { + "bbox": [ + 107, + 295, + 506, + 317 + ], + "type": "text", + "content": "[50] Lucas Theis, Aäron van den Oord, and Matthias Bethge. A note on the evaluation of generative models. arXiv preprint arXiv:1511.01844, 2015." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 323, + 504, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 323, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 107, + 323, + 504, + 344 + ], + "type": "text", + "content": "[51] Yingtao Tian and David Ha. Modern evolution strategies for creativity: Fitting concrete images and abstract concepts. In Artificial Intelligence in Music, Sound, Art and Design, 2022." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 350, + 504, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 350, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 107, + 350, + 504, + 381 + ], + "type": "text", + "content": "[52] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 388, + 504, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 388, + 504, + 409 + ], + "spans": [ + { + "bbox": [ + 107, + 388, + 504, + 409 + ], + "type": "text", + "content": "[53] Yizhi Wang and Zhouhui Lian. Deepvecfont: synthesizing high-quality vector fonts via dual-modality learning. TOG, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 415, + 504, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 415, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 107, + 415, + 504, + 437 + ], + "type": "text", + "content": "[54] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 443, + 506, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 443, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 107, + 443, + 506, + 464 + ], + "type": "text", + "content": "[55] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. NeurIPS, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 471, + 504, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 471, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 107, + 471, + 504, + 492 + ], + "type": "text", + "content": "[56] Ronghuan Wu, Wanchao Su, and Jing Liao. Chat2svg: Vector graphics generation with large language models and image diffusion models. arXiv preprint arXiv:2411.16602, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 498, + 504, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 498, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 107, + 498, + 504, + 519 + ], + "type": "text", + "content": "[57] Ronghuan Wu, Wanchao Su, Kede Ma, and Jing Liao. Iconshop: Text-guided vector icon synthesis with autoregressive transformers. TOG, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 525, + 504, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 525, + 504, + 547 + ], + "spans": [ + { + "bbox": [ + 107, + 525, + 504, + 547 + ], + "type": "text", + "content": "[58] Xiaoshi Wu, Keqiang Sun, Feng Zhu, Rui Zhao, and Hongsheng Li. Human preference score: Better aligning text-to-image models with human preference. In ICCV, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 553, + 504, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 553, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 107, + 553, + 504, + 574 + ], + "type": "text", + "content": "[59] Ximing Xing, Juncheng Hu, Guotao Liang, Jing Zhang, Dong Xu, and Qian Yu. 
Empowering llms to understand and generate complex vector graphics. arXiv preprint arXiv:2412.11102, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 581, + 504, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 581, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 107, + 581, + 504, + 602 + ], + "type": "text", + "content": "[60] Ximing Xing, Haitao Zhou, Chuang Wang, Jing Zhang, Dong Xu, and Qian Yu. SVGdreamer: Text guided. \nsvg generation with diffusion model. In CVPR, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 609, + 504, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 609, + 504, + 630 + ], + "spans": [ + { + "bbox": [ + 107, + 609, + 504, + 630 + ], + "type": "text", + "content": "[61] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 635, + 504, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 635, + 504, + 657 + ], + "spans": [ + { + "bbox": [ + 107, + 635, + 504, + 657 + ], + "type": "text", + "content": "[62] Yiying Yang, Fukun Yin, Wen Liu, Jiayuan Fan, Xin Chen, Gang Yu, and Tao Chen. Pm-inr: Prior-rich multi-modal implicit large-scale scene neural representation. In AAAI, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 663, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 663, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 107, + 663, + 504, + 685 + ], + "type": "text", + "content": "[63] Fukun Yin, Xin Chen, Chi Zhang, Biao Jiang, Zibo Zhao, Wen Liu, Gang Yu, and Tao Chen. Shapept: 3d shape generation with a unified multi-modal language model. TMM, 2025." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "type": "text", + "content": "[64] Alex Young, Bei Chen, Chao Li, Chengen Huang, Ge Zhang, Guanwei Zhang, Heng Li, Jiangcheng Zhu, Jianqun Chen, Jing Chang, et al. Yi: Open foundation models by 01. ai. arXiv preprint arXiv:2403.04652, 2024." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 244 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 94 + ], + "type": "text", + "content": "[65] Zhengming Yu, Wei Cheng, Xian Liu, Wayne Wu, and Kwan-Yee Lin. Monohuman: Animatable human neural field from monocular video. In CVPR, 2023." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 100, + 504, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 100, + 504, + 122 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 504, + 122 + ], + "type": "text", + "content": "[66] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 129, + 504, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 129, + 504, + 149 + ], + "spans": [ + { + "bbox": [ + 107, + 129, + 504, + 149 + ], + "type": "text", + "content": "[67] Yuanhan Zhang, Kaiyang Zhou, and Ziwei Liu. What makes good examples for visual in-context learning? NeurIPS, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 156, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 156, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 504, + 186 + ], + "type": "text", + "content": "[68] Xiabin Zhou, Wenbin Wang, Minyan Zeng, Jiaxian Guo, Xuebo Liu, Li Shen, Min Zhang, and Liang Ding. Dynamicky: Task-aware adaptive kv cache compression for long context llms. arXiv preprint arXiv:2412.14838, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 194, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 194, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 106, + 194, + 504, + 217 + ], + "type": "text", + "content": "[69] Yucheng Zhou, Xiang Li, Qianning Wang, and Jianbing Shen. Visual in-context learning for large vision-language models. arXiv preprint arXiv:2402.11574, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 222, + 504, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 222, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 107, + 222, + 504, + 244 + ], + "type": "text", + "content": "[70] Bocheng Zou, Mu Cai, Jianrui Zhang, and Yong Jae Lee. Vgbench: A comprehensive benchmark of vector graphics understanding and generation for large language models. In EMNLP, 2024." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 70, + 181, + 89 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 70, + 181, + 89 + ], + "spans": [ + { + "bbox": [ + 104, + 70, + 181, + 89 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 102, + 346, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 102, + 346, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 346, + 116 + ], + "type": "text", + "content": "A Additional Details of MMSVG-2M dataset" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 128, + 274, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 128, + 274, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 274, + 140 + ], + "type": "text", + "content": "A.1 Samples of MMSVG-2M Dataset" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "type": "text", + "content": "We visualize samples of our MMSVG-2M dataset in Fig. 8. In our MMSVG-2M dataset, " + }, + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "type": "inline_equation", + "content": "55\\%" + }, + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "type": "text", + "content": " of the SVG samples belongs to the MMSVG-Icon, " + }, + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "type": "text", + "content": " belongs to the MMSVG-Illustration, and the rest " + }, + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 148, + 506, + 216 + ], + "type": "text", + "content": " belongs to the MMSVG-Character. Among the SVG samples within the MMSVG-Character category, half of them comes from Freepik, while another half is generated by our data creation pipeline. We also collect image-SVG pairs for the character-reference SVG generation tasks during the generation process." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 127, + 246, + 481, + 299 + ], + "blocks": [ + { + "bbox": [ + 104, + 224, + 504, + 245 + ], + "lines": [ + { + "bbox": [ + 104, + 224, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 504, + 245 + ], + "type": "text", + "content": "Table 6: Data Statistics for MMSVG-2M. Our MMSVG-2M consists of 1.1 million SVG icons, 0.5 million SVG illustrations, and 0.4 million SVG anime characters." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 246, + 481, + 299 + ], + "lines": [ + { + "bbox": [ + 127, + 246, + 481, + 299 + ], + "spans": [ + { + "bbox": [ + 127, + 246, + 481, + 299 + ], + "type": "table", + "html": "
<table><tr><td>Dataset</td><td>Train</td><td>Val</td><td>Total</td><td>Source</td><td>Token Length</td></tr>
<tr><td>MMSVG-Icon</td><td>990k</td><td>110k</td><td>1,100k</td><td>Iconfont</td><td>2.2k ± 0.9k</td></tr>
<tr><td>MMSVG-Illustration</td><td>450k</td><td>50k</td><td>500k</td><td>IconScout</td><td>8.1k ± 3.3k</td></tr>
<tr><td>MMSVG-Character</td><td>350k</td><td>50k</td><td>400k</td><td>Freepik & generated</td><td>28k ± 7.3k</td></tr></table>
", + "image_path": "041793a88ae441883cee99e8fcd32d85151503f775b754e305d890cf08cd57b0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 318, + 288, + 330 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 318, + 288, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 288, + 330 + ], + "type": "text", + "content": "A.2 SVG-Image-Text Pairs Construction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 384 + ], + "type": "text", + "content": "Our MMSVG-2M dataset comprises two million SVG samples with the corresponding rasterized images. We generate captions on the rasterized images with BLIP-2 [28], thereby providing textual descriptions that enable us to fine-tune our model to follow these instructions. We use CairoSVG [25] for rasterization and remove samples that produced completely white images." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 387, + 506, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 506, + 433 + ], + "type": "text", + "content": "Annotation. We employ an off-the-shelf VLM, specifically BLIP-2 [28], to generate SVG captions with the prompt below. To reduce hallucinations, we drop the samples with CLIP scores less than 30. We also visualize the distribution annotated keywords of MMSVG-2M dataset in Fig. 10 with word cloud format. And the instruction template for annotation is shown in Tab. 7." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 436, + 506, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 506, + 482 + ], + "type": "text", + "content": "Instruction templates. MMSVGBench provides three tasks, including text-to-SVG task, image-to-SVG task and character-reference SVG generation task. Each task needs different instruction templates. For the text and image conditioning SVG generation, we provide the input text or image with VLM architecture. For character-reference SVG generation, we provide the natural character" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 504, + 259, + 515 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 504, + 259, + 515 + ], + "spans": [ + { + "bbox": [ + 121, + 504, + 259, + 515 + ], + "type": "text", + "content": "Instructions for Different Tasks" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 119, + 523, + 490, + 678 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 119, + 523, + 490, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 523, + 490, + 569 + ], + "spans": [ + { + "bbox": [ + 119, + 523, + 490, + 569 + ], + "type": "text", + "content": "- Employed BLIP2 for SVG Captioning: You are a helpful assistant. Your task is to describe this image in a single sentence, including the object, its color, and its overall arrangement. 
For example: \"Yellow cheers with glasses of alcohol drinks.\" / \"Heart emojis represent love on Valentine's Day.\"" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 119, + 578, + 490, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 578, + 490, + 601 + ], + "spans": [ + { + "bbox": [ + 119, + 578, + 490, + 601 + ], + "type": "text", + "content": "- Text-to-SVG: You are a helpful SVG Generation assistant, designed to generate SVG. We provide the text description as input, generate SVG based on the text." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 119, + 610, + 490, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 610, + 490, + 634 + ], + "spans": [ + { + "bbox": [ + 119, + 610, + 490, + 634 + ], + "type": "text", + "content": "- Image-to-SVG: You are a helpful SVG Generation assistant, designed to generate SVG. We provide an image as input, generate SVG for this image." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 119, + 643, + 490, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 643, + 490, + 678 + ], + "spans": [ + { + "bbox": [ + 119, + 643, + 490, + 678 + ], + "type": "text", + "content": "- Character-Reference SVG Generation: You are a helpful SVG Generation assistant, designed to generate SVG. We provide a natural image as input, please generate the simplified character SVG based on the reference input image." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 697, + 504, + 719 + ], + "lines": [ + { + "bbox": [ + 104, + 697, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 697, + 504, + 719 + ], + "type": "text", + "content": "Table 7: Instructions for Different Tasks. Instructions including annotation, text-to-SVG, image-to-SVG and character-reference SVG generation." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 69, + 492, + 300 + ], + "blocks": [ + { + "bbox": [ + 116, + 69, + 492, + 300 + ], + "lines": [ + { + "bbox": [ + 116, + 69, + 492, + 300 + ], + "spans": [ + { + "bbox": [ + 116, + 69, + 492, + 300 + ], + "type": "image", + "image_path": "c5d2637f15e3c2c9618d7e718252bc140df50e4b622c6efe968a8714b6abd547.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 308, + 504, + 351 + ], + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 351 + ], + "type": "text", + "content": "Figure 8: Samples from MMSVG-2M Dataset. The proposed MMSVG-2M dataset can be separated into three subset, namely Icon, Illustration and Character. Samples from Icon, Illustration and part of Character subsets are downloaded from Internet. Another part of Character subset is generated by our data creation pipeline, which can provide image and SVG pairs for image prompting task." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 358, + 504, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 504, + 380 + ], + "type": "text", + "content": "reference image and the original image with the VLM architecture. The list of instruction templates for different tasks are shown in Tab. 7." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 396, + 282, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 282, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 282, + 407 + ], + "type": "text", + "content": "A.3 Character-SVG Pairs Construction" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "type": "text", + "content": "As illustrated in the Fig. 6, part of our proposed MMSVG-2M-Character subset is constructed using a generative pipeline. As shown in the pipeline diagram in Fig. 2, we employ a FLUX [26]-based generative model enhanced with a vector-style LoRA to enable the generation of SVG-style data. For image-based conditioning, we adopt FLUX-Redux [27], which injects image features via a SigLIP encoder and projects them into image embeddings. These embeddings are then concatenated with the text tokens as conditioning inputs for FLUX [26]. However, in practice, the original Redux [27] conditioning proves to be overly strong. To address this, we adopt a community-implemented variant of Redux that downsamples the image embeddings in 2D space. As observed in our experiments shown in Fig. 9, a downsampling factor between " + }, + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "type": "inline_equation", + "content": "3 \\times" + }, + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "type": "text", + "content": " yields the most reasonable SVG-style character references. Finally, we employ VTracer [12] to perform near-instant vectorization of the generated images. To construct the MMSVG-2M-Character subset, we first filter " + }, + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "type": "inline_equation", + "content": "103k" + }, + { + "bbox": [ + 104, + 417, + 506, + 581 + ], + "type": "text", + "content": " character instances from the Danbooru [13] dataset and apply the aforementioned pipeline with motion and expression keywords like previous works [8, 9, 36, 65]. We compare the raw FLUX [26] outputs and their vectorized counterparts, retaining only those samples with PSNR and SSIM scores above a certain threshold as valid data." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 598, + 222, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 598, + 222, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 222, + 611 + ], + "type": "text", + "content": "B Additional Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 624, + 179, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 179, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 179, + 636 + ], + "type": "text", + "content": "B.1 Scaling Up" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 645, + 504, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 504, + 690 + ], + "type": "text", + "content": "To study the effectiveness of scaling up multimodal SVG generation, we scale up OmniSVG from 4B to 8B parameters. We present training perplexity in Fig. 11, where both models are trained from scratch on 250 billion tokens. We show that, as the size of the model grows, the model achieves a lower validation perplexity, indicating a higher probability of producing the validation data." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 704, + 233, + 715 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 704, + 233, + 715 + ], + "spans": [ + { + "bbox": [ + 105, + 704, + 233, + 715 + ], + "type": "text", + "content": "B.2 Implementation Details" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "text", + "content": "We train our models in bfloat16 with the ZeRO-2 strategy [40] for memory-efficient training. We also adopt the AdamW [33] optimizer with a learning rate decaying from " + }, + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "inline_equation", + "content": "3 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "inline_equation", + "content": "3 \\times 10^{-6}" + }, + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "text", + "content": " and a weight decay of 0.1 to train our model. In practice, we load the pre-trained weights from the Qwen2.5-VL [1] model and initialize the SVG embeddings from scratch. Without further specification, we generate SVGs with the top-k and top-p sampling strategy with " + }, + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "inline_equation", + "content": "k = 50" + }, + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "inline_equation", + "content": "p = 0.95" + }, + { + "bbox": [ + 104, + 72, + 298, + 194 + ], + "type": "text", + "content": " for diversity." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 213, + 224, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 224, + 227 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 224, + 227 + ], + "type": "text", + "content": "C Additional Results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 240, + 298, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 298, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 298, + 339 + ], + "type": "text", + "content": "As list in full comparisons in Tab. 2, including all the baselines mentioned in Sec. 5. For the text-to-SVG task, we compare our method with language-based (LLM-based) methods, including VectorFusion [22], SVGDreamer [60], Chat2SVG [56] and IconShop [57]. For image-to-SVG task, we compare our method with baseline methods across image vectorization and Multimodal Large Language Modeling ap" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 339, + 506, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 339, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 506, + 515 + ], + "type": "text", + "content": "proaches, including LIVE [34], DiffVG [29], StarVector [42] and GPT-4o [21] using the official implementations with the hyperparameters proposed by the authors, and apply their pre- and post-processing code as required. Specifically, for the text-to-SVG task, the optimization-based method SVGDreamer excels in enhancing editability by employing a semantic-driven image vectorization process that effectively separates foreground objects from the background, while failing to handle complex scenes. Another optimization-based work, VectorFusion, stands out for generating SVG-exportable vector graphics without relying on large captioned datasets. However, Vectorfusion is also unable to handle complex scenarios and diverse styles. The significant problem with these optimization-based works is that the optimization time is too long. Generating an SVG usually takes more than ten minutes, which is too expensive. For the LLM-based method, Chat2SVG integrates Large Language Models (LLMs) with image diffusion models to create semantically rich SVG templates. However, Chat2SVG still needs to optimize the output SVG script from LLM, which introduces increased computational complexity and poses challenges during model training. In comparison, IconShop utilizes a transformer-based architecture to autoregressively model SVG path sequences, demonstrating exceptional performance in simplified icon SVGs, which offers effective solutions for text-to-SVG generation. It can only generate black simple Icon SVGs." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 306, + 61, + 503, + 258 + ], + "blocks": [ + { + "bbox": [ + 306, + 61, + 503, + 258 + ], + "lines": [ + { + "bbox": [ + 306, + 61, + 503, + 258 + ], + "spans": [ + { + "bbox": [ + 306, + 61, + 503, + 258 + ], + "type": "image", + "image_path": "06747bc02f04585aabd024e7ccafb4f8d7839e47fc78f613ad60da4b3238485c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 265, + 506, + 316 + ], + "lines": [ + { + "bbox": [ + 302, + 265, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 302, + 265, + 506, + 316 + ], + "type": "text", + "content": "Figure 10: Word Cloud Visualization of Label Distribution in the MMSVG-2M Dataset. The size of each label corresponds to its frequency of occurrence. 
The larger the label, the more frequently it appears in the dataset." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 123, + 534, + 495, + 674 + ], + "blocks": [ + { + "bbox": [ + 123, + 534, + 495, + 674 + ], + "lines": [ + { + "bbox": [ + 123, + 534, + 495, + 674 + ], + "spans": [ + { + "bbox": [ + 123, + 534, + 495, + 674 + ], + "type": "image", + "image_path": "dbd2c1184a18cb173aa8a27178432adf7f48ac0b537a06ddb31d90e52a5f32cd.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 688, + 504, + 720 + ], + "lines": [ + { + "bbox": [ + 104, + 688, + 504, + 720 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 504, + 720 + ], + "type": "text", + "content": "Figure 9: Image Prompting Dataset Creation of MMSVG-2M Character. By utilizing FLUX-Redux and SVG vectorization tools, image prompting data pairs can be generated. We adipot FLUX-Redux downsampling scale with 2, 3 in practice by trading-off the character similarity and complexity of generated SVG." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 73, + 294, + 203 + ], + "blocks": [ + { + "bbox": [ + 109, + 73, + 294, + 203 + ], + "lines": [ + { + "bbox": [ + 109, + 73, + 294, + 203 + ], + "spans": [ + { + "bbox": [ + 109, + 73, + 294, + 203 + ], + "type": "image", + "image_path": "b122d2212096046f8e937b7fe80bc42797f0338f419bcb3492121a56f357ccd3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 207, + 264, + 219 + ], + "lines": [ + { + "bbox": [ + 143, + 207, + 264, + 219 + ], + "spans": [ + { + "bbox": [ + 143, + 207, + 264, + 219 + ], + "type": "text", + "content": "(a) Training PPL for our models." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 306, + 71, + 499, + 203 + ], + "blocks": [ + { + "bbox": [ + 306, + 71, + 499, + 203 + ], + "lines": [ + { + "bbox": [ + 306, + 71, + 499, + 203 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 499, + 203 + ], + "type": "image", + "image_path": "f2a3bf5441468ccd44b23b048b566fd1b98834e333baba08f597b0619c99d3b5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 208, + 468, + 219 + ], + "lines": [ + { + "bbox": [ + 343, + 208, + 468, + 219 + ], + "spans": [ + { + "bbox": [ + 343, + 208, + 468, + 219 + ], + "type": "text", + "content": "(b) Validation PPL for our models." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 225, + 504, + 247 + ], + "lines": [ + { + "bbox": [ + 104, + 225, + 504, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 225, + 504, + 247 + ], + "type": "text", + "content": "Figure 11: Training and Validation Perplexity (PPL) for OmniSVG Models. We train all the models from scratch on 250 billion tokens. We observe that the performance grows with model sizes." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 506, + 423 + ], + "type": "text", + "content": "For the image-to-SVG task, we compare our method with the image vectorization methods. LIVE allows progressive and efficient generation of SVGs, optimizing closed vector paths under raster image supervision with shape complexity control. However, LIVE needs to optimize for a long time when generating complex SVGs. DiffVG enables end-to-end differentiability in vector graphics rasterization, improving optimization through anti-aliasing and gradient-based methods while also is computationally expensive due to the complexity of the forward-backward rasterization process. Recently, the Multimodal Large Language Model (MLLM) based method StarVector leverages the visual understanding to apply accurate SVG primitive to the LLM architecture, which also can generate SVGs from both text and image inputs. However, it still fails to generate complex SVGs. Since Starvector [42] has not yet opened up its text-to-SVG model weights, our MMSVGBench does not evaluate Starvector's text-to-SVG capabilities. MMSVG-Bench also evaluates our methods with VLM methods, GPT-4o, to conduct a comprehensive assessment. We compare our method with these baselines on our MMSVG-2M dataset, from simple MMSVG-Icon dataset, a bit complex MMSVG-illustration dataset, to the very complex MMSVG-Character dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 440, + 274, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 274, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 274, + 453 + ], + "type": "text", + "content": "D More details of the baselines" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 466, + 209, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 466, + 209, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 209, + 477 + ], + "type": "text", + "content": "D.1 Text-to-SVG Task" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 487, + 504, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 554 + ], + "type": "text", + "content": "SVGDreamer [60] uses a semantic-driven image vectorization (SIVE) process to separate foreground objects and background, improving editability. The SIVE process utilizes attention-based primitive control and an attention-mask loss function to manipulate individual elements effectively. To address issues in existing text-to-SVG generation methods, the proposed Vectorized Particle-based Score Distillation (VPSD) approach models SVGs as distributions of control points and colors, improving shape, color diversity, and convergence speed." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 558, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 504, + 624 + ], + "type": "text", + "content": "VectorFusion [22] leverages a text-conditioned diffusion model trained on pixel representations to generate SVG exportable vector graphics without needing large captioned SVG datasets. 
By optimizing a differentiable vector graphics rasterizer, it distills semantic knowledge from a pretrained diffusion model and uses Score Distillation Sampling to generate an SVG consistent with a caption. Experiments show that VectorFusion improves both quality and fidelity, offering a variety of styles such as pixel art and sketches." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 628, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 685 + ], + "type": "text", + "content": "Chat2SVG [56] proposes a hybrid framework that combines the strengths of Large Language Models (LLMs) and image diffusion models for text-to-SVG generation. The approach first uses an LLM to create semantically meaningful SVG templates from basic geometric primitives. A dual-stage optimization pipeline, guided by image diffusion models, refines paths in latent space and adjusts point coordinates to enhance geometric complexity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "IconShop [57] uses a transformer-based architecture to encode path commands and learn to model SVG path sequences autoregressively. It has shown excellent results in simplified icon scenarios and provides a good solution to Text-to-SVG generation by extending the FIGR-8-SVG dataset with" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 117, + 482, + 653 + ], + "blocks": [ + { + "bbox": [ + 126, + 117, + 482, + 653 + ], + "lines": [ + { + "bbox": [ + 126, + 117, + 482, + 653 + ], + "spans": [ + { + "bbox": [ + 126, + 117, + 482, + 653 + ], + "type": "image", + "image_path": "1aa916043462691eacdf1ec6504ad4cdb68fdafec3028a2b4e705575becadf71.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 163, + 657, + 447, + 670 + ], + "lines": [ + { + "bbox": [ + 163, + 657, + 447, + 670 + ], + "spans": [ + { + "bbox": [ + 163, + 657, + 447, + 670 + ], + "type": "text", + "content": "Figure 12: Illustration of the SVG Generation Capabilities of OmniSVG." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " captions. 
We have access to their dataset and original splits and have trained our model on that data using a pre-trained checkpoint (trained on OmniVG dataset). We have extracted the results from IconShop and included them here to compare our method." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 178 + ], + "type": "text", + "content": "LLM4SVG [59] is a framework that leverages Large Language Models (LLMs) to understand and generate Scalable Vector Graphics (SVGs). It employs a structured SVG encoding approach, utilizing learnable semantic tokens to accurately represent SVG components and their properties. This design enables LLMs to produce SVGs that are both semantically aligned with textual descriptions and visually coherent. However, LLM4SVG also has a maximum token length of 2048, limiting its ability to generate highly complex SVGs that require longer sequences." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 190, + 217, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 190, + 217, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 217, + 201 + ], + "type": "text", + "content": "D.2 Image-to-SVG Task" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 209, + 504, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 504, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 504, + 276 + ], + "type": "text", + "content": "LIVE (Layer-wise Image Vectorization) [34] is a method for progressively generating SVGs that closely fit a given raster image by recursively adding and optimizing closed vector paths. Using a differentiable renderer (based on DiffVG [29]), LIVE enables direct optimization of paths under raster image supervision while controlling shape complexity by adjusting the number of path segments. It introduces component-wise path initialization, identifying key visual components to ensure efficient topology extraction and minimize redundant shapes." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 281, + 505, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 505, + 347 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 505, + 347 + ], + "type": "text", + "content": "DiffVG [29] is a landmark in vector graphics research, pioneering deep learning-based methods with the first differentiable vector graphics rasterization pipeline. By leveraging a combination of anti-aliasing techniques and gradient-based optimization, DiffVG ensures differentiability. Unlike methods relying on non-differentiable curve-to-mesh conversions, DiffVG employs a forward-backward rasterization process, where the forward pass generates antialiased images and the backward pass computes gradients with respect to vector graphic parameters." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 352, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 504, + 430 + ], + "type": "text", + "content": "StarVector [42] works directly in the SVG code space, leveraging visual understanding to apply accurate SVG primitives. StarVector employs a transformer-based architecture that integrates an image encoder with a language model, enabling it to process visual inputs and produce precise SVG code. 
StarVector effectively handles diverse SVG types, including icons, logos, and complex diagrams, demonstrating robust generalization across various vectorization tasks. However, with a 16k token context window, StarVector may struggle to process highly complex SVGs that require longer sequences." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 434, + 506, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 506, + 479 + ], + "type": "text", + "content": "Vtracer [12] is an image processing algorithm designed to convert raster images into SVGs. The algorithm follows a three-step pipeline, which involves the hierarchical clustering of images for vectorization. Initially, the pixels are transformed into paths, which are subsequently simplified into polygons. In the final step, these polygons are smoothed and approximated using a Bezier curve fitter." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_content_list.json b/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..eeb1230780959a6a2211c6fee1efe4632aec8b6c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_content_list.json @@ -0,0 +1,2035 @@ +[ + { + "type": "text", + "text": "PromptHMR: Promptable Human Mesh Recovery", + "text_level": 1, + "bbox": [ + 243, + 130, + 754, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yufu Wang $^{1,4}$ Yu Sun $^{1}$ Priyanka Patel $^{1}$ Kostas Daniilidis $^{4,5}$", + "bbox": [ + 238, + 179, + 756, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Michael J. 
Black1,2 Muhammed Kocabas1,2,3", + "bbox": [ + 313, + 198, + 681, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Meshcapade $^{2}$ MPI for Intelligent Systems $^{3}$ ETH Zürich $^{4}$ University of Pennsylvania $^{5}$ Archimedes", + "bbox": [ + 107, + 215, + 883, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://yufu-wang.github.io/phmr-page", + "bbox": [ + 276, + 234, + 715, + 252 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9270978d52c1cf9dadd732232e763d94d0b8368cc6d89f133ad0335f742112de.jpg", + "image_caption": [ + "image $\\downarrow$ box prompts" + ], + "image_footnote": [], + "bbox": [ + 127, + 267, + 338, + 383 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/201617bcc60b7ea35f45b6e032b28f2e5f14eeb391b77e8238450717c31c8156.jpg", + "image_caption": [ + "image ↓ box prompts" + ], + "image_footnote": [], + "bbox": [ + 349, + 268, + 575, + 385 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f6ba05c518e44eb65aca5d52635611bd11aa77d87bb42893fe1dfbd26de0b8ec.jpg", + "image_caption": [ + "image ↓ masks", + "PromptHMR" + ], + "image_footnote": [], + "bbox": [ + 586, + 268, + 700, + 385 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/02daf49395bd2b890a48cabb843ab75004decfd12c4023d5ee0b4d1251cfd2f6.jpg", + "image_caption": [ + "image box+text", + "PromptHMR" + ], + "image_footnote": [], + "bbox": [ + 725, + 268, + 861, + 385 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4dd0a14461f0856bedb6eedd29c14f07ab7a15e151b7f5a8f9b9fc20f3634e8e.jpg", + "image_caption": [ + "PromptHMR" + ], + "image_footnote": [], + "bbox": [ + 127, + 444, + 336, + 561 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/756ef6ee3dce2b408d2c590ea77778c0e6c4634ab245800eabe66fc8221dff3c.jpg", + "image_caption": [ + "PromptHMR", + "Figure 1. PromptHMR is a promptable human pose and shape (HPS) estimation method that processes images with spatial or semantic prompts. It takes \"side information\" readily available from vision-language models or user input to improve the accuracy and robustness of 3D HPS. PromptHMR recovers human pose and shape from spatial prompts such as (a) face bounding boxes, (b) partial or complete person detection boxes, or (c) segmentation masks. It refines its predictions using semantic prompts such as (c) person-person interaction labels for close contact scenarios, or (d) natural language descriptions of body shape to improve body shape predictions. Both image and video versions of PromptHMR achieve state-of-the-art accuracy." + ], + "image_footnote": [], + "bbox": [ + 349, + 445, + 575, + 561 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7c2f9735852252556d12572991c936f8502ca7b7ceaa2c90fbd13948e95061f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 586, + 464, + 702, + 561 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9e599b70e43046dd11573a4225224c1af4fd86746468273033a022f96c5b671e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 725, + 463, + 861, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 680, + 326, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human pose and shape (HPS) estimation presents challenges in diverse scenarios such as crowded scenes, person-person interactions, and single-view reconstruction. 
Existing approaches lack mechanisms to incorporate auxiliary \"side information\" that could enhance reconstruction accuracy in such challenging scenarios. Furthermore, the most accurate methods rely on cropped person detections and cannot exploit scene context while methods that process the whole image often fail to detect people and are less accurate than methods that use crops. While recent language-based methods explore HPS reasoning through large language or vision-language models, their metric accuracy is well below", + "bbox": [ + 89, + 719, + 485, + 900 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the state of the art. In contrast, we present PromptHMR, a transformer-based promptable method that reformulates HPS estimation through spatial and semantic prompts. Our method processes full images to maintain scene context and accepts multiple input modalities: spatial prompts like bounding boxes and masks, and semantic prompts like language descriptions or interaction labels. PromptHMR demonstrates robust performance across challenging scenarios: estimating people from bounding boxes as small as faces in crowded scenes, improving body shape estimation through language descriptions, modeling person-person interactions, and producing temporally coherent motions in videos. Experiments on benchmarks show that PromptHMR achieves state-of-the-art performance while offering flexible prompt-based control over the HPS estimation process.", + "bbox": [ + 511, + 681, + 908, + 910 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06397v2 [cs.CV] 24 May 2025", + "bbox": [ + 22, + 255, + 58, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 89, + 222, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The estimation of 3D human pose and shape (HPS) is classically viewed as regressing the parameters of shape and pose from pixels. In particular, most methods take a tightly cropped image of a person and output the pose and shape in camera coordinates. While the accuracy of such methods has increased rapidly, they do not address the whole problem. In particular, an HPS method should be able to take an image or video containing complex human-human and human-scene interactions, return the parameters of every person in the scene, and place these people in a consistent global coordinate frame.", + "bbox": [ + 89, + 114, + 480, + 280 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our key observation is that the classical \"pixels to parameters\" formulation of the problem is too narrow. Today, we have large vision-language foundation models (VLMs) that understand a great deal about images and what people are doing in them. What these models lack, however, is an understanding of 3D human pose and shape. Recent work [10, 16] has tried to bring together VLMs and 3D HPS but with 3D accuracy well below the best classical methods.", + "bbox": [ + 89, + 281, + 480, + 401 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Consequently, we need to think about the problem in a different way and ask whether we can exploit readily available side information (e.g. provided by a VLM) to improve 3D HPS regression robustness, usefulness, and accuracy. To that end, we develop a novel \"promptable\" HPS architecture called PromptHMR. Consider the sample images shown in Fig. 1. 
In crowded scenes, existing person detection methods struggle, while face detection methods remain reliable. When people closely interact, their body parts overlap and occlude each other, introducing ambiguity in pose estimation. Moreover, 3D body shape estimation from monocular views is challenging due to perspective ambiguity. In all these cases, we can extract cues, or prompts, that provide \"side information\" that can help an HPS method better analyze the scene. PromptHMR formalizes this intuition by combining image evidence with different types of spatial and semantic information that can come from either humans or AI systems such as VLMs.", + "bbox": [ + 89, + 402, + 482, + 672 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Specifically, our approach combines three key components: (1) a vision transformer that extracts features from high-resolution full images to preserve scene context, (2) a multi-modal prompt encoder that processes spatial and semantic inputs, and (3) a transformer decoder that attends to both prompt and image tokens to generate SMPL-X [47] body parameters. This design addresses the limitations of cropped-image HPS methods by processing full images using side information in the form of prompts. It addresses the challenges that full-image HPS methods have in detecting all people in a scene by accepting readily available bounding boxes. Last, our method incorporates auxiliary semantic information through text descriptions or interaction labels.", + "bbox": [ + 89, + 674, + 482, + 869 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By combining spatial and semantic prompting, our method offers a powerful and versatile approach to 3D HPS", + "bbox": [ + 89, + 869, + 482, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "estimation from the whole image. At test time, we show that this promptable structure (1) can take various bounding boxes or segmentation masks to recover full body HPS in a robust way, (2) improve its body shape predictions by using textual descriptions as input, (3) is capable of modeling person-person close interaction directly in the regression process, and (4) uses full image context to reconstruct people coherently in the camera space and the world space. Our model can handle video by incorporating temporal transformer layers at the SMPL-X decoding phase, yielding temporally stable and smooth motions. Last, following TRAM [67], we combine the temporal version of our model with metric SLAM to estimate human motion in world coordinates.", + "bbox": [ + 511, + 90, + 903, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We make several key design choices that make PromptHMR successful. To achieve robustness to different spatial inputs, we train our model by simulating noisy full-body and face-region bounding boxes. For improved body shape estimation, we leverage SHAPY [8] to generate automatic body shape descriptions for training samples and process them with a pretrained text encoder [50]. To enhance person-person interaction reconstruction, we use segmentation masks as more precise spatial prompts and develop person-person attention layers that operate between prompted people, producing coherent reconstructions of close interactions. 
Through random masking of different input types during training, our model learns to work with any combination of prompts at test time.", + "bbox": [ + 511, + 301, + 903, + 513 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Quantitative experiments on the EMDB [25], 3DPW [65], RICH [21], Hi4D [71], CHI3D [17] and HBW [8] benchmark datasets demonstrate that our method outperforms state-of-the-art (SOTA) approaches and strong baselines. We also provide many qualitative examples of in-the-wild images and videos that illustrate the robustness and generalization of PromptHMR.", + "bbox": [ + 511, + 513, + 903, + 619 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By moving away from the pure pixels-to-parameters approach, PromptHMR not only achieves a new SOTA, it shows a new way of improving both accuracy and robustness by leveraging side information that is easily available. One can think of this as a collaboration between VLMs, which know a lot about people in images but not in 3D, and a metric regressor that knows a lot about 3D humans but not about the semantics of what they do. We show that this combination has significant upside potential to increase both generality and accuracy. Our code and model are available for research purposes.", + "bbox": [ + 511, + 619, + 903, + 786 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 511, + 799, + 653, + 814 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Human pose and shape estimation from images. Existing methods for human pose and shape (HPS) estimation can be broadly categorized into two main approaches. The first [6, 18, 23, 29, 30, 32-34, 40, 66, 73] uses a tightly cropped image of an individual as input, and estimates", + "bbox": [ + 511, + 825, + 903, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "pose and shape in camera coordinates. While effective for isolated individuals, this approach discards scene context that is essential to resolve human pose in cases of occlusion, severe overlap and close interaction in multi-person scenes [17, 71].", + "bbox": [ + 89, + 90, + 480, + 167 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The second category [2, 22, 57, 59–61] build upon object detection frameworks [5, 13] to jointly detect humans and estimate their pose and shape parameters. Having access to the entire image, they can better perceive occluded individuals and infer depth relationships, but they often suffer from detection failures due to the difficulty in simultaneously performing detection and reconstruction. Our \"promptable\" architecture leverages detection box prompts to resolve such conflicts while having access to the entire scene context.", + "bbox": [ + 89, + 167, + 480, + 305 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Human pose and shape estimation from video. Methods for human motion estimation from video can also be divided into two main categories. The first [7, 24, 28, 38, 58] focuses on estimating smooth human motion in camera space. These methods build upon single-person HPS estimation approaches [23, 32] by adding temporal layers during the SMPL decoding phase to introduce temporal coherence.", + "bbox": [ + 89, + 306, + 480, + 412 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "More recent methods estimate human motion in world coordinates from videos captured with dynamic cameras. 
These methods follow a two-stage approach, first estimating camera motion using SLAM techniques [19, 20, 41, 42, 62, 63], and then leveraging human motion priors to optimize the human world motion [31, 70, 72]. Others [53, 54] learn temporal models to directly regress human world motion from image and camera features. Still others [67, 74] use monocular metric depth estimation to solve for the scale of camera motion and transform human motion from camera space to world coordinates.", + "bbox": [ + 89, + 412, + 480, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In our approach, we extend PromptHMR to video by taking the SMPL-X output tokens and utilizing a temporal transformer module to estimate temporally stable and smooth human motion and translation in camera space. We follow TRAM [67] to transform human motion to world coordinates due to its simplicity and effectiveness.", + "bbox": [ + 89, + 580, + 480, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Semantic reasoning about 3D humans in images. Recent methods explore combining different types of semantic information, such as language descriptions and knowledge of person-person interactions, to improve reasoning about 3D humans from images and videos. For example, ChatPose [16] follows the common approach of visual language models (VLMs) [36] by fine-tuning a large language model (LLM) with a combination of images and tokens to estimate SMPL parameters. In a similar direction, PoseEmbroider [10] is a multi-modal framework that aligns image, 3D pose, and text representations in a shared latent space. While ChatPose focuses on combining high-level scene reasoning with 3D HPS, PoseEmbroider exploits detailed language descriptions of human pose. While promising, neither method achieves SOTA accuracy on the HPS task. Note", + "bbox": [ + 89, + 674, + 482, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "that many other methods relate language to human pose or motion, without considering images [1, 9, 37, 48, 64], but these are outside our scope.", + "bbox": [ + 511, + 90, + 903, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Additionally, several methods [8, 49, 55] focus on modeling the relationship between SMPL body shape and natural language descriptions. These methods show that language descriptions and images can provide complementary information to solve this task. Other approaches, such as BUDDI [43] and ProsePose [56], address the challenge of estimating person-person interactions. BUDDI is an optimization-based approach that leverages diffusion model as a prior over interacting people, while ProsePose queries a VLM to estimate contact points on the human body surface and uses these contact points to guide an optimization process for improving human interaction.", + "bbox": [ + 511, + 136, + 903, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Overall, methods like ChatPose [16] and PoseEmbroider [10] are promising steps toward jointly learning the relationship between vision, language, and 3D humans, but their understanding of 3D humans remains limited, as indicated by their relatively low 3D pose accuracy. Meanwhile, SHAPY [8], BodyShapeGPT [49], and BodyTalk [55] focus solely on exploring the relationship between SMPL body shape and natural language. 
BUDDI and ProsePose are post-processing approaches for interaction that do not directly reason using image information.", + "bbox": [ + 511, + 316, + 903, + 469 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our approach addresses the limitations of these methods by training a single model capable of flexible prompting that achieves state-of-the-art (SOTA) performance, not only on standard HPS benchmarks but also on benchmarks tailored to body shape and person-person interaction.", + "bbox": [ + 511, + 469, + 903, + 544 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 513, + 556, + 604, + 570 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given an image $I$ containing $N$ people and a set of prompts, our main goal is to recover the pose, shape, and locations of the people in the camera space to form a coherent human-centric 3D scene. Figure 2 shows an overview.", + "bbox": [ + 511, + 580, + 903, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Promptable mesh regression", + "text_level": 1, + "bbox": [ + 511, + 648, + 767, + 666 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We adopt SMPL-X [47] to represent each person $i$ in the 3D space, including the orientation $\\phi_i \\in \\mathbb{R}^3$ , local body pose $\\theta_i \\in \\mathbb{R}^{22 \\times 3}$ , shape $\\beta_i \\in \\mathbb{R}^{10}$ , and translation $\\tau_i \\in \\mathbb{R}^3$ in the camera space. We do not include face and hand parameters in this work. Each human $H_i$ is mapped to a 3D mesh with the differentiable SMPL-X layer.", + "bbox": [ + 511, + 672, + 903, + 763 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nH _ {i} = \\left\\{\\phi_ {i}, \\theta_ {i}, \\beta_ {i}, \\tau_ {i} \\right\\}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 637, + 770, + 903, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Each person can be prompted with spatial and semantic prompts. Spatial prompts include a bounding box $b_{i} \\in \\mathbb{R}^{2 \\times 2}$ (the two corners) and a segmentation mask $m_{i} \\in \\mathbb{R}^{h \\times w}$ . Semantic prompts consist of text and two-person interaction labels. The text prompt is the CLIP embedding $t_{i}$ of a sentence describing the body shape. The interaction prompt is a binary variable $k_{i}$ indicating whether two", + "bbox": [ + 511, + 795, + 906, + 901 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5052c9e8ab63f19eb48f844b7af74060fa66875a0636e9684ea47923970d46f8.jpg", + "image_caption": [ + "Figure 2. Method overview. PromptHMR estimates SMPL-X parameters for each person in an image based on various types of prompts, such as boxes, language descriptions, and person-person interaction cues. Given an image and prompts, we utilize a vision transformer to generate image embeddings and mask and prompt encoders to map different types of prompts to tokens. Optionally, camera intrinsics can be embedded along with the image embeddings. The image embeddings and prompt tokens are then fed to the SMPL-X decoder. The SMPL-X decoder is a transformer-based module that attends to both the image and prompt tokens to estimate SMPL-X parameters. Note that the language and interaction prompts are optional, but providing them enhances the accuracy of the estimated SMPL-X parameters." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 90, + 898, + 237 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "people are in close contact. While semantic prompts are optional, each human needs at least one spatial prompt to be reconstructed. Overall, the input prompts are represented as $P_{i}$ :", + "bbox": [ + 89, + 345, + 483, + 405 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP _ {i} \\subseteq \\left\\{b _ {i}, m _ {i}, t _ {i}, k _ {i} \\right\\} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 217, + 411, + 480, + 434 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nb _ {i} \\in P _ {i} \\text {o r} m _ {i} \\in P _ {i}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 431, + 354, + 445 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Promptable human mesh recovery (PromptHMR) is defined as a learnable function that maps an image and a set of prompts to a set of 3D humans", + "bbox": [ + 89, + 450, + 483, + 496 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nf: \\left(I, \\left\\{P _ {i} \\right\\} _ {i = 1} ^ {N}\\right)\\rightarrow \\left\\{H _ {i} \\right\\} _ {i = 1} ^ {N}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 501, + 480, + 520 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This task definition integrates all available contexts to locate and reconstruct prompted humans in the image.", + "bbox": [ + 89, + 525, + 483, + 556 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Model", + "text_level": 1, + "bbox": [ + 89, + 561, + 176, + 577 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image encoder. The image is first encoded as tokens by a vision transformer (ViT) encoder from DINOv2 [12, 44]:", + "bbox": [ + 89, + 585, + 483, + 616 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nF = \\operatorname {E n c o d e r} (I), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 622, + 480, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To ensure sufficient resolution for modeling humans at both near and far distances, we use $896 \\times 896$ images. The encoder is run once per frame regardless of the number of people prompted. When camera intrinsics are provided, we add positional encoding of the camera rays to the image tokens to make them camera-aware [2, 15].", + "bbox": [ + 89, + 645, + 483, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Mask encoder. When available, masks are first processed by an encoder consisting of stripped convolutional layers that downsample the masks. The output mask features are added to the image tokens. If no mask is provided, a learned \"no mask\" token is added instead.", + "bbox": [ + 89, + 736, + 483, + 810 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nF _ {i} = \\operatorname {E n c o d e r} _ {\\mathrm {m}} \\left(m _ {i}\\right) + F. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 816, + 480, + 834 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Prompt encoder. The prompt encoder consists of a set of transformations that map different types of prompts to token vectors of the same dimension. 
When a prompt is not available, it is replaced with a learned null token.", + "bbox": [ + 89, + 839, + 483, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For bounding boxes, we encode $b_{i}$ using positional encoding summed with learned embeddings to form the box prompt tokens $T_{bi} = \\mathrm{PE}(b_{i})$ , with $T_{bi} \\in \\mathbb{R}^{2 \\times d}$ . We design different box transformations during training to allow the model to use different boxes as a human identifier. In the training phase, each instance is prompted with either a whole-body bounding box, a face bounding box, or a truncated box covering part of the body. Gaussian noise is added to both corners. At inference time, the model accepts boxes without needing to know the box types.", + "bbox": [ + 511, + 345, + 906, + 496 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Language is a natural way to supply semantic information, and in this paper, we use language to supplement spatial prompts with information on body shape. A sentence such as \"a muscular and tall male\" is encoded with the CLIP text encoder $T_{ti} = \\mathrm{CLIP}(t_i)$ , with $T_{ti} \\in \\mathbb{R}^{d}$ . To generate paired (image, text) data, we run SHAPY's [8] shape-to-attribute method on the ground truth shape parameters to obtain shape attribute scores and randomly pick a subset of top attributes to form a sentence.", + "bbox": [ + 511, + 496, + 905, + 632 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The interaction prompt $k_{i}$ passes through the prompt encoder without modification and directly switches on-off the cross-person attention that is described in Sec. 3.3.", + "bbox": [ + 511, + 632, + 905, + 676 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "SMPL-X decoder. The SMPL-X decoder appends two query tokens $T_{\\mathrm{spl}}, T_{\\mathrm{depth}}$ with the prompt tokens $T_{bi}, T_{ti}$ to form the person-specific prompt $T_i \\in \\mathbb{R}^{5 \\times d}$ .", + "bbox": [ + 511, + 676, + 905, + 722 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Finally, we use a standard transformer decoder and two MLP heads to produce the final output", + "bbox": [ + 511, + 723, + 905, + 753 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nT _ {s m p l} ^ {\\prime}, T _ {d e p t h} ^ {\\prime} = \\mathrm {D e c o d e r} (F _ {i}, T _ {i})\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 758, + 812, + 779 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {i}, \\theta_ {i}, \\beta_ {i} = \\operatorname {H e a d} _ {s m p l} \\left(T _ {s m p l} ^ {\\prime}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 781, + 903, + 797 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tau_ {i} = \\operatorname {H e a d} _ {d e p t h} (T _ {d e p t h} ^ {\\prime}).\n$$\n", + "text_format": "latex", + "bbox": [ + 663, + 801, + 830, + 819 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The transformer consists of three attention blocks. Each block applies self-attention on the tokens, cross-person attention (described in Sec. 3.3), and then two-way cross-attention between the tokens and the image embeddings [27]. The self-attention and cross-attention with the", + "bbox": [ + 511, + 825, + 905, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "image are applied to each prompted person independently. 
We use separate tokens $T_{\\mathrm{spl}}$ and $T_{\\mathrm{depth}}$ to make the location representation invariant to the 3D human pose and shape representation.", + "bbox": [ + 89, + 90, + 480, + 151 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Regressing the location of the human in the camera space is much more challenging than most prior work that models humans in a cropped image space. Therefore, we do not regress $\\tau$ directly. We regress focal length normalized 2D translation $p_{xy} \\in \\mathbb{R}^2$ and inverse depth $p_z \\in \\mathbb{R}$ , and then transform them to $\\tau$ as follows", + "bbox": [ + 89, + 152, + 482, + 242 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nt _ {x y} = \\frac {p _ {x y}}{p _ {z}} \\quad t _ {z} = \\frac {1}{p _ {z}} \\times \\frac {f}{f _ {c}} \\quad \\tau = [ t _ {x y}, t z ], \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 250, + 482, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $f$ is the ground truth or estimated focal length of the image, and $f_{c}$ is the canonical focal length. Predicting the normalized inverse depth follows the recent monocular depth literature [51] and is also intuitive since the inverse depth is linearly related to the size of the human in the image. Predicting $p_{xy}$ is equivalent to predicting the 2D location of the human in a normalized image plane.", + "bbox": [ + 89, + 291, + 483, + 398 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Two-person interaction", + "text_level": 1, + "bbox": [ + 89, + 406, + 305, + 422 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We introduce promptable layers in the decoder to model two-person interaction. We describe the case where there are two people in the image, but the implementation can extend to model an interacting pair in a larger group.", + "bbox": [ + 89, + 429, + 482, + 491 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The promptability is modeled as a flow control with a residual connection (Fig. 3). Specifically, if two humans are interacting (as indicated by $k_{i}$ ), their query tokens pass through an additional self-attention layer; otherwise, non-interacting humans skip this.", + "bbox": [ + 89, + 489, + 482, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Applying attention to every person often creates unnecessary dependency in crowded scenes, and there is limited training data for large-group scenarios. However, there is high-quality data featuring two-person social interactions. By making the interaction layers promptable, we mitigate data diversity issues and increase flexibility, regardless of the number of people in the scene.", + "bbox": [ + 89, + 566, + 482, + 671 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our proposed interaction layer uses a standard self-attention mechanism. First, we add positional encodings to the query tokens to distinguish the two individuals. The encoded tokens then go through a self-attention layer, whose output is combined with the original tokens via a residual connection. Our experiments demonstrate that including these interaction layers significantly improves inter-person pose accuracy in two-person interaction benchmarks.", + "bbox": [ + 89, + 671, + 482, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. 
PromptHMR video version", + "text_level": 1, + "bbox": [ + 89, + 801, + 334, + 816 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In addition to the single-image variant of PromptHMR, we train an extended version that processes videos to estimate human motion in world coordinates. To achieve this, we introduce a simple and efficient temporal transformer module. Given a monocular video sequence $\\{I^t\\}_{t=0}^T$ , we first run", + "bbox": [ + 89, + 824, + 482, + 902 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8c6c59d99ccd878b1a6ed924cf39f0de820cb6afdd9d730c0a6e96d7833acb42.jpg", + "image_caption": [ + "Figure 3. SMPL-X decoder. The top row shows one attention block in the decoder. The cross-person interaction module can be turned on/off. The bottom row shows the cross-person attention." + ], + "image_footnote": [], + "bbox": [ + 516, + 90, + 898, + 200 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "PromptHMR to obtain per-subject SMPL-X decoder output tokens $T_{\\mathrm{mpl}}^{\\prime}$ and $T_{\\mathrm{depth}}^{\\prime}$ , assuming that the subject identities are provided with the prompts. These tokens, along with the positional encoding of time $t$ , are fed to a decoder-only temporal transformer module with twelve attention blocks. The output tokens are converted to SMPL-X parameters $\\phi_t, \\theta_t, \\beta_t$ , translation $\\tau_t$ , and joint contact probabilities $c_t$ . The contact probabilities indicate whether a given joint is in contact with the ground plane similar to [52-54].", + "bbox": [ + 511, + 268, + 905, + 406 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To obtain results in world coordinates, we adopt the approach from TRAM [67]. Specifically, we use DROID-SLAM [62] and a monocular metric depth estimation model, ZoeDepth [3], to estimate camera motion in metric world coordinates. The translation parameters $\\tau_{t}$ are then transformed to world coordinates using the estimated camera motion. To refine the human trajectory and mitigate foot-skating artifacts, we leverage the estimated contact probabilities and run a fast postprocessing that optimizes the contact joints to have zero velocity.", + "bbox": [ + 511, + 407, + 905, + 559 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. 
Losses", + "text_level": 1, + "bbox": [ + 513, + 574, + 601, + 588 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "PromptHMR is trained with a combination of 2D and 3D losses, following traditional HMR methods [23, 32]:", + "bbox": [ + 511, + 599, + 905, + 630 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\lambda_ {1} \\mathcal {L} _ {2 D} + \\lambda_ {2} \\mathcal {L} _ {3 D} + \\lambda_ {3} \\mathcal {L} _ {\\mathrm {S M P L}} + \\lambda_ {4} \\mathcal {L} _ {V} + \\lambda_ {5} \\mathcal {L} _ {t}\n$$\n", + "text_format": "latex", + "bbox": [ + 534, + 645, + 883, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "with each term calculated as", + "bbox": [ + 511, + 676, + 702, + 691 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {2 D} = \\left\\| \\hat {\\mathcal {J}} _ {2 D} - \\Pi \\left(\\mathcal {J} _ {3 D}\\right) \\right\\| _ {F} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 707, + 779, + 724 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {3 D} = \\left\\| \\hat {\\mathcal {J}} _ {3 D} - \\mathcal {J} _ {3 D} \\right\\| _ {F} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 728, + 754, + 746 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {S M P L}} = | | \\hat {\\Theta} - \\Theta | | _ {2} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 576, + 750, + 718, + 768 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {V} = \\left| \\left| \\hat {V} - V \\right| \\right| _ {F} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 772, + 720, + 789 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {t} = \\left| \\left| \\hat {p} _ {x y} - p _ {x y} \\right| \\right| _ {F} ^ {2} + \\left| \\left| \\hat {p} _ {z} - p _ {z} \\right| \\right| _ {F} ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 606, + 792, + 843, + 810 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{J}_{3D}$ and $V$ are the 3D joints and vertices of the SMPL-X model, with the hat operator denoting the ground truth. $\\Pi$ is the camera reprojection operator. Additionally, on datasets with ground truth translation labels, we supervise the normalized translation $p_{xy}$ and inverse depth $p_z$ .", + "bbox": [ + 511, + 824, + 905, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 89, + 89, + 223, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. We train PromptHMR with standard datasets: BEDLAM [4], AGORA [46], 3DPW [65], COCO [35], and MPII [39]. Following 4DHumans, we add AIC [68] and InstaVariety [24] as in-the-wild data, with pseudoground truth from CamSMPLify [45]. Additionally, we add CHI3D [17] and HI4D [71] to enable learning two-person interaction following the train-test splits from BUDDI [43]. Including CHI3D and HI4D does not improve performance on other benchmarks.", + "bbox": [ + 89, + 114, + 483, + 251 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation. We train PromptHMR with AdamW with a batch size of 96 images of resolution $896 \\times 896$ . We use a learning rate of $1e^{-5}$ for the image encoder and $3e^{-5}$ for the prompt encoder and the SMPL-X decoder, with a weight decay of $5e^{-5}$ . 
The training converges within 350K steps.", + "bbox": [ + 89, + 253, + 483, + 329 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation. We evaluate camera space reconstruction accuracy on 3DPW [65], EMDB [25] and RICH [21], using MPJPE, Procrustes-aligned MPJPE (PA-MPJPE) and Per Vertex Error (PVE) [23]. We evaluate inter-person accuracy on HI4D and CHI3D by Pair-PA-MPJPE, which aligns the two people as a whole with the ground truth [43].", + "bbox": [ + 89, + 330, + 483, + 420 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To evaluate world-grounded motion on EMDB with PromptHMR video (PromptHMR-vid), we compute World-aligned MPJPE $(\\mathrm{WA - MPJPE}_{100})$ , World MPJPE $(\\mathrm{W - MPJPE}_{100})$ and Root Translation Error (RTE in %) [54, 70].", + "bbox": [ + 89, + 422, + 483, + 483 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Reconstruction accuracy", + "text_level": 1, + "bbox": [ + 89, + 496, + 316, + 512 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For camera space reconstruction, as shown in Table 1, PromptHMR and PromptHMR-Vid demonstrate state-of-the-art performance, matching crop-based methods while achieving better results than other full-image methods. PromptHMR and CameraHMR use the same training data and have similar performance, which validates that this prompt-based approach can achieve metrically accurate results. For representative results, see Fig. 7, where PromptHMR recovers coherent 3D scenes of people.", + "bbox": [ + 89, + 520, + 483, + 656 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For interaction reconstruction, PromptHMR achieves good accuracy as indicated in Table 2. Compared to BUDDI which is also trained on CHI3D and HI4D, our method achieves better overall accuracy on per-person and interperson metrics. We show qualitative results in Fig. 8. As a monocular regression method, PromptHMR still cannot avoid interpenetration between closely interacting people.", + "bbox": [ + 89, + 657, + 483, + 763 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "PromptHMR-Vid achieves SOTA performance among methods that estimate human motion in world coordinates, as shown in Table 4. Unlike TRAM, we estimate the joint contact probabilities similar to [53, 54]. Therefore, we achieve lower foot skating than TRAM, even though we use the same metric SLAM method to transform motion in camera space to world coordinates. Please refer to our supplementary material (SupMat) for qualitative results of PromptHMR-Vid.", + "bbox": [ + 89, + 763, + 483, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bed63959a43ef9b6517723265c8d98562acb316fc42046804d36f64950b13e1e.jpg", + "image_caption": [ + "Figure 4. Effect of box prompts. Our method remains stable with different boxes, including noisy truncated boxes." + ], + "image_footnote": [], + "bbox": [ + 517, + 88, + 900, + 162 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d460b7604ea14b3e3eeca7c6c5ccbdf17a95d7dc5d115e02e0e0b1efc67f4b85.jpg", + "image_caption": [ + "Figure 5. Effect of mask prompts. Results are from the same model with different prompt inputs. Masks are better for close interaction scenarios where boxes are ambiguous." + ], + "image_footnote": [], + "bbox": [ + 516, + 204, + 900, + 287 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2964c98ed8bbefebb6c7c00d61ec76077da35ec78188b8515b97045c789f956e.jpg", + "image_caption": [ + "Figure 6. Effect of shape prompts. 
Compared to the baseline that does not incorporate shape description during training and testing, the model with shape prompts has better accuracy on HBW, especially in ambiguous images." + ], + "image_footnote": [], + "bbox": [ + 519, + 335, + 898, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Effect of multimodal prompts", + "text_level": 1, + "bbox": [ + 511, + 678, + 777, + 695 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct qualitative and quantitative evaluations of the multimodal prompts. For efficient ablation, we train models with $448 \\times 448$ input resolution and select the best model within 150K steps of training.", + "bbox": [ + 511, + 702, + 905, + 763 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For box prompts, as shown in rows 3-4 of Fig. 7, our method is able to take a combination of different boxes from in-the-wild images to reconstruct crowded scenes. Figure 4 also shows an example with varying box inputs. PromptHMR remains stable when the boxes change and uses full image context to reconstruct the human even when the boxes are truncated.", + "bbox": [ + 511, + 763, + 905, + 868 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The mask prompt is more effective than boxes when people closely overlap (Fig. 5), as boxes are ambiguous in such", + "bbox": [ + 511, + 869, + 905, + 901 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/10b66dfbccf02f42f4e3b9a2ffd895e955ecf5bab5cfa84ddc478ead506cd34f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Models | 3DPW (14) | EMDB (24) | RICH (24)
PA-MPJPE | MPJPE | PVE | PA-MPJPE | MPJPE | PVE | PA-MPJPE | MPJPE | PVE
cropped image | CLIFF* [33] | 43.0 | 69.0 | 81.2 | 68.3 | 103.3 | 123.7 | 68.1 | 103.3 | 128.0
HMR2.0a [18] | 44.4 | 69.8 | 82.2 | 61.5 | 97.8 | 120.0 | 60.7 | 98.3 | 120.8
TokenHMR [14] | 44.3 | 71.0 | 84.6 | 55.6 | 91.7 | 109.4 | - | - | -
CameraHMR [45] | 35.1 | 56.0 | 65.9 | 43.3 | 70.2 | 81.7 | 34.0 | 55.7 | 64.4
full image | BEV [60] | 46.9 | 78.5 | 92.3 | 70.9 | 112.2 | 133.4 | - | - | -
Multi-HMR* [2] | 45.9 | 73.1 | 87.1 | 50.1 | 81.6 | 95.7 | 46.3 | 73.8 | 83.0
PromptHMR* | 36.6 | 58.7 | 69.4 | 41.0 | 71.7 | 84.5 | 37.3 | 56.6 | 65.5
video | WHAM [54] | 37.5 | 59.8 | 71.5 | 52.0 | 81.6 | 96.9 | 44.3 | 80.0 | 91.2
TRAM [67] | 35.6 | 59.3 | 69.6 | 45.7 | 74.4 | 86.6 | - | - | -
GVHMR [53] | 37.0 | 56.6 | 68.7 | 44.5 | 74.2 | 85.9 | 39.5 | 66.0 | 74.4
PromptHMR-Vid | 35.5 | 56.9 | 67.3 | 40.1 | 68.1 | 79.2 | 37.0 | 57.4 | 65.8
", + "bbox": [ + 153, + 90, + 841, + 316 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/0308ef773eef4ff74a60a1ebfdf87d0d192464007bb0372b2e2e0590beaefe8b.jpg", + "table_caption": [ + "Table 1. Comparison of mesh reconstruction on the 3DPW, EMDB and RICH datasets, with the number of joints in parenthesis. $\\star$ denotes methods that use ground truth focal length during inference. Note that we remove the test-time flip augmentation from all of the video methods to ensure a fair comparison. All metrics are in mm." + ], + "table_footnote": [], + "table_body": "
Models | HI4D (14) | CHI3D (14)
PA-MPJPE | MPJPE | Pair-PA-MPJPE | PA-MPJPE | MPJPE | Pair-PA-MPJPE
BEV* [60] | 81 | - | 136 | 51 | - | 96
BUDDI [43] | 73 | - | 98 | 47 | - | 68
Multi-HMR* [2] | 49.8 | 67.8 | 80.6 | 31.7 | 54.0 | 100.0
PromptHMR* | 39.2 | 63.9 | 78.1 | 27.2 | 48.0 | 58.5
PromptHMR | 30.1 | 39.6 | 39.5 | 24.7 | 46.5 | 45.3
", + "bbox": [ + 94, + 380, + 477, + 481 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/56545b98164f3d44d755701f2cd0c4d315ce4d8ea2e750b11b796f77a9f6c710.jpg", + "table_caption": [ + "Table 2. Comparison on interaction reconstruction. PromptHMR is more accurate in per-person and inter-person accuracy. * denote a method or baseline is not trained on HI4D or CHI3D. All metrics are in mm. The impact of HI4D and the interaction prompt are evaluated in Table 5." + ], + "table_footnote": [], + "table_body": "
Train w/ text | Test w/ text | HBW
Height | Chest | Waist | Hip | P2P-20k
× | × | 69 | 51 | 88 | 63 | 26
✓ | × | 69 | 48 | 86 | 60 | 26
✓ | ✓ | 62 | 43 | 76 | 58 | 24
", + "bbox": [ + 94, + 566, + 475, + 674 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "cases. Ablation of HI4D (rows 1-2 in Tab. 5) shows that using masks as the spatial prompt improves accuracy.", + "bbox": [ + 89, + 763, + 482, + 792 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experiments on the HBW validation set (Tab. 3) show that text prompts effectively improve shape accuracy when used during both training and testing. Moreover, training with shape descriptions alone provides an accuracy boost even if prompts are not given at test time. As illustrated in Fig. 6, text prompts provide notable improvements, especially when large perspective effects create ambiguity.", + "bbox": [ + 88, + 795, + 482, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/786e6190b2e7d369c95bb59543041ca8d17a5c27883afb76fcfa7fd758c59a17.jpg", + "table_caption": [ + "Table 3. Ablation of shape prompts using text. Training with shape prompts improves shape accuracy. Using shape prompts during inference further improves shape accuracy. The ablation study is conducted with a $448 \\times 448$ model. Errors are in mm." + ], + "table_footnote": [], + "table_body": "
Models | EMDB-2 (24) WA-MPJPE100 | EMDB-2 (24) W-MPJPE100 | EMDB-2 (24) RTE | EMDB-2 (24) Jitter | EMDB-2 (24) Foot Skating
WHAM [54] | 135.6 | 354.8 | 6.0 | 22.5 | 4.4
TRAM [67] | 76.4 | 222.4 | 1.4 | 18.5 | 23.4
GVHMR [53] | 111.0 | 276.5 | 2.0 | 16.7 | 3.5
PromptHMR-Vid | 71.0 | 216.5 | 1.3 | 16.3 | 3.5
", + "bbox": [ + 519, + 380, + 898, + 467 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c1fa09e71427461b1e5ebaacdbf8c8294d86641a77d6f9e8be765e32ec0660a4.jpg", + "table_caption": [ + "Table 4. Evaluation of motion in world coordinates. PromptHMR-Vid combined with metric SLAM from TRAM [67] surpasses SOTA methods at predicting human motion in world coordinates." + ], + "table_footnote": [], + "table_body": "
Trained w/ Mask | Trained w/ Interaction | Trained w/ HI4D | HI4D (14) PA-MPJPE | HI4D (14) MPJPE | HI4D (14) Pair-PA-MPJPE
× | × | × | 47.0 | 71.4 | 87.2
✓ | × | × | 43.4 | 60.5 | 83.0
× | ✓ | × | 43.7 | 61.3 | 73.0
× | × | ✓ | 36.3 | 49.4 | 52.6
✓ | ✓ | ✓ | 36.5 | 47.1 | 47.9
", + "bbox": [ + 517, + 549, + 903, + 675 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Ablation on interaction prompt. The interaction module improves inter-person reconstruction metrics Pair-PA-MPJPE on HI4D, especially when the method does not include HI4D in training. Ablation is conducted with a $448 \\times 448$ model. All metrics are in mm.", + "bbox": [ + 511, + 683, + 906, + 752 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For interaction prompts, we show an ablation in Table 5. The proposed interaction module is beneficial and largely improves inter-person accuracy on HI4D even without HI4D training, indicating out-off-domain generalization. When trained on HI4D, the interaction module does not improve per-person PA-MPJPE but still improves interperson Pair-PA-MPJPE. Please refer to our SupMat for more qualitative results on interaction prompts.", + "bbox": [ + 511, + 779, + 908, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f8f9cf2acc709924ae2bab9efea5c107298d4e5de4209433bf74abca05fca567.jpg", + "image_caption": [ + "Figure 7. Qualitative comparison: Multi-HMR vs PromptHMR. Our model can recover coherent 3D scenes of people. In crowded scenes, face detection provides reliable box prompts for our model. Please zoom in to see the details." + ], + "image_footnote": [], + "bbox": [ + 96, + 89, + 897, + 441 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/793495b0ca62fcf69d0eb4f71001d3888dbb2dafe49de988b7bd3fb3108bb57d.jpg", + "image_caption": [ + "Figure 8. Qualitative results. PromptHMR recovers coherent two-person close interaction. Despite suffering from some interpenetration, the relative positions of the interacting people are accurately recovered. More examples are provided in the Supplementary." + ], + "image_footnote": [], + "bbox": [ + 96, + 484, + 880, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Limitations", + "text_level": 1, + "bbox": [ + 89, + 726, + 212, + 742 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We see PromptHMR as a step towards a holistic perception model for 3D humans, but several limitations need to be addressed in future work. Currently, the shape description and interaction prompts are not automatically generated and need to be supplied by the user. Future work should explore how to effectively integrate our promptable model with VLMs to automate prompting. We show how semantic prompts can improve reconstruction accuracy, but many other potential types of side information such as action descriptions, 3D scene context, or body measurements may", + "bbox": [ + 89, + 750, + 483, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "provide additional benefits in different scenarios.", + "bbox": [ + 511, + 728, + 836, + 742 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 753, + 633, + 770 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have presented PromptHMR, a promptable HPS estimation approach that leverages full image context with spatial and semantic prompts to infer 3D humans in the scene. Our method demonstrates state-of-the-art accuracy across diverse benchmarks and generalizes well in the wild. 
Our experiments show that incorporating diverse input information through flexible prompting enables robustness and adaptability in challenging scenarios.", + "bbox": [ + 511, + 779, + 906, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. The authors would like to thank Yan Zhang, Yao Feng, and Nitin Saini for their suggestions. The majority of the work was done when Yufu was an intern at Meshcapade. Yufu and Kostas thank the support of NSF NCS-FO 2124355, NSF FRR 2220868, and NSF IISRI 2212433.", + "bbox": [ + 89, + 90, + 480, + 180 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Disclosure. While MJB is a co-founder and Chief Scientist at Meshcapade, his research in this project was performed solely at, and funded solely by, the Max Planck Society.", + "bbox": [ + 89, + 181, + 480, + 242 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 255, + 187, + 271 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Nikos Athanasiou, Alpar Ceske, Markos Diomataris, Michael J. Black, and Gül Varol. MotionFix: Text-driven 3D human motion editing. In SIGGRAPH Asia, 2024. 3", + "[2] Fabien Baradel, Matthieu Armando, Salma Galaoui, Romain Brégier, Philippe Weinzaepfel, Grégory Rogez, and Thomas Lucas. Multi-HMR: Multi-person whole-body human mesh recovery in a single shot. European Conference on Computer Vision, 2024. 3, 4, 7", + "[3] Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias Müller. ZoeDepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288, 2023. 5", + "[4] Michael J Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. BEDLAM: A synthetic dataset of bodies exhibiting detailed lifelike animated motion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8726-8737, 2023. 6, 1", + "[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229, 2020. 3", + "[6] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2Mesh: Graph convolutional network for 3D human pose and mesh recovery from a 2D human pose. In European Conference on Computer Vision, pages 769-787. Springer, 2020. 2", + "[7] Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Beyond static features for temporally consistent 3D human pose and shape from a video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1964-1973, 2021. 3", + "[8] Vasileios Choutas, Lea Müller, Chun-Hao P. Huang, Siyu Tang, Dimitrios Tzionas, and Michael J. Black. Accurate 3D body shape regression using metric and semantic attributes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2718-2728, 2022. 2, 3, 4, 1", + "[9] Ginger Delmas, Philippe Weinzaepfel, Francesc Moreno-Noguer, and Grégory Rogez. PoseFix: correcting 3D human poses with natural language. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15018-15028, 2023. 3", + "[10] Ginger Delmas, Philippe Weinzaepfel, Francesc Moreno-Noguer, and Grégory Rogez. Posembroider: Towards a" + ], + "bbox": [ + 93, + 280, + 483, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "3D, visual, semantic-aware human pose representation. 
In European Conference on Computer Vision, 2024. 2, 3", + "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1", + "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. 4", + "[13] Kaiwen Duan, Song Bai, Lingxi Xie, Honggang Qi, Qingming Huang, and Qi Tian. Centernet: Keypoint triplets for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6569-6578, 2019. 3", + "[14] Sai Kumar Dwivedi, Yu Sun, Priyanka Patel, Yao Feng, and Michael J Black. TokenHMR: Advancing human mesh recovery with a tokenized pose representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1323-1333, 2024. 7", + "[15] Jose M. Facil, Benjamin Ummenhofer, Huizhong Zhou, Luis Montesano, Thomas Brox, and Javier Civera. CAM-Convs: Camera-aware multi-scale convolutions for single-view depth. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 11818-11827, 2019. 4", + "[16] Yao Feng, Jing Lin, Sai Kumar Dwivedi, Yu Sun, Priyanka Patel, and Michael J. Black. ChatPose: Chatting about 3D human pose. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 2, 3", + "[17] Mihai Fieraru, Mihai Zanfir, Elisabeta Oneata, Alin-Ionut Popa, Vlad Olaru, and Cristian Sminchisescu. Reconstructing three-dimensional models of interacting humans. arXiv preprint arXiv:2308.01854, 2023. 2, 3, 6, 1", + "[18] Shubham Goel, Georgios Pavlakos, Jathushan Rajasegaran, Angjoo Kanazawa, and Jitendra Malik. Reconstructing and tracking humans with transformers. Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2, 7", + "[19] Dorian F Henning, Tristan Laidlow, and Stefan Leutenegger. BodySLAM: joint camera localisation, mapping, and human motion tracking. In European Conference on Computer Vision, pages 656-673. Springer, 2022. 3", + "[20] Dorian F Henning, Christopher Choi, Simon Schaefer, and Stefan Leutenegger. BodySLAM++: Fast and tightly-coupled visual-inertial camera and human motion tracking. In IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3781-3788. IEEE, 2023. 3", + "[21] Chun-Hao P Huang, Hongwei Yi, Markus Höschle, Matvey Safroshkin, Tsvetelina Alexiadis, Senya Polikovsky, Daniel Scharstein, and Michael J Black. Capturing and inferring dense full-body human-scene contact. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13274-13285, 2022. 2, 6" + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[22] Wen Jiang, Nikos Kolotouros, Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Coherent reconstruction of multiple humans from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5579-5588, 2020. 3", + "[23] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7122-7131, 2018. 2, 3, 5, 6", + "[24] Angjoo Kanazawa, Jason Y Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5614-5623, 2019. 3, 6, 1", + "[25] Manuel Kaufmann, Jie Song, Chen Guo, Kaiyue Shen, Tianjian Jiang, Chengcheng Tang, Juan José Zárate, and Otmar Hilliges. EMDB: The electromagnetic database of global 3d human pose and shape in the wild. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14632-14643, 2023. 2, 6", + "[26] Rawal Khirodkar, Timur Bagautdinov, Julieta Martinez, Su Zhaoen, Austin James, Peter Selednik, Stuart Anderson, and Shunsuke Saito. Sapiens: Foundation for human vision models. In European Conference on Computer Vision, pages 206-228. Springer, 2025. 1", + "[27] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 4", + "[28] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. VIBE: Video inference for human body pose and shape estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5253-5263, 2020. 3", + "[29] Muhammed Kocabas, Chun-Hao P Huang, Otmar Hilliges, and Michael J Black. PARE: Part attention regressor for 3D human body estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11127-11137, 2021. 2", + "[30] Muhammed Kocabas, Chun-Hao P. Huang, Joachim Tesch, Lea Müller, Otmar Hilliges, and Michael J. Black. SPEC: Seeing people in the wild with an estimated camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11035-11045, 2021. 2", + "[31] Muhammed Kocabas, Ye Yuan, Pavlo Molchanov, Yunrong Guo, Michael J Black, Otmar Hilliges, Jan Kautz, and Umar Iqbal. PACE: Human and camera motion estimation from inthe-wild videos. In International Conference on 3D Vision, pages 397-408, 2024. 3", + "[32] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2252-2261, 2019. 2, 3, 5", + "[33] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. CLIFF: Carrying location information" + ], + "bbox": [ + 91, + 90, + 485, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "in full frames into human pose and shape estimation. In European Conference on Computer Vision, pages 590-606. Springer, 2022. 7", + "[34] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12939-12948, 2021. 2", + "[35] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision, pages 740-755. Springer, 2014. 6, 1", + "[36] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning, 2023. 3", + "[37] Thomas Lucas, Fabien Baradel, Philippe Weinzaepfel, and Grégory Rogez. Posegpt: Quantization-based 3d human motion generation and forecasting. 
In European Conference on Computer Vision, pages 417-435, 2022. 3", + "[38] Zhengyi Luo, S. Alireza Golestaneh, and Kris M. Kitani. 3d human motion estimation via motion compression and refinement. In Proceedings of the Asian Conference on Computer Vision, 2020. 3", + "[39] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved cnn supervision. In International Conference on 3D Vision, pages 506-516. IEEE, 2017. 6, 1", + "[40] Gyeongsik Moon and Kyoung Mu Lee. I2L-MeshNet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single RGB image. In European Conference on Computer Vision, pages 752-768. Springer, 2020. 2", + "[41] Raul Mur-Artal and Juan D Tardós. ORB-SLAM: An opensource slam system for monocular, stereo, and RGB-D cameras. IEEE Transactions on Robotics, 33(5):1255-1262, 2017. 3", + "[42] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. ORB-SLAM: A versatile and accurate monocular SLAM system. IEEE Transactions on Robotics, 31(5):1147-1163, 2015. 3", + "[43] Lea Müller, Vickie Ye, Georgios Pavlakos, Michael J. Black, and Angjoo Kanazawa. Generative proxemics: A prior for 3D social interaction from images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3, 6, 7", + "[44] Maxime Oquab, Timothée Darcet, Theo Moutakanni, Huy V. Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Russell Howes, Po-Yao Huang, Hu Xu, Vasu Sharma, Shang-Wen Li, Wojciech Galuba, Mike Rabbat, Mido Assran, Nicolas Ballas, Gabriel Synnaeve, Ishan Misra, Herve Jegou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. DINoV2: Learning robust visual features without supervision, 2023. 4, 1", + "[45] Priyanka Patel and Michael J. Black. Camerahrm: Aligning people with perspective. International Conference on 3D Vision (3DV), 2025. 6, 7" + ], + "bbox": [ + 516, + 90, + 906, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[46] Priyanka Patel, Chun-Hao P Huang, Joachim Tesch, David T Hoffmann, Shashank Tripathi, and Michael J Black. AGORA: Avatars in geography optimized for regression analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13468-13478, 2021. 6, 1", + "[47] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3D hands, face, and body from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10975-10985, 2019. 2, 3", + "[48] Mathis Petrovich, Michael J Black, and Gül Varol. Action-conditioned 3D human motion synthesis with transformer VAE. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10985-10995, 2021. 3", + "[49] Baldomero R. Árbol and Dan Casas. BodyShapeGPT: SMPL body shape manipulation with LLMs. In European Conference on Computer Vision Workshops, 2024. 3", + "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 2", + "[51] Rene Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. 
Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(3):1623-1637, 2022. 5", + "[52] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J Guibas. HUMOR: 3D human motion model for robust pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11488-11499, 2021. 5", + "[53] Zehong Shen, Huajin Pi, Yan Xia, Zhi Cen, Sida Peng, Zechen Hu, Hujun Bao, Ruizhen Hu, and Xiaowei Zhou. World-grounded human motion recovery via gravity-view coordinates. In SIGGRAPH Asia, 2024. 3, 6, 7, 1", + "[54] Soyong Shin, Juyong Kim, Eni Halilaj, and Michael J Black. WHAM: Reconstructing world-grounded humans with accurate 3D motion. arXiv preprint arXiv:2312.07531, 2023. 3, 5, 6, 7, 1", + "[55] Stephan Streuber, M Alejandra Quiros-Ramirez, Matthew Q Hill, Carina A Hahn, Silvia Zuffi, Alice O'Toole, and Michael J Black. Body talk: Crowdshaping realistic 3D avatars with words. ACM TOG, 35(4):1-14, 2016. 3", + "[56] Sanjay Subramanian, Evonne Ng, Lea Müller, Dan Klein, Shiry Ginosar, and Trevor Darrell. Pose priors from language models. arXiv preprint arXiv:2405.03689, 2024. 3", + "[57] Qingping Sun, Yanjun Wang, Ailing Zeng, Wanqi Yin, Chen Wei, Wenjia Wang, Haiyi Mei, Chi-Sing Leung, Ziwei Liu, Lei Yang, and Zhongang Cai. AiOS: All-in-one-stage expressive human pose and shape estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 1834-1843, 2024. 3", + "[58] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a" + ], + "bbox": [ + 91, + 90, + 482, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "skeleton-disentangled representation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 3", + "[59] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J Black, and Tao Mei. Monocular, one-stage, regression of multiple 3D people. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11179-11188, 2021. 3", + "[60] Yu Sun, Wu Liu, Qian Bao, Yili Fu, Tao Mei, and Michael J Black. Putting people in their place: Monocular regression of 3D people in depth. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13243-13252, 2022. 7", + "[61] Yu Sun, Qian Bao, Wu Liu, Tao Mei, and Michael J Black. TRACE: 5D temporal regression of avatars with dynamic cameras in 3D environments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8856-8866, 2023. 3", + "[62] Zachary Teed and Jia Deng. DRPOID-SLAM: Deep visual slam for monocular, stereo, and RGB-D cameras. Advances in Neural Information Processing Systems, 34:16558-16569, 2021. 3, 5", + "[63] Zachary Teed, Lahav Lipson, and Jia Deng. Deep patch visual odometry. Advances in Neural Information Processing Systems, 36, 2024. 3", + "[64] Guy Tevet, Sigal Raab, Brian Gordon, Yoni Shafir, Daniel Cohen-or, and Amit Haim Bermano. Human motion diffusion model. In International Conference on Learning Representations, 2023. 3", + "[65] Timo Von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In European Conference on Computer Vision, pages 601-617, 2018. 2, 6, 1", + "[66] Yufu Wang and Kostas Daniilidis. 
ReFit: Recurrent fitting network for 3D human recovery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14644-14654, 2023. 2", + "[67] Yufu Wang, Ziyun Wang, Lingjie Liu, and Kostas Daniilidis. TRAM: Global trajectory and motion of 3d humans from inthe-wild videos. In European Conference on Computer Vision, 2024. 2, 3, 5, 7", + "[68] Jiahong Wu, He Zheng, Bo Zhao, Yixin Li, Baoming Yan, Rui Liang, Wenjia Wang, Shipei Zhou, Guosen Lin, Yanwei Fu, et al. AI challenger: A large-scale dataset for going deeper in image understanding. arXiv preprint arXiv:1711.06475, 2017. 6, 1", + "[69] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. arXiv preprint arXiv:2309.16671, 2023. 1", + "[70] Vickie Ye, Georgios Pavlakos, Jitendra Malik, and Angjoo Kanazawa. Decoupling human and camera motion from videos in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21222-21232, 2023. 3, 6", + "[71] Yifei Yin, Chen Guo, Manuel Kaufmann, Juan Jose Zarate, Jie Song, and Otmar Hilliges. Hi4D: 4D instance segmentation of close human interaction. In Proceedings of" + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17016-17027, 2023. 2, 3, 6, 1", + "[72] Ye Yuan, Umar Iqbal, Pavlo Molchanov, Kris Kitani, and Jan Kautz. GLAMR: Global occlusion-aware human mesh recovery with dynamic cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11038-11049, 2022. 3", + "[73] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. PyMAF: 3D human pose and shape regression with pyramidal mesh alignment feedback loop. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11446-11456, 2021. 2", + "[74] Yizhou Zhao, Tuanfeng Yang Wang, Bhiksha Raj, Min Xu, Jimei Yang, and Chun-Hao Paul Huang. Synergistic global-space camera and human reconstruction from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1216-1226, 2024. 3" + ], + "bbox": [ + 91, + 90, + 482, + 345 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "PromptHMR: Promptable Human Mesh Recovery Supplementary Material", + "text_level": 1, + "bbox": [ + 243, + 85, + 754, + 138 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "7. Additional Results", + "text_level": 1, + "bbox": [ + 89, + 155, + 272, + 171 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this section, we demonstrate more qualitative results to show the effects of interaction prompting and the video module. Please refer to the supplementary video to see the results from PromptHMR-Vid.", + "bbox": [ + 89, + 181, + 482, + 242 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "7.1. Interaction Prompting", + "text_level": 1, + "bbox": [ + 89, + 252, + 302, + 268 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We perform qualitative and quantitative ablation studies of interaction prompting on the HI4D dataset. In Tab. 5 of the main paper, we demonstrate that introducing interaction prompting improves the quantitative results on HI4D. In Fig. 9, we present more qualitative results to show the effect of the interaction module. 
As shown in the first column of Fig. 9, without the interaction module, the model does not learn to reconstruct close interaction effectively, even when trained with CHI3D interaction data. By adding the proposed interaction module, in the second column, the relative position and orientation of the interacting people are improved, and the penetration is reduced. Note that if we turn off the interaction module via the proposed flow control, the results will become similar to the first column. Finally, training with both CHI3D and HI4D leads to better results.", + "bbox": [ + 89, + 276, + 483, + 517 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "8. Experiment Details", + "text_level": 1, + "bbox": [ + 89, + 532, + 277, + 550 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "8.1. Datasets", + "text_level": 1, + "bbox": [ + 89, + 559, + 192, + 573 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The training set of the image model includes BEDLAM [4], AIC [68], InstaVariety [24], HI4D [71], CHI3D [17], AGORA [46], 3DPW [65], COCO [35], and MPII [39], with the sampling rate of $\\{0.2, 0.2, 0.3, 0.08, 0.08, 0.06, 0.06, 0.01, 0.01\\}$ . All input images are padded and resized to $896 \\times 896$ . During training, we employ rotation and color jitter augmentation. For PromptHMR-Vid, we use BEDLAM and 3DPW datasets following [53, 54].", + "bbox": [ + 89, + 582, + 482, + 717 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To use datasets with different annotations for training, we adopt different losses described in Sec.3.5 of the main paper. For the ones (e.g. BEDLAM, AGORA, CHI3D, HI4D) with ground truth SMPL/SMPL-X annotations, we employ all loss items. While on AIC, InstaVariety, and 3DPW, we drop the translation loss. On COCO and MPII, we only compute 2D keypoint reprojection loss.", + "bbox": [ + 89, + 719, + 482, + 824 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We generate the whole-body bounding boxes by projecting the ground-truth SMPL-X meshes onto the image plane. To generate the face bounding boxes, we project the head vertices. To generate truncated boxes, we take groups of keypoints (e.g. upper body keypoints) and compute their", + "bbox": [ + 89, + 825, + 483, + 901 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "bounding boxes. Gaussian noise is then added to both corners.", + "bbox": [ + 511, + 156, + 903, + 185 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "On BEDLAM, AGORA, and AIC, we follow SHAPY [8] to compute the shape attribute scores. During training, we compose a shape description for each instance, such as \"a tall and broad-shoulder female\" with a few augmentation rules. Each sentence will randomly sample 1-3 top attributes. The gender information is augmented with synonyms, such as \"female\", \"woman\", \"girl\", etc.", + "bbox": [ + 511, + 186, + 906, + 308 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "8.2. Architecture", + "text_level": 1, + "bbox": [ + 511, + 319, + 648, + 333 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We adopt the ViT-L [11], pretrained by DINOv2 [44], as our image encoder. We use an input image size of 896 and a patch size of 14, leading to the same spatial resolution as the recent Sapiens models [26]. The text encoder is from MetaCLIP [69]. The SMPL-X decoder consists of 3 attention blocks with an embedding dimension of 1024. 
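To make the wiring described above concrete, the following is a minimal PyTorch-style sketch of the encoder/decoder layout (ViT-L/14 features at 896 x 896, a 3-block decoder at dimension 1024, and separate 2-layer heads). The class name, the head output sizes, and the use of a stock nn.TransformerDecoderLayer in place of the paper's two-way attention blocks are illustrative assumptions, not the authors' released implementation.

```python
import torch
import torch.nn as nn

class SMPLXDecoderSketch(nn.Module):
    # Illustrative skeleton: 3 attention blocks at dim 1024 let two query
    # tokens (SMPL-X and depth) plus the prompt tokens attend to image tokens
    # produced by a ViT-L/14 encoder at 896x896 (a 64x64 token grid).
    def __init__(self, dim=1024, num_blocks=3, num_heads=8):
        super().__init__()
        self.blocks = nn.ModuleList([
            nn.TransformerDecoderLayer(d_model=dim, nhead=num_heads, batch_first=True)
            for _ in range(num_blocks)
        ])
        self.smpl_token = nn.Parameter(torch.zeros(1, 1, dim))   # T_smpl
        self.depth_token = nn.Parameter(torch.zeros(1, 1, dim))  # T_depth
        # Separate 2-layer MLP heads; the output sizes assume a 6D rotation
        # representation for the orientation and body pose plus 10 shape
        # coefficients, and (p_xy, p_z) for the translation branch.
        self.head_smpl = nn.Sequential(nn.Linear(dim, dim), nn.GELU(), nn.Linear(dim, 22 * 6 + 10))
        self.head_depth = nn.Sequential(nn.Linear(dim, dim), nn.GELU(), nn.Linear(dim, 3))

    def forward(self, image_tokens, prompt_tokens):
        # image_tokens: (B, 64*64, dim); prompt_tokens: (B, P, dim)
        b = image_tokens.shape[0]
        queries = torch.cat(
            [self.smpl_token.expand(b, -1, -1), self.depth_token.expand(b, -1, -1), prompt_tokens],
            dim=1,
        )
        for blk in self.blocks:
            queries = blk(tgt=queries, memory=image_tokens)
        return self.head_smpl(queries[:, 0]), self.head_depth(queries[:, 1])
```

In the full model the decoder blocks additionally interleave the cross-person attention used for the interaction prompt, which the sketch omits.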
From the output tokens $(T_{smpl}^{\\prime}$ and $T_{depth}^{\\prime}$ ), we use separate 2-layer MLPs to regress $\\theta$ , $\\beta$ , $p_{xy}$ and $p_z$ as introduced in Sec.3.2.", + "bbox": [ + 511, + 342, + 906, + 477 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "8.3. Training", + "text_level": 1, + "bbox": [ + 511, + 488, + 617, + 503 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We train the PromptHMR image model using 8 H100 GPUs, with a batch size of 96 (12 images on each GPU). We use AdamW with a learning rate of 1e-5 for the image encoder, a learning rate of 3e-5 for the prompt encoder, and the SMPL-X decoder, $\\beta_{1}$ of 0.9, $\\beta_{2}$ of 0.999, and a weight decay of 5e-5.", + "bbox": [ + 511, + 512, + 905, + 602 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The losses presented in Sec.3.5 are weighted differently. For $\\mathcal{L}_{2D},\\mathcal{L}_{3D},\\mathcal{L}_{\\mathrm{SMPL}},\\mathcal{L}_V$ and $\\mathcal{L}_{trans}$ , the weights are set to $\\{50.0,5.0,1.0,1.0,10.0\\}$ respectively.", + "bbox": [ + 511, + 603, + 905, + 648 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "PromptHMR-Vid We train the PromptHMR video model on 2 H100 GPUs with a batch size of 512 samples consisting of 120 frames each. We use AdamW with a learning rate of 2e-4 and a weight decay of 5e-5. We use the same losses as the image-based version in addition to binary cross-entropy loss for joint contact predictions.", + "bbox": [ + 511, + 669, + 906, + 760 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "8.4. Metric", + "text_level": 1, + "bbox": [ + 511, + 770, + 602, + 785 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this section, we provide more details on the evaluation metric used in Sec.4 of the main paper.", + "bbox": [ + 511, + 794, + 903, + 824 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Mean Per Joint Position Error (MPJPE) is calculated by aligning the 3D joints obtained from SMPL-X with the ground truth at the pelvis before computing the mean square error. For historical reasons, different datasets use a different set of joints. Additionally, the pelvis definition could", + "bbox": [ + 511, + 825, + 905, + 900 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/0ce1f2959db11ef87e142908954c88a9bbc66d4fd12da3a4358a967120790538.jpg", + "image_caption": [ + "Figure 9. Ablation of interaction module. When fine-tuning the image model on CHI3D, adding the interaction module improves two-person interaction reconstruction on HI4D, which demonstrates the out-of-domain generalization ability of interaction prompting. Fine-tuning on both CHI3D and HI4D further improves results." + ], + "image_footnote": [], + "bbox": [ + 96, + 80, + 877, + 705 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "be different. To evaluate methods that predict SMPL-X on the datasets with SMPL labels, it's customary to convert the SMPL-X vertices to SMPL vertices and use a joint regressor on the converted vertices to obtain the 3D joints comparable to the labels. Note that all the above choices could alter the results and sometimes produce large \"artificial\" improvements. So we strictly follow the most recent methods in the evaluation procedure. It's reported in the unit of mm.", + "bbox": [ + 89, + 771, + 486, + 893 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Per Vertex error (PVE) computes mean square error on the vertices after pelvis alignment. 
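For reference, a small NumPy sketch of how these errors are commonly computed is given below (pelvis alignment for MPJPE and PVE, a similarity Procrustes alignment for PA-MPJPE). It follows the standard formulations rather than any specific evaluation code and assumes single-frame inputs already expressed in millimetres.

```python
import numpy as np

def mpjpe(pred_joints, gt_joints, pelvis_idx=0):
    # Mean per-joint position error after aligning the pelvis joint (mm).
    pred = pred_joints - pred_joints[pelvis_idx]
    gt = gt_joints - gt_joints[pelvis_idx]
    return np.linalg.norm(pred - gt, axis=-1).mean()

def pve(pred_verts, gt_verts, pred_pelvis, gt_pelvis):
    # Per-vertex error after pelvis alignment (mm); sensitive to pose and shape.
    return np.linalg.norm((pred_verts - pred_pelvis) - (gt_verts - gt_pelvis), axis=-1).mean()

def pa_mpjpe(pred_joints, gt_joints):
    # Procrustes-aligned MPJPE: solve for the similarity transform (scale,
    # rotation, translation) that best maps the prediction onto the ground
    # truth, then report the mean joint error (mm).
    mu_p, mu_g = pred_joints.mean(0), gt_joints.mean(0)
    p, g = pred_joints - mu_p, gt_joints - mu_g
    u, s, vt = np.linalg.svd(p.T @ g)
    r = (u @ vt).T
    if np.linalg.det(r) < 0:        # guard against reflections
        vt[-1] *= -1
        s[-1] *= -1
        r = (u @ vt).T
    scale = s.sum() / (p ** 2).sum()
    aligned = scale * p @ r.T + mu_g
    return np.linalg.norm(aligned - gt_joints, axis=-1).mean()
```

Which joint set and joint regressor are used (14 vs. 24 joints, SMPL vs. SMPL-X) changes these numbers, which is why the tables state the joint count explicitly; PVE itself is the same pelvis-aligned error evaluated on the mesh vertices.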
Compared to MPJPE, it measures the combined pose and shape error. It's reported in the unit of mm.", + "bbox": [ + 511, + 771, + 908, + 830 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Procrustes-aligned MPJPE (PA-MPJPE) performs general Procrustes alignment on the 3D joints before computing MPJPE. It measures purely the local articulated pose error. It's reported in the unit of mm.", + "bbox": [ + 511, + 839, + 908, + 902 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Paired PA-MPJPE (Pair-PA-MPJPE) aligns two people as a whole with the ground truth before computing MPJPE. In addition to per-person error, it also measures the error in the relative position and orientation of the two people. It's used in HI4D and CHI3D to evaluate interaction reconstruction. It's reported in the unit of mm.", + "bbox": [ + 89, + 90, + 480, + 180 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "World-aligned $\\mathbf{MPJPE}_{100}$ (WA-MPJPE $_{100}$ ) measures the world-grounded motion accuracy. It aligns a segment of 100 frames of predictions with the ground truth before computing MPJPE. It's reported in the unit of mm.", + "bbox": [ + 89, + 181, + 480, + 241 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "World $\\mathrm{MPJPE}_{100}$ ( $\\mathbf{W} - \\mathbf{MPJPE}_{100}$ ) is similar to WA-MPJPE but only aligns the first two frames of the 100-frame segment. Therefore, it provides a better measurement of the drifting in the direction and scale of the trajectories. It's reported in the unit of mm.", + "bbox": [ + 89, + 242, + 480, + 316 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Root Trajectory Error (RTE) measures the accuracy of the whole trajectory including the scale. It performs rigid alignment on the trajectory of the root and computes the mean square error. It's reported in the unit of $\\%$", + "bbox": [ + 89, + 316, + 480, + 377 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Motion Jitter (Jitter) uses finite difference to compute the jerk $(3^{rd}$ derivative) on the 3D joints. It measures rapid abrupt changes. It's reported in the unit of $10m / s^3$ .", + "bbox": [ + 89, + 378, + 480, + 422 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Foot Skating measures erroneous foot sliding. It thresholds the velocity of the ground truth foot vertices to compute contact frames, and calculates the displacement on the predicted foot vertices during contact. It's reported in the unit of $\\text{mm}$ .", + "bbox": [ + 89, + 422, + 480, + 497 + ], + "page_idx": 14 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_model.json b/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..91c8787049e013beaad26c4a4d6f955fcde0634a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_model.json @@ -0,0 +1,2936 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.244, + 0.131, + 0.756, + 0.154 + ], + "angle": 0, + "content": "PromptHMR: Promptable Human Mesh Recovery" + }, + { + "type": "text", + "bbox": [ + 0.239, + 0.18, + 0.758, + 0.199 + ], + "angle": 0, + "content": "Yufu Wang\\(^{1,4}\\) Yu Sun\\(^{1}\\) Priyanka Patel\\(^{1}\\) Kostas Daniilidis\\(^{4,5}\\)" + }, + { + "type": "text", + "bbox": [ + 0.315, + 0.199, + 0.683, + 0.216 + ], + "angle": 0, + "content": "Michael J. 
Black1,2 Muhammed Kocabas1,2,3" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.216, + 0.885, + 0.234 + ], + "angle": 0, + "content": "\\(^{1}\\) Meshcapade \\(^{2}\\) MPI for Intelligent Systems \\(^{3}\\) ETH Zürich \\(^{4}\\) University of Pennsylvania \\(^{5}\\) Archimedes" + }, + { + "type": "text", + "bbox": [ + 0.277, + 0.235, + 0.716, + 0.253 + ], + "angle": 0, + "content": "https://yufu-wang.github.io/phmr-page" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.268, + 0.339, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.175, + 0.387, + 0.325, + 0.401 + ], + "angle": 0, + "content": "image \\(\\downarrow\\) box prompts" + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.409, + 0.288, + 0.427 + ], + "angle": 0, + "content": "PromptHMR" + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.269, + 0.576, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.4, + 0.388, + 0.549, + 0.401 + ], + "angle": 0, + "content": "image ↓ box prompts" + }, + { + "type": "image_caption", + "bbox": [ + 0.407, + 0.409, + 0.514, + 0.425 + ], + "angle": 0, + "content": "PromptHMR" + }, + { + "type": "image", + "bbox": [ + 0.588, + 0.269, + 0.701, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.591, + 0.388, + 0.7, + 0.4 + ], + "angle": 0, + "content": "image ↓ masks" + }, + { + "type": "image_caption", + "bbox": [ + 0.594, + 0.409, + 0.7, + 0.427 + ], + "angle": 0, + "content": "PromptHMR" + }, + { + "type": "image", + "bbox": [ + 0.727, + 0.269, + 0.862, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.739, + 0.388, + 0.865, + 0.399 + ], + "angle": 0, + "content": "image box+text" + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.409, + 0.85, + 0.426 + ], + "angle": 0, + "content": "PromptHMR" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.445, + 0.338, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.446, + 0.576, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.588, + 0.465, + 0.703, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.727, + 0.464, + 0.862, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.57, + 0.907, + 0.655 + ], + "angle": 0, + "content": "Figure 1. PromptHMR is a promptable human pose and shape (HPS) estimation method that processes images with spatial or semantic prompts. It takes \"side information\" readily available from vision-language models or user input to improve the accuracy and robustness of 3D HPS. PromptHMR recovers human pose and shape from spatial prompts such as (a) face bounding boxes, (b) partial or complete person detection boxes, or (c) segmentation masks. It refines its predictions using semantic prompts such as (c) person-person interaction labels for close contact scenarios, or (d) natural language descriptions of body shape to improve body shape predictions. Both image and video versions of PromptHMR achieve state-of-the-art accuracy." 
+ }, + { + "type": "title", + "bbox": [ + 0.249, + 0.681, + 0.327, + 0.697 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.486, + 0.901 + ], + "angle": 0, + "content": "Human pose and shape (HPS) estimation presents challenges in diverse scenarios such as crowded scenes, person-person interactions, and single-view reconstruction. Existing approaches lack mechanisms to incorporate auxiliary \"side information\" that could enhance reconstruction accuracy in such challenging scenarios. Furthermore, the most accurate methods rely on cropped person detections and cannot exploit scene context while methods that process the whole image often fail to detect people and are less accurate than methods that use crops. While recent language-based methods explore HPS reasoning through large language or vision-language models, their metric accuracy is well below" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.683, + 0.909, + 0.911 + ], + "angle": 0, + "content": "the state of the art. In contrast, we present PromptHMR, a transformer-based promptable method that reformulates HPS estimation through spatial and semantic prompts. Our method processes full images to maintain scene context and accepts multiple input modalities: spatial prompts like bounding boxes and masks, and semantic prompts like language descriptions or interaction labels. PromptHMR demonstrates robust performance across challenging scenarios: estimating people from bounding boxes as small as faces in crowded scenes, improving body shape estimation through language descriptions, modeling person-person interactions, and producing temporally coherent motions in videos. Experiments on benchmarks show that PromptHMR achieves state-of-the-art performance while offering flexible prompt-based control over the HPS estimation process." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.256, + 0.059, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.06397v2 [cs.CV] 24 May 2025" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.223, + 0.107 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.482, + 0.281 + ], + "angle": 0, + "content": "The estimation of 3D human pose and shape (HPS) is classically viewed as regressing the parameters of shape and pose from pixels. In particular, most methods take a tightly cropped image of a person and output the pose and shape in camera coordinates. While the accuracy of such methods has increased rapidly, they do not address the whole problem. In particular, an HPS method should be able to take an image or video containing complex human-human and human-scene interactions, return the parameters of every person in the scene, and place these people in a consistent global coordinate frame." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.282, + 0.482, + 0.402 + ], + "angle": 0, + "content": "Our key observation is that the classical \"pixels to parameters\" formulation of the problem is too narrow. Today, we have large vision-language foundation models (VLMs) that understand a great deal about images and what people are doing in them. What these models lack, however, is an understanding of 3D human pose and shape. Recent work [10, 16] has tried to bring together VLMs and 3D HPS but with 3D accuracy well below the best classical methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.403, + 0.483, + 0.674 + ], + "angle": 0, + "content": "Consequently, we need to think about the problem in a different way and ask whether we can exploit readily available side information (e.g. provided by a VLM) to improve 3D HPS regression robustness, usefulness, and accuracy. To that end, we develop a novel \"promptable\" HPS architecture called PromptHMR. Consider the sample images shown in Fig. 1. In crowded scenes, existing person detection methods struggle, while face detection methods remain reliable. When people closely interact, their body parts overlap and occlude each other, introducing ambiguity in pose estimation. Moreover, 3D body shape estimation from monocular views is challenging due to perspective ambiguity. In all these cases, we can extract cues, or prompts, that provide \"side information\" that can help an HPS method better analyze the scene. PromptHMR formalizes this intuition by combining image evidence with different types of spatial and semantic information that can come from either humans or AI systems such as VLMs." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.675, + 0.483, + 0.87 + ], + "angle": 0, + "content": "Specifically, our approach combines three key components: (1) a vision transformer that extracts features from high-resolution full images to preserve scene context, (2) a multi-modal prompt encoder that processes spatial and semantic inputs, and (3) a transformer decoder that attends to both prompt and image tokens to generate SMPL-X [47] body parameters. This design addresses the limitations of cropped-image HPS methods by processing full images using side information in the form of prompts. It addresses the challenges that full-image HPS methods have in detecting all people in a scene by accepting readily available bounding boxes. Last, our method incorporates auxiliary semantic information through text descriptions or interaction labels." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.483, + 0.902 + ], + "angle": 0, + "content": "By combining spatial and semantic prompting, our method offers a powerful and versatile approach to 3D HPS" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.302 + ], + "angle": 0, + "content": "estimation from the whole image. At test time, we show that this promptable structure (1) can take various bounding boxes or segmentation masks to recover full body HPS in a robust way, (2) improve its body shape predictions by using textual descriptions as input, (3) is capable of modeling person-person close interaction directly in the regression process, and (4) uses full image context to reconstruct people coherently in the camera space and the world space. Our model can handle video by incorporating temporal transformer layers at the SMPL-X decoding phase, yielding temporally stable and smooth motions. Last, following TRAM [67], we combine the temporal version of our model with metric SLAM to estimate human motion in world coordinates." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.303, + 0.905, + 0.514 + ], + "angle": 0, + "content": "We make several key design choices that make PromptHMR successful. To achieve robustness to different spatial inputs, we train our model by simulating noisy full-body and face-region bounding boxes. For improved body shape estimation, we leverage SHAPY [8] to generate automatic body shape descriptions for training samples and process them with a pretrained text encoder [50]. 
To enhance person-person interaction reconstruction, we use segmentation masks as more precise spatial prompts and develop person-person attention layers that operate between prompted people, producing coherent reconstructions of close interactions. Through random masking of different input types during training, our model learns to work with any combination of prompts at test time." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.515, + 0.905, + 0.62 + ], + "angle": 0, + "content": "Quantitative experiments on the EMDB [25], 3DPW [65], RICH [21], Hi4D [71], CHI3D [17] and HBW [8] benchmark datasets demonstrate that our method outperforms state-of-the-art (SOTA) approaches and strong baselines. We also provide many qualitative examples of in-the-wild images and videos that illustrate the robustness and generalization of PromptHMR." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.621, + 0.905, + 0.787 + ], + "angle": 0, + "content": "By moving away from the pure pixels-to-parameters approach, PromptHMR not only achieves a new SOTA, it shows a new way of improving both accuracy and robustness by leveraging side information that is easily available. One can think of this as a collaboration between VLMs, which know a lot about people in images but not in 3D, and a metric regressor that knows a lot about 3D humans but not about the semantics of what they do. We show that this combination has significant upside potential to increase both generality and accuracy. Our code and model are available for research purposes." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.8, + 0.655, + 0.815 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.905, + 0.903 + ], + "angle": 0, + "content": "Human pose and shape estimation from images. Existing methods for human pose and shape (HPS) estimation can be broadly categorized into two main approaches. The first [6, 18, 23, 29, 30, 32-34, 40, 66, 73] uses a tightly cropped image of an individual as input, and estimates" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.168 + ], + "angle": 0, + "content": "pose and shape in camera coordinates. While effective for isolated individuals, this approach discards scene context that is essential to resolve human pose in cases of occlusion, severe overlap and close interaction in multi-person scenes [17, 71]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.169, + 0.482, + 0.306 + ], + "angle": 0, + "content": "The second category [2, 22, 57, 59–61] build upon object detection frameworks [5, 13] to jointly detect humans and estimate their pose and shape parameters. Having access to the entire image, they can better perceive occluded individuals and infer depth relationships, but they often suffer from detection failures due to the difficulty in simultaneously performing detection and reconstruction. Our \"promptable\" architecture leverages detection box prompts to resolve such conflicts while having access to the entire scene context." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.307, + 0.482, + 0.413 + ], + "angle": 0, + "content": "Human pose and shape estimation from video. Methods for human motion estimation from video can also be divided into two main categories. The first [7, 24, 28, 38, 58] focuses on estimating smooth human motion in camera space. 
These methods build upon single-person HPS estimation approaches [23, 32] by adding temporal layers during the SMPL decoding phase to introduce temporal coherence." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.414, + 0.482, + 0.581 + ], + "angle": 0, + "content": "More recent methods estimate human motion in world coordinates from videos captured with dynamic cameras. These methods follow a two-stage approach, first estimating camera motion using SLAM techniques [19, 20, 41, 42, 62, 63], and then leveraging human motion priors to optimize the human world motion [31, 70, 72]. Others [53, 54] learn temporal models to directly regress human world motion from image and camera features. Still others [67, 74] use monocular metric depth estimation to solve for the scale of camera motion and transform human motion from camera space to world coordinates." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.582, + 0.482, + 0.673 + ], + "angle": 0, + "content": "In our approach, we extend PromptHMR to video by taking the SMPL-X output tokens and utilizing a temporal transformer module to estimate temporally stable and smooth human motion and translation in camera space. We follow TRAM [67] to transform human motion to world coordinates due to its simplicity and effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.675, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Semantic reasoning about 3D humans in images. Recent methods explore combining different types of semantic information, such as language descriptions and knowledge of person-person interactions, to improve reasoning about 3D humans from images and videos. For example, ChatPose [16] follows the common approach of visual language models (VLMs) [36] by fine-tuning a large language model (LLM) with a combination of images and tokens to estimate SMPL parameters. In a similar direction, PoseEmbroider [10] is a multi-modal framework that aligns image, 3D pose, and text representations in a shared latent space. While ChatPose focuses on combining high-level scene reasoning with 3D HPS, PoseEmbroider exploits detailed language descriptions of human pose. While promising, neither method achieves SOTA accuracy on the HPS task. Note" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.137 + ], + "angle": 0, + "content": "that many other methods relate language to human pose or motion, without considering images [1, 9, 37, 48, 64], but these are outside our scope." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.137, + 0.905, + 0.318 + ], + "angle": 0, + "content": "Additionally, several methods [8, 49, 55] focus on modeling the relationship between SMPL body shape and natural language descriptions. These methods show that language descriptions and images can provide complementary information to solve this task. Other approaches, such as BUDDI [43] and ProsePose [56], address the challenge of estimating person-person interactions. BUDDI is an optimization-based approach that leverages diffusion model as a prior over interacting people, while ProsePose queries a VLM to estimate contact points on the human body surface and uses these contact points to guide an optimization process for improving human interaction." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.318, + 0.905, + 0.47 + ], + "angle": 0, + "content": "Overall, methods like ChatPose [16] and PoseEmbroider [10] are promising steps toward jointly learning the relationship between vision, language, and 3D humans, but their understanding of 3D humans remains limited, as indicated by their relatively low 3D pose accuracy. Meanwhile, SHAPY [8], BodyShapeGPT [49], and BodyTalk [55] focus solely on exploring the relationship between SMPL body shape and natural language. BUDDI and ProsePose are post-processing approaches for interaction that do not directly reason using image information." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.47, + 0.905, + 0.545 + ], + "angle": 0, + "content": "Our approach addresses the limitations of these methods by training a single model capable of flexible prompting that achieves state-of-the-art (SOTA) performance, not only on standard HPS benchmarks but also on benchmarks tailored to body shape and person-person interaction." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.557, + 0.605, + 0.571 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.582, + 0.905, + 0.643 + ], + "angle": 0, + "content": "Given an image \\(I\\) containing \\(N\\) people and a set of prompts, our main goal is to recover the pose, shape, and locations of the people in the camera space to form a coherent human-centric 3D scene. Figure 2 shows an overview." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.65, + 0.768, + 0.667 + ], + "angle": 0, + "content": "3.1. Promptable mesh regression" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.673, + 0.905, + 0.764 + ], + "angle": 0, + "content": "We adopt SMPL-X [47] to represent each person \\(i\\) in the 3D space, including the orientation \\(\\phi_i \\in \\mathbb{R}^3\\), local body pose \\(\\theta_i \\in \\mathbb{R}^{22 \\times 3}\\), shape \\(\\beta_i \\in \\mathbb{R}^{10}\\), and translation \\(\\tau_i \\in \\mathbb{R}^3\\) in the camera space. We do not include face and hand parameters in this work. Each human \\(H_i\\) is mapped to a 3D mesh with the differentiable SMPL-X layer." + }, + { + "type": "equation", + "bbox": [ + 0.638, + 0.771, + 0.905, + 0.789 + ], + "angle": 0, + "content": "\\[\nH _ {i} = \\left\\{\\phi_ {i}, \\theta_ {i}, \\beta_ {i}, \\tau_ {i} \\right\\}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.796, + 0.907, + 0.902 + ], + "angle": 0, + "content": "Each person can be prompted with spatial and semantic prompts. Spatial prompts include a bounding box \\( b_{i} \\in \\mathbb{R}^{2 \\times 2} \\) (the two corners) and a segmentation mask \\( m_{i} \\in \\mathbb{R}^{h \\times w} \\). Semantic prompts consist of text and two-person interaction labels. The text prompt is the CLIP embedding \\( t_{i} \\) of a sentence describing the body shape. The interaction prompt is a binary variable \\( k_{i} \\) indicating whether two" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.092, + 0.9, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.243, + 0.907, + 0.327 + ], + "angle": 0, + "content": "Figure 2. Method overview. PromptHMR estimates SMPL-X parameters for each person in an image based on various types of prompts, such as boxes, language descriptions, and person-person interaction cues. 
Given an image and prompts, we utilize a vision transformer to generate image embeddings and mask and prompt encoders to map different types of prompts to tokens. Optionally, camera intrinsics can be embedded along with the image embeddings. The image embeddings and prompt tokens are then fed to the SMPL-X decoder. The SMPL-X decoder is a transformer-based module that attends to both the image and prompt tokens to estimate SMPL-X parameters. Note that the language and interaction prompts are optional, but providing them enhances the accuracy of the estimated SMPL-X parameters." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.346, + 0.484, + 0.406 + ], + "angle": 0, + "content": "people are in close contact. While semantic prompts are optional, each human needs at least one spatial prompt to be reconstructed. Overall, the input prompts are represented as \\( P_{i} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.218, + 0.412, + 0.482, + 0.435 + ], + "angle": 0, + "content": "\\[\nP _ {i} \\subseteq \\left\\{b _ {i}, m _ {i}, t _ {i}, k _ {i} \\right\\} \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.227, + 0.433, + 0.356, + 0.446 + ], + "angle": 0, + "content": "\\[\nb _ {i} \\in P _ {i} \\text {o r} m _ {i} \\in P _ {i}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.452, + 0.484, + 0.497 + ], + "angle": 0, + "content": "Promptable human mesh recovery (PromptHMR) is defined as a learnable function that maps an image and a set of prompts to a set of 3D humans" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.502, + 0.482, + 0.521 + ], + "angle": 0, + "content": "\\[\nf: \\left(I, \\left\\{P _ {i} \\right\\} _ {i = 1} ^ {N}\\right)\\rightarrow \\left\\{H _ {i} \\right\\} _ {i = 1} ^ {N}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.526, + 0.484, + 0.557 + ], + "angle": 0, + "content": "This task definition integrates all available contexts to locate and reconstruct prompted humans in the image." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.563, + 0.178, + 0.578 + ], + "angle": 0, + "content": "3.2. Model" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.587, + 0.484, + 0.617 + ], + "angle": 0, + "content": "Image encoder. The image is first encoded as tokens by a vision transformer (ViT) encoder from DINOv2 [12, 44]:" + }, + { + "type": "equation", + "bbox": [ + 0.225, + 0.623, + 0.482, + 0.64 + ], + "angle": 0, + "content": "\\[\nF = \\operatorname {E n c o d e r} (I), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.646, + 0.484, + 0.736 + ], + "angle": 0, + "content": "To ensure sufficient resolution for modeling humans at both near and far distances, we use \\(896 \\times 896\\) images. The encoder is run once per frame regardless of the number of people prompted. When camera intrinsics are provided, we add positional encoding of the camera rays to the image tokens to make them camera-aware [2, 15]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.737, + 0.484, + 0.811 + ], + "angle": 0, + "content": "Mask encoder. When available, masks are first processed by an encoder consisting of stripped convolutional layers that downsample the masks. The output mask features are added to the image tokens. If no mask is provided, a learned \"no mask\" token is added instead." + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.818, + 0.482, + 0.835 + ], + "angle": 0, + "content": "\\[\nF _ {i} = \\operatorname {E n c o d e r} _ {\\mathrm {m}} \\left(m _ {i}\\right) + F. 
\\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Prompt encoder. The prompt encoder consists of a set of transformations that map different types of prompts to token vectors of the same dimension. When a prompt is not available, it is replaced with a learned null token." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.346, + 0.907, + 0.497 + ], + "angle": 0, + "content": "For bounding boxes, we encode \\( b_{i} \\) using positional encoding summed with learned embeddings to form the box prompt tokens \\( T_{bi} = \\mathrm{PE}(b_{i}) \\), with \\( T_{bi} \\in \\mathbb{R}^{2 \\times d} \\). We design different box transformations during training to allow the model to use different boxes as a human identifier. In the training phase, each instance is prompted with either a whole-body bounding box, a face bounding box, or a truncated box covering part of the body. Gaussian noise is added to both corners. At inference time, the model accepts boxes without needing to know the box types." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.497, + 0.906, + 0.633 + ], + "angle": 0, + "content": "Language is a natural way to supply semantic information, and in this paper, we use language to supplement spatial prompts with information on body shape. A sentence such as \"a muscular and tall male\" is encoded with the CLIP text encoder \\( T_{ti} = \\mathrm{CLIP}(t_i) \\), with \\( T_{ti} \\in \\mathbb{R}^{d} \\). To generate paired (image, text) data, we run SHAPY's [8] shape-to-attribute method on the ground truth shape parameters to obtain shape attribute scores and randomly pick a subset of top attributes to form a sentence." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.633, + 0.906, + 0.678 + ], + "angle": 0, + "content": "The interaction prompt \\( k_{i} \\) passes through the prompt encoder without modification and directly switches on-off the cross-person attention that is described in Sec. 3.3." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.678, + 0.906, + 0.723 + ], + "angle": 0, + "content": "SMPL-X decoder. The SMPL-X decoder appends two query tokens \\( T_{\\mathrm{spl}}, T_{\\mathrm{depth}} \\) with the prompt tokens \\( T_{bi}, T_{ti} \\) to form the person-specific prompt \\( T_i \\in \\mathbb{R}^{5 \\times d} \\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.724, + 0.906, + 0.754 + ], + "angle": 0, + "content": "Finally, we use a standard transformer decoder and two MLP heads to produce the final output" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.76, + 0.813, + 0.78 + ], + "angle": 0, + "content": "\\[\nT _ {s m p l} ^ {\\prime}, T _ {d e p t h} ^ {\\prime} = \\mathrm {D e c o d e r} (F _ {i}, T _ {i})\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.782, + 0.905, + 0.799 + ], + "angle": 0, + "content": "\\[\n\\phi_ {i}, \\theta_ {i}, \\beta_ {i} = \\operatorname {H e a d} _ {s m p l} \\left(T _ {s m p l} ^ {\\prime}\\right) \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.664, + 0.802, + 0.831, + 0.82 + ], + "angle": 0, + "content": "\\[\n\\tau_ {i} = \\operatorname {H e a d} _ {d e p t h} (T _ {d e p t h} ^ {\\prime}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.906, + 0.901 + ], + "angle": 0, + "content": "The transformer consists of three attention blocks. Each block applies self-attention on the tokens, cross-person attention (described in Sec. 
3.3), and then two-way cross-attention between the tokens and the image embeddings [27]. The self-attention and cross-attention with the" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.152 + ], + "angle": 0, + "content": "image are applied to each prompted person independently. We use separate tokens \\( T_{\\mathrm{spl}} \\) and \\( T_{\\mathrm{depth}} \\) to make the location representation invariant to the 3D human pose and shape representation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.153, + 0.483, + 0.243 + ], + "angle": 0, + "content": "Regressing the location of the human in the camera space is much more challenging than most prior work that models humans in a cropped image space. Therefore, we do not regress \\(\\tau\\) directly. We regress focal length normalized 2D translation \\(p_{xy} \\in \\mathbb{R}^2\\) and inverse depth \\(p_z \\in \\mathbb{R}\\), and then transform them to \\(\\tau\\) as follows" + }, + { + "type": "equation", + "bbox": [ + 0.142, + 0.25, + 0.483, + 0.283 + ], + "angle": 0, + "content": "\\[\nt _ {x y} = \\frac {p _ {x y}}{p _ {z}} \\quad t _ {z} = \\frac {1}{p _ {z}} \\times \\frac {f}{f _ {c}} \\quad \\tau = [ t _ {x y}, t z ], \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.292, + 0.484, + 0.399 + ], + "angle": 0, + "content": "where \\( f \\) is the ground truth or estimated focal length of the image, and \\( f_{c} \\) is the canonical focal length. Predicting the normalized inverse depth follows the recent monocular depth literature [51] and is also intuitive since the inverse depth is linearly related to the size of the human in the image. Predicting \\( p_{xy} \\) is equivalent to predicting the 2D location of the human in a normalized image plane." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.407, + 0.307, + 0.423 + ], + "angle": 0, + "content": "3.3. Two-person interaction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.43, + 0.483, + 0.492 + ], + "angle": 0, + "content": "We introduce promptable layers in the decoder to model two-person interaction. We describe the case where there are two people in the image, but the implementation can extend to model an interacting pair in a larger group." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.491, + 0.483, + 0.567 + ], + "angle": 0, + "content": "The promptability is modeled as a flow control with a residual connection (Fig. 3). Specifically, if two humans are interacting (as indicated by \\( k_{i} \\)), their query tokens pass through an additional self-attention layer; otherwise, non-interacting humans skip this." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.567, + 0.483, + 0.672 + ], + "angle": 0, + "content": "Applying attention to every person often creates unnecessary dependency in crowded scenes, and there is limited training data for large-group scenarios. However, there is high-quality data featuring two-person social interactions. By making the interaction layers promptable, we mitigate data diversity issues and increase flexibility, regardless of the number of people in the scene." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.672, + 0.483, + 0.793 + ], + "angle": 0, + "content": "Our proposed interaction layer uses a standard self-attention mechanism. First, we add positional encodings to the query tokens to distinguish the two individuals. The encoded tokens then go through a self-attention layer, whose output is combined with the original tokens via a residual connection. 
Our experiments demonstrate that including these interaction layers significantly improves inter-person pose accuracy in two-person interaction benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.802, + 0.336, + 0.818 + ], + "angle": 0, + "content": "3.4. PromptHMR video version" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.483, + 0.903 + ], + "angle": 0, + "content": "In addition to the single-image variant of PromptHMR, we train an extended version that processes videos to estimate human motion in world coordinates. To achieve this, we introduce a simple and efficient temporal transformer module. Given a monocular video sequence \\(\\{I^t\\}_{t=0}^T\\), we first run" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.091, + 0.9, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.208, + 0.906, + 0.251 + ], + "angle": 0, + "content": "Figure 3. SMPL-X decoder. The top row shows one attention block in the decoder. The cross-person interaction module can be turned on/off. The bottom row shows the cross-person attention." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.27, + 0.906, + 0.407 + ], + "angle": 0, + "content": "PromptHMR to obtain per-subject SMPL-X decoder output tokens \\( T_{\\mathrm{mpl}}^{\\prime} \\) and \\( T_{\\mathrm{depth}}^{\\prime} \\), assuming that the subject identities are provided with the prompts. These tokens, along with the positional encoding of time \\( t \\), are fed to a decoder-only temporal transformer module with twelve attention blocks. The output tokens are converted to SMPL-X parameters \\( \\phi_t, \\theta_t, \\beta_t \\), translation \\( \\tau_t \\), and joint contact probabilities \\( c_t \\). The contact probabilities indicate whether a given joint is in contact with the ground plane similar to [52-54]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.408, + 0.906, + 0.56 + ], + "angle": 0, + "content": "To obtain results in world coordinates, we adopt the approach from TRAM [67]. Specifically, we use DROID-SLAM [62] and a monocular metric depth estimation model, ZoeDepth [3], to estimate camera motion in metric world coordinates. The translation parameters \\(\\tau_{t}\\) are then transformed to world coordinates using the estimated camera motion. To refine the human trajectory and mitigate foot-skating artifacts, we leverage the estimated contact probabilities and run a fast postprocessing that optimizes the contact joints to have zero velocity." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.575, + 0.602, + 0.589 + ], + "angle": 0, + "content": "3.5. 
Losses" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.6, + 0.906, + 0.631 + ], + "angle": 0, + "content": "PromptHMR is trained with a combination of 2D and 3D losses, following traditional HMR methods [23, 32]:" + }, + { + "type": "equation", + "bbox": [ + 0.535, + 0.646, + 0.884, + 0.663 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\lambda_ {1} \\mathcal {L} _ {2 D} + \\lambda_ {2} \\mathcal {L} _ {3 D} + \\lambda_ {3} \\mathcal {L} _ {\\mathrm {S M P L}} + \\lambda_ {4} \\mathcal {L} _ {V} + \\lambda_ {5} \\mathcal {L} _ {t}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.678, + 0.704, + 0.692 + ], + "angle": 0, + "content": "with each term calculated as" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.708, + 0.78, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {2 D} = \\left\\| \\hat {\\mathcal {J}} _ {2 D} - \\Pi \\left(\\mathcal {J} _ {3 D}\\right) \\right\\| _ {F} ^ {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.729, + 0.755, + 0.747 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {3 D} = \\left\\| \\hat {\\mathcal {J}} _ {3 D} - \\mathcal {J} _ {3 D} \\right\\| _ {F} ^ {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.578, + 0.751, + 0.72, + 0.769 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {S M P L}} = | | \\hat {\\Theta} - \\Theta | | _ {2} ^ {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.603, + 0.773, + 0.722, + 0.79 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {V} = \\left| \\left| \\hat {V} - V \\right| \\right| _ {F} ^ {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.607, + 0.793, + 0.844, + 0.811 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {t} = \\left| \\left| \\hat {p} _ {x y} - p _ {x y} \\right| \\right| _ {F} ^ {2} + \\left| \\left| \\hat {p} _ {z} - p _ {z} \\right| \\right| _ {F} ^ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.906, + 0.903 + ], + "angle": 0, + "content": "where \\(\\mathcal{J}_{3D}\\) and \\(V\\) are the 3D joints and vertices of the SMPL-X model, with the hat operator denoting the ground truth. \\(\\Pi\\) is the camera reprojection operator. Additionally, on datasets with ground truth translation labels, we supervise the normalized translation \\(p_{xy}\\) and inverse depth \\(p_z\\)." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.09, + 0.224, + 0.108 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.116, + 0.484, + 0.252 + ], + "angle": 0, + "content": "Datasets. We train PromptHMR with standard datasets: BEDLAM [4], AGORA [46], 3DPW [65], COCO [35], and MPII [39]. Following 4DHumans, we add AIC [68] and InstaVariety [24] as in-the-wild data, with pseudoground truth from CamSMPLify [45]. Additionally, we add CHI3D [17] and HI4D [71] to enable learning two-person interaction following the train-test splits from BUDDI [43]. Including CHI3D and HI4D does not improve performance on other benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.254, + 0.484, + 0.33 + ], + "angle": 0, + "content": "Implementation. We train PromptHMR with AdamW with a batch size of 96 images of resolution \\(896 \\times 896\\). We use a learning rate of \\(1e^{-5}\\) for the image encoder and \\(3e^{-5}\\) for the prompt encoder and the SMPL-X decoder, with a weight decay of \\(5e^{-5}\\). The training converges within 350K steps." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.331, + 0.484, + 0.421 + ], + "angle": 0, + "content": "Evaluation. We evaluate camera space reconstruction accuracy on 3DPW [65], EMDB [25] and RICH [21], using MPJPE, Procrustes-aligned MPJPE (PA-MPJPE) and Per Vertex Error (PVE) [23]. We evaluate inter-person accuracy on HI4D and CHI3D by Pair-PA-MPJPE, which aligns the two people as a whole with the ground truth [43]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.423, + 0.484, + 0.484 + ], + "angle": 0, + "content": "To evaluate world-grounded motion on EMDB with PromptHMR video (PromptHMR-vid), we compute World-aligned MPJPE \\((\\mathrm{WA - MPJPE}_{100})\\), World MPJPE \\((\\mathrm{W - MPJPE}_{100})\\) and Root Translation Error (RTE in %) [54, 70]." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.497, + 0.318, + 0.513 + ], + "angle": 0, + "content": "4.1. Reconstruction accuracy" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.521, + 0.484, + 0.657 + ], + "angle": 0, + "content": "For camera space reconstruction, as shown in Table 1, PromptHMR and PromptHMR-Vid demonstrate state-of-the-art performance, matching crop-based methods while achieving better results than other full-image methods. PromptHMR and CameraHMR use the same training data and have similar performance, which validates that this prompt-based approach can achieve metrically accurate results. For representative results, see Fig. 7, where PromptHMR recovers coherent 3D scenes of people." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.659, + 0.484, + 0.764 + ], + "angle": 0, + "content": "For interaction reconstruction, PromptHMR achieves good accuracy as indicated in Table 2. Compared to BUDDI which is also trained on CHI3D and HI4D, our method achieves better overall accuracy on per-person and interperson metrics. We show qualitative results in Fig. 8. As a monocular regression method, PromptHMR still cannot avoid interpenetration between closely interacting people." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.765, + 0.484, + 0.901 + ], + "angle": 0, + "content": "PromptHMR-Vid achieves SOTA performance among methods that estimate human motion in world coordinates, as shown in Table 4. Unlike TRAM, we estimate the joint contact probabilities similar to [53, 54]. Therefore, we achieve lower foot skating than TRAM, even though we use the same metric SLAM method to transform motion in camera space to world coordinates. Please refer to our supplementary material (SupMat) for qualitative results of PromptHMR-Vid." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.089, + 0.901, + 0.164 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.168, + 0.906, + 0.196 + ], + "angle": 0, + "content": "Figure 4. Effect of box prompts. Our method remains stable with different boxes, including noisy truncated boxes." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.205, + 0.901, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.291, + 0.906, + 0.334 + ], + "angle": 0, + "content": "Figure 5. Effect of mask prompts. Results are from the same model with different prompt inputs. Masks are better for close interaction scenarios where boxes are ambiguous." + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.337, + 0.9, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.603, + 0.906, + 0.659 + ], + "angle": 0, + "content": "Figure 6. Effect of shape prompts. 
Compared to the baseline that does not incorporate shape description during training and testing, the model with shape prompts has better accuracy on HBW, especially in ambiguous images." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.679, + 0.779, + 0.696 + ], + "angle": 0, + "content": "4.2. Effect of multimodal prompts" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.703, + 0.906, + 0.764 + ], + "angle": 0, + "content": "We conduct qualitative and quantitative evaluations of the multimodal prompts. For efficient ablation, we train models with \\(448 \\times 448\\) input resolution and select the best model within 150K steps of training." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.765, + 0.906, + 0.869 + ], + "angle": 0, + "content": "For box prompts, as shown in rows 3-4 of Fig. 7, our method is able to take a combination of different boxes from in-the-wild images to reconstruct crowded scenes. Figure 4 also shows an example with varying box inputs. PromptHMR remains stable when the boxes change and uses full image context to reconstruct the human even when the boxes are truncated." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.906, + 0.902 + ], + "angle": 0, + "content": "The mask prompt is more effective than boxes when people closely overlap (Fig. 5), as boxes are ambiguous in such" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.155, + 0.091, + 0.842, + 0.317 + ], + "angle": 0, + "content": "
| | Models | 3DPW (14) | | | EMDB (24) | | | RICH (24) | | |
| | | PA-MPJPE | MPJPE | PVE | PA-MPJPE | MPJPE | PVE | PA-MPJPE | MPJPE | PVE |
| cropped image | CLIFF* [33] | 43.0 | 69.0 | 81.2 | 68.3 | 103.3 | 123.7 | 68.1 | 103.3 | 128.0 |
| | HMR2.0a [18] | 44.4 | 69.8 | 82.2 | 61.5 | 97.8 | 120.0 | 60.7 | 98.3 | 120.8 |
| | TokenHMR [14] | 44.3 | 71.0 | 84.6 | 55.6 | 91.7 | 109.4 | - | - | - |
| | CameraHMR [45] | 35.1 | 56.0 | 65.9 | 43.3 | 70.2 | 81.7 | 34.0 | 55.7 | 64.4 |
| full image | BEV [60] | 46.9 | 78.5 | 92.3 | 70.9 | 112.2 | 133.4 | - | - | - |
| | Multi-HMR* [2] | 45.9 | 73.1 | 87.1 | 50.1 | 81.6 | 95.7 | 46.3 | 73.8 | 83.0 |
| | PromptHMR* | 36.6 | 58.7 | 69.4 | 41.0 | 71.7 | 84.5 | 37.3 | 56.6 | 65.5 |
| video | WHAM [54] | 37.5 | 59.8 | 71.5 | 52.0 | 81.6 | 96.9 | 44.3 | 80.0 | 91.2 |
| | TRAM [67] | 35.6 | 59.3 | 69.6 | 45.7 | 74.4 | 86.6 | - | - | - |
| | GVHMR [53] | 37.0 | 56.6 | 68.7 | 44.5 | 74.2 | 85.9 | 39.5 | 66.0 | 74.4 |
| | PromptHMR-Vid | 35.5 | 56.9 | 67.3 | 40.1 | 68.1 | 79.2 | 37.0 | 57.4 | 65.8 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.324, + 0.907, + 0.368 + ], + "angle": 0, + "content": "Table 1. Comparison of mesh reconstruction on the 3DPW, EMDB and RICH datasets, with the number of joints in parenthesis. \\(\\star\\) denotes methods that use ground truth focal length during inference. Note that we remove the test-time flip augmentation from all of the video methods to ensure a fair comparison. All metrics are in mm." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.381, + 0.478, + 0.482 + ], + "angle": 0, + "content": "
| Models | HI4D (14) | | | CHI3D (14) | | |
| | PA-MPJPE | MPJPE | Pair-PA-MPJPE | PA-MPJPE | MPJPE | Pair-PA-MPJPE |
| BEV* [60] | 81 | - | 136 | 51 | - | 96 |
| BUDDI [43] | 73 | - | 98 | 47 | - | 68 |
| Multi-HMR* [2] | 49.8 | 67.8 | 80.6 | 31.7 | 54.0 | 100.0 |
| PromptHMR* | 39.2 | 63.9 | 78.1 | 27.2 | 48.0 | 58.5 |
| PromptHMR | 30.1 | 39.6 | 39.5 | 24.7 | 46.5 | 45.3 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.487, + 0.483, + 0.557 + ], + "angle": 0, + "content": "Table 2. Comparison on interaction reconstruction. PromptHMR is more accurate in per-person and inter-person accuracy. * denote a method or baseline is not trained on HI4D or CHI3D. All metrics are in mm. The impact of HI4D and the interaction prompt are evaluated in Table 5." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.568, + 0.476, + 0.675 + ], + "angle": 0, + "content": "
| Train w/ text | Test w/ text | HBW | | | | |
| | | Height | Chest | Waist | Hip | P2P-20k |
| × | × | 69 | 51 | 88 | 63 | 26 |
| ✓ | × | 69 | 48 | 86 | 60 | 26 |
| ✓ | ✓ | 62 | 43 | 76 | 58 | 24 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.683, + 0.483, + 0.741 + ], + "angle": 0, + "content": "Table 3. Ablation of shape prompts using text. Training with shape prompts improves shape accuracy. Using shape prompts during inference further improves shape accuracy. The ablation study is conducted with a \\(448 \\times 448\\) model. Errors are in mm." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.764, + 0.483, + 0.794 + ], + "angle": 0, + "content": "cases. Ablation of HI4D (rows 1-2 in Tab. 5) shows that using masks as the spatial prompt improves accuracy." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.796, + 0.483, + 0.903 + ], + "angle": 0, + "content": "Experiments on the HBW validation set (Tab. 3) show that text prompts effectively improve shape accuracy when used during both training and testing. Moreover, training with shape descriptions alone provides an accuracy boost even if prompts are not given at test time. As illustrated in Fig. 6, text prompts provide notable improvements, especially when large perspective effects create ambiguity." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.381, + 0.899, + 0.468 + ], + "angle": 0, + "content": "
| Models | EMDB-2 (24) | | | | |
| | WA-MPJPE100 | W-MPJPE100 | RTE | Jitter | Foot Skating |
| WHAM [54] | 135.6 | 354.8 | 6.0 | 22.5 | 4.4 |
| TRAM [67] | 76.4 | 222.4 | 1.4 | 18.5 | 23.4 |
| GVHMR [53] | 111.0 | 276.5 | 2.0 | 16.7 | 3.5 |
| PromptHMR-Vid | 71.0 | 216.5 | 1.3 | 16.3 | 3.5 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.475, + 0.907, + 0.53 + ], + "angle": 0, + "content": "Table 4. Evaluation of motion in world coordinates. PromptHMR-Vid combined with metric SLAM from TRAM [67] surpasses SOTA methods at predicting human motion in world coordinates." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.55, + 0.905, + 0.676 + ], + "angle": 0, + "content": "
| Trained with | | | HI4D (14) | | |
| Mask | Interaction | HI4D | PA-MPJPE | MPJPE | Pair-PA-MPJPE |
| × | × | × | 47.0 | 71.4 | 87.2 |
| ✓ | × | × | 43.4 | 60.5 | 83.0 |
| × | ✓ | × | 43.7 | 61.3 | 73.0 |
| × | × | ✓ | 36.3 | 49.4 | 52.6 |
| ✓ | ✓ | ✓ | 36.5 | 47.1 | 47.9 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.512, + 0.684, + 0.907, + 0.753 + ], + "angle": 0, + "content": "Table 5. Ablation on interaction prompt. The interaction module improves inter-person reconstruction metrics Pair-PA-MPJPE on HI4D, especially when the method does not include HI4D in training. Ablation is conducted with a \\(448 \\times 448\\) model. All metrics are in mm." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.903 + ], + "angle": 0, + "content": "For interaction prompts, we show an ablation in Table 5. The proposed interaction module is beneficial and largely improves inter-person accuracy on HI4D even without HI4D training, indicating out-off-domain generalization. When trained on HI4D, the interaction module does not improve per-person PA-MPJPE but still improves interperson Pair-PA-MPJPE. Please refer to our SupMat for more qualitative results on interaction prompts." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.09, + 0.898, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.443, + 0.908, + 0.473 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparison: Multi-HMR vs PromptHMR. Our model can recover coherent 3D scenes of people. In crowded scenes, face detection provides reliable box prompts for our model. Please zoom in to see the details." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.486, + 0.881, + 0.677 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.681, + 0.908, + 0.71 + ], + "angle": 0, + "content": "Figure 8. Qualitative results. PromptHMR recovers coherent two-person close interaction. Despite suffering from some interpenetration, the relative positions of the interacting people are accurately recovered. More examples are provided in the Supplementary." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.727, + 0.214, + 0.743 + ], + "angle": 0, + "content": "5. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.484, + 0.903 + ], + "angle": 0, + "content": "We see PromptHMR as a step towards a holistic perception model for 3D humans, but several limitations need to be addressed in future work. Currently, the shape description and interaction prompts are not automatically generated and need to be supplied by the user. Future work should explore how to effectively integrate our promptable model with VLMs to automate prompting. We show how semantic prompts can improve reconstruction accuracy, but many other potential types of side information such as action descriptions, 3D scene context, or body measurements may" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.729, + 0.838, + 0.743 + ], + "angle": 0, + "content": "provide additional benefits in different scenarios." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.755, + 0.634, + 0.771 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.903 + ], + "angle": 0, + "content": "We have presented PromptHMR, a promptable HPS estimation approach that leverages full image context with spatial and semantic prompts to infer 3D humans in the scene. Our method demonstrates state-of-the-art accuracy across diverse benchmarks and generalizes well in the wild. Our experiments show that incorporating diverse input information through flexible prompting enables robustness and adaptability in challenging scenarios." 
+ } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.181 + ], + "angle": 0, + "content": "Acknowledgement. The authors would like to thank Yan Zhang, Yao Feng, and Nitin Saini for their suggestions. The majority of the work was done when Yufu was an intern at Meshcapade. Yufu and Kostas thank the support of NSF NCS-FO 2124355, NSF FRR 2220868, and NSF IISRI 2212433." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.182, + 0.482, + 0.243 + ], + "angle": 0, + "content": "Disclosure. While MJB is a co-founder and Chief Scientist at Meshcapade, his research in this project was performed solely at, and funded solely by, the Max Planck Society." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.256, + 0.188, + 0.272 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.281, + 0.483, + 0.322 + ], + "angle": 0, + "content": "[1] Nikos Athanasiou, Alpar Ceske, Markos Diomataris, Michael J. Black, and Gül Varol. MotionFix: Text-driven 3D human motion editing. In SIGGRAPH Asia, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.324, + 0.484, + 0.392 + ], + "angle": 0, + "content": "[2] Fabien Baradel, Matthieu Armando, Salma Galaoui, Romain Brégier, Philippe Weinzaepfel, Grégory Rogez, and Thomas Lucas. Multi-HMR: Multi-person whole-body human mesh recovery in a single shot. European Conference on Computer Vision, 2024. 3, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.394, + 0.484, + 0.449 + ], + "angle": 0, + "content": "[3] Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias Müller. ZoeDepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.451, + 0.483, + 0.52 + ], + "angle": 0, + "content": "[4] Michael J Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. BEDLAM: A synthetic dataset of bodies exhibiting detailed lifelike animated motion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8726-8737, 2023. 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.522, + 0.483, + 0.577 + ], + "angle": 0, + "content": "[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.579, + 0.482, + 0.646 + ], + "angle": 0, + "content": "[6] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2Mesh: Graph convolutional network for 3D human pose and mesh recovery from a 2D human pose. In European Conference on Computer Vision, pages 769-787. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.649, + 0.483, + 0.717 + ], + "angle": 0, + "content": "[7] Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Beyond static features for temporally consistent 3D human pose and shape from a video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1964-1973, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.719, + 0.483, + 0.801 + ], + "angle": 0, + "content": "[8] Vasileios Choutas, Lea Müller, Chun-Hao P. Huang, Siyu Tang, Dimitrios Tzionas, and Michael J. Black. Accurate 3D body shape regression using metric and semantic attributes. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2718-2728, 2022. 2, 3, 4, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.804, + 0.483, + 0.872 + ], + "angle": 0, + "content": "[9] Ginger Delmas, Philippe Weinzaepfel, Francesc Moreno-Noguer, and Grégory Rogez. PoseFix: correcting 3D human poses with natural language. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15018-15028, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.874, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[10] Ginger Delmas, Philippe Weinzaepfel, Francesc Moreno-Noguer, and Grégory Rogez. Posembroider: Towards a" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.281, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.12 + ], + "angle": 0, + "content": "3D, visual, semantic-aware human pose representation. In European Conference on Computer Vision, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.905, + 0.203 + ], + "angle": 0, + "content": "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.205, + 0.905, + 0.286 + ], + "angle": 0, + "content": "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.288, + 0.905, + 0.355 + ], + "angle": 0, + "content": "[13] Kaiwen Duan, Song Bai, Lingxi Xie, Honggang Qi, Qingming Huang, and Qi Tian. Centernet: Keypoint triplets for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6569-6578, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.357, + 0.905, + 0.426 + ], + "angle": 0, + "content": "[14] Sai Kumar Dwivedi, Yu Sun, Priyanka Patel, Yao Feng, and Michael J Black. TokenHMR: Advancing human mesh recovery with a tokenized pose representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1323-1333, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.428, + 0.905, + 0.509 + ], + "angle": 0, + "content": "[15] Jose M. Facil, Benjamin Ummenhofer, Huizhong Zhou, Luis Montesano, Thomas Brox, and Javier Civera. CAM-Convs: Camera-aware multi-scale convolutions for single-view depth. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 11818-11827, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.511, + 0.905, + 0.566 + ], + "angle": 0, + "content": "[16] Yao Feng, Jing Lin, Sai Kumar Dwivedi, Yu Sun, Priyanka Patel, and Michael J. Black. ChatPose: Chatting about 3D human pose. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.567, + 0.905, + 0.622 + ], + "angle": 0, + "content": "[17] Mihai Fieraru, Mihai Zanfir, Elisabeta Oneata, Alin-Ionut Popa, Vlad Olaru, and Cristian Sminchisescu. 
Reconstructing three-dimensional models of interacting humans. arXiv preprint arXiv:2308.01854, 2023. 2, 3, 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.624, + 0.905, + 0.691 + ], + "angle": 0, + "content": "[18] Shubham Goel, Georgios Pavlakos, Jathushan Rajasegaran, Angjoo Kanazawa, and Jitendra Malik. Reconstructing and tracking humans with transformers. Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.693, + 0.905, + 0.748 + ], + "angle": 0, + "content": "[19] Dorian F Henning, Tristan Laidlow, and Stefan Leutenegger. BodySLAM: joint camera localisation, mapping, and human motion tracking. In European Conference on Computer Vision, pages 656-673. Springer, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.749, + 0.905, + 0.817 + ], + "angle": 0, + "content": "[20] Dorian F Henning, Christopher Choi, Simon Schaefer, and Stefan Leutenegger. BodySLAM++: Fast and tightly-coupled visual-inertial camera and human motion tracking. In IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3781-3788. IEEE, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.818, + 0.907, + 0.901 + ], + "angle": 0, + "content": "[21] Chun-Hao P Huang, Hongwei Yi, Markus Höschle, Matvey Safroshkin, Tsvetelina Alexiadis, Senya Polikovsky, Daniel Scharstein, and Michael J Black. Capturing and inferring dense full-body human-scene contact. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13274-13285, 2022. 2, 6" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.161 + ], + "angle": 0, + "content": "[22] Wen Jiang, Nikos Kolotouros, Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Coherent reconstruction of multiple humans from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5579-5588, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.164, + 0.485, + 0.233 + ], + "angle": 0, + "content": "[23] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7122-7131, 2018. 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.235, + 0.484, + 0.303 + ], + "angle": 0, + "content": "[24] Angjoo Kanazawa, Jason Y Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5614-5623, 2019. 3, 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.305, + 0.484, + 0.389 + ], + "angle": 0, + "content": "[25] Manuel Kaufmann, Jie Song, Chen Guo, Kaiyue Shen, Tianjian Jiang, Chengcheng Tang, Juan José Zárate, and Otmar Hilliges. EMDB: The electromagnetic database of global 3d human pose and shape in the wild. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14632-14643, 2023. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.39, + 0.484, + 0.459 + ], + "angle": 0, + "content": "[26] Rawal Khirodkar, Timur Bagautdinov, Julieta Martinez, Su Zhaoen, Austin James, Peter Selednik, Stuart Anderson, and Shunsuke Saito. Sapiens: Foundation for human vision models. 
In European Conference on Computer Vision, pages 206-228. Springer, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.461, + 0.484, + 0.517 + ], + "angle": 0, + "content": "[27] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.518, + 0.484, + 0.587 + ], + "angle": 0, + "content": "[28] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. VIBE: Video inference for human body pose and shape estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5253-5263, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.589, + 0.484, + 0.658 + ], + "angle": 0, + "content": "[29] Muhammed Kocabas, Chun-Hao P Huang, Otmar Hilliges, and Michael J Black. PARE: Part attention regressor for 3D human body estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11127-11137, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.66, + 0.484, + 0.73 + ], + "angle": 0, + "content": "[30] Muhammed Kocabas, Chun-Hao P. Huang, Joachim Tesch, Lea Müller, Otmar Hilliges, and Michael J. Black. SPEC: Seeing people in the wild with an estimated camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11035-11045, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.732, + 0.484, + 0.8 + ], + "angle": 0, + "content": "[31] Muhammed Kocabas, Ye Yuan, Pavlo Molchanov, Yunrong Guo, Michael J Black, Otmar Hilliges, Jan Kautz, and Umar Iqbal. PACE: Human and camera motion estimation from inthe-wild videos. In International Conference on 3D Vision, pages 397-408, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.802, + 0.484, + 0.871 + ], + "angle": 0, + "content": "[32] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2252-2261, 2019. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.484, + 0.902 + ], + "angle": 0, + "content": "[33] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. CLIFF: Carrying location information" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.486, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.092, + 0.907, + 0.134 + ], + "angle": 0, + "content": "in full frames into human pose and shape estimation. In European Conference on Computer Vision, pages 590-606. Springer, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.136, + 0.907, + 0.19 + ], + "angle": 0, + "content": "[34] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12939-12948, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.193, + 0.906, + 0.262 + ], + "angle": 0, + "content": "[35] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision, pages 740-755. Springer, 2014. 
6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.264, + 0.905, + 0.291 + ], + "angle": 0, + "content": "[36] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.293, + 0.906, + 0.348 + ], + "angle": 0, + "content": "[37] Thomas Lucas, Fabien Baradel, Philippe Weinzaepfel, and Grégory Rogez. Posegpt: Quantization-based 3d human motion generation and forecasting. In European Conference on Computer Vision, pages 417-435, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.35, + 0.906, + 0.405 + ], + "angle": 0, + "content": "[38] Zhengyi Luo, S. Alireza Golestaneh, and Kris M. Kitani. 3d human motion estimation via motion compression and refinement. In Proceedings of the Asian Conference on Computer Vision, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.407, + 0.906, + 0.476 + ], + "angle": 0, + "content": "[39] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved cnn supervision. In International Conference on 3D Vision, pages 506-516. IEEE, 2017. 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.478, + 0.906, + 0.547 + ], + "angle": 0, + "content": "[40] Gyeongsik Moon and Kyoung Mu Lee. I2L-MeshNet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single RGB image. In European Conference on Computer Vision, pages 752-768. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.549, + 0.906, + 0.603 + ], + "angle": 0, + "content": "[41] Raul Mur-Artal and Juan D Tardós. ORB-SLAM: An opensource slam system for monocular, stereo, and RGB-D cameras. IEEE Transactions on Robotics, 33(5):1255-1262, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.606, + 0.906, + 0.66 + ], + "angle": 0, + "content": "[42] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. ORB-SLAM: A versatile and accurate monocular SLAM system. IEEE Transactions on Robotics, 31(5):1147-1163, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.662, + 0.907, + 0.732 + ], + "angle": 0, + "content": "[43] Lea Müller, Vickie Ye, Georgios Pavlakos, Michael J. Black, and Angjoo Kanazawa. Generative proxemics: A prior for 3D social interaction from images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.734, + 0.907, + 0.858 + ], + "angle": 0, + "content": "[44] Maxime Oquab, Timothée Darcet, Theo Moutakanni, Huy V. Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Russell Howes, Po-Yao Huang, Hu Xu, Vasu Sharma, Shang-Wen Li, Wojciech Galuba, Mike Rabbat, Mido Assran, Nicolas Ballas, Gabriel Synnaeve, Ishan Misra, Herve Jegou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. DINoV2: Learning robust visual features without supervision, 2023. 4, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.86, + 0.906, + 0.901 + ], + "angle": 0, + "content": "[45] Priyanka Patel and Michael J. Black. Camerahrm: Aligning people with perspective. International Conference on 3D Vision (3DV), 2025. 
6, 7" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.175 + ], + "angle": 0, + "content": "[46] Priyanka Patel, Chun-Hao P Huang, Joachim Tesch, David T Hoffmann, Shashank Tripathi, and Michael J Black. AGORA: Avatars in geography optimized for regression analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13468-13478, 2021. 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.178, + 0.483, + 0.261 + ], + "angle": 0, + "content": "[47] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3D hands, face, and body from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10975-10985, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.263, + 0.482, + 0.317 + ], + "angle": 0, + "content": "[48] Mathis Petrovich, Michael J Black, and Gül Varol. Action-conditioned 3D human motion synthesis with transformer VAE. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10985-10995, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.319, + 0.482, + 0.36 + ], + "angle": 0, + "content": "[49] Baldomero R. Árbol and Dan Casas. BodyShapeGPT: SMPL body shape manipulation with LLMs. In European Conference on Computer Vision Workshops, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.362, + 0.482, + 0.431 + ], + "angle": 0, + "content": "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.433, + 0.482, + 0.502 + ], + "angle": 0, + "content": "[51] Rene Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(3):1623-1637, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.504, + 0.482, + 0.573 + ], + "angle": 0, + "content": "[52] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J Guibas. HUMOR: 3D human motion model for robust pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11488-11499, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.575, + 0.482, + 0.63 + ], + "angle": 0, + "content": "[53] Zehong Shen, Huajin Pi, Yan Xia, Zhi Cen, Sida Peng, Zechen Hu, Hujun Bao, Ruizhen Hu, and Xiaowei Zhou. World-grounded human motion recovery via gravity-view coordinates. In SIGGRAPH Asia, 2024. 3, 6, 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.632, + 0.482, + 0.686 + ], + "angle": 0, + "content": "[54] Soyong Shin, Juyong Kim, Eni Halilaj, and Michael J Black. WHAM: Reconstructing world-grounded humans with accurate 3D motion. arXiv preprint arXiv:2312.07531, 2023. 
3, 5, 6, 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.688, + 0.482, + 0.743 + ], + "angle": 0, + "content": "[55] Stephan Streuber, M Alejandra Quiros-Ramirez, Matthew Q Hill, Carina A Hahn, Silvia Zuffi, Alice O'Toole, and Michael J Black. Body talk: Crowdshaping realistic 3D avatars with words. ACM TOG, 35(4):1-14, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.746, + 0.482, + 0.787 + ], + "angle": 0, + "content": "[56] Sanjay Subramanian, Evonne Ng, Lea Müller, Dan Klein, Shiry Ginosar, and Trevor Darrell. Pose priors from language models. arXiv preprint arXiv:2405.03689, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.789, + 0.482, + 0.871 + ], + "angle": 0, + "content": "[57] Qingping Sun, Yanjun Wang, Ailing Zeng, Wanqi Yin, Chen Wei, Wenjia Wang, Haiyi Mei, Chi-Sing Leung, Ziwei Liu, Lei Yang, and Zhongang Cai. AiOS: All-in-one-stage expressive human pose and shape estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 1834-1843, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[58] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.133 + ], + "angle": 0, + "content": "skeleton-disentangled representation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.135, + 0.905, + 0.19 + ], + "angle": 0, + "content": "[59] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J Black, and Tao Mei. Monocular, one-stage, regression of multiple 3D people. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11179-11188, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.192, + 0.905, + 0.258 + ], + "angle": 0, + "content": "[60] Yu Sun, Wu Liu, Qian Bao, Yili Fu, Tao Mei, and Michael J Black. Putting people in their place: Monocular regression of 3D people in depth. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13243-13252, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.261, + 0.907, + 0.329 + ], + "angle": 0, + "content": "[61] Yu Sun, Qian Bao, Wu Liu, Tao Mei, and Michael J Black. TRACE: 5D temporal regression of avatars with dynamic cameras in 3D environments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8856-8866, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.33, + 0.905, + 0.383 + ], + "angle": 0, + "content": "[62] Zachary Teed and Jia Deng. DRPOID-SLAM: Deep visual slam for monocular, stereo, and RGB-D cameras. Advances in Neural Information Processing Systems, 34:16558-16569, 2021. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.386, + 0.905, + 0.427 + ], + "angle": 0, + "content": "[63] Zachary Teed, Lahav Lipson, and Jia Deng. Deep patch visual odometry. Advances in Neural Information Processing Systems, 36, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.428, + 0.905, + 0.481 + ], + "angle": 0, + "content": "[64] Guy Tevet, Sigal Raab, Brian Gordon, Yoni Shafir, Daniel Cohen-or, and Amit Haim Bermano. Human motion diffusion model. In International Conference on Learning Representations, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.484, + 0.905, + 0.551 + ], + "angle": 0, + "content": "[65] Timo Von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In European Conference on Computer Vision, pages 601-617, 2018. 2, 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.554, + 0.905, + 0.607 + ], + "angle": 0, + "content": "[66] Yufu Wang and Kostas Daniilidis. ReFit: Recurrent fitting network for 3D human recovery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14644-14654, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.609, + 0.905, + 0.663 + ], + "angle": 0, + "content": "[67] Yufu Wang, Ziyun Wang, Lingjie Liu, and Kostas Daniilidis. TRAM: Global trajectory and motion of 3d humans from inthe-wild videos. In European Conference on Computer Vision, 2024. 2, 3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.665, + 0.905, + 0.733 + ], + "angle": 0, + "content": "[68] Jiahong Wu, He Zheng, Bo Zhao, Yixin Li, Baoming Yan, Rui Liang, Wenjia Wang, Shipei Zhou, Guosen Lin, Yanwei Fu, et al. AI challenger: A large-scale dataset for going deeper in image understanding. arXiv preprint arXiv:1711.06475, 2017. 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.735, + 0.905, + 0.789 + ], + "angle": 0, + "content": "[69] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. arXiv preprint arXiv:2309.16671, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.79, + 0.905, + 0.858 + ], + "angle": 0, + "content": "[70] Vickie Ye, Georgios Pavlakos, Jitendra Malik, and Angjoo Kanazawa. Decoupling human and camera motion from videos in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21222-21232, 2023. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.86, + 0.907, + 0.901 + ], + "angle": 0, + "content": "[71] Yifei Yin, Chen Guo, Manuel Kaufmann, Juan Jose Zarate, Jie Song, and Otmar Hilliges. Hi4D: 4D instance segmentation of close human interaction. In Proceedings of" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.092, + 0.482, + 0.12 + ], + "angle": 0, + "content": "the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17016-17027, 2023. 2, 3, 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.483, + 0.191 + ], + "angle": 0, + "content": "[72] Ye Yuan, Umar Iqbal, Pavlo Molchanov, Kris Kitani, and Jan Kautz. GLAMR: Global occlusion-aware human mesh recovery with dynamic cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11038-11049, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.192, + 0.483, + 0.274 + ], + "angle": 0, + "content": "[73] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. PyMAF: 3D human pose and shape regression with pyramidal mesh alignment feedback loop. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11446-11456, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.277, + 0.482, + 0.346 + ], + "angle": 0, + "content": "[74] Yizhou Zhao, Tuanfeng Yang Wang, Bhiksha Raj, Min Xu, Jimei Yang, and Chun-Hao Paul Huang. Synergistic global-space camera and human reconstruction from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1216-1226, 2024. 3" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.346 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.244, + 0.086, + 0.756, + 0.14 + ], + "angle": 0, + "content": "PromptHMR: Promptable Human Mesh Recovery Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.156, + 0.273, + 0.172 + ], + "angle": 0, + "content": "7. Additional Results" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.182, + 0.483, + 0.243 + ], + "angle": 0, + "content": "In this section, we demonstrate more qualitative results to show the effects of interaction prompting and the video module. Please refer to the supplementary video to see the results from PromptHMR-Vid." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.253, + 0.303, + 0.27 + ], + "angle": 0, + "content": "7.1. Interaction Prompting" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.277, + 0.485, + 0.518 + ], + "angle": 0, + "content": "We perform qualitative and quantitative ablation studies of interaction prompting on the HI4D dataset. In Tab. 5 of the main paper, we demonstrate that introducing interaction prompting improves the quantitative results on HI4D. In Fig. 9, we present more qualitative results to show the effect of the interaction module. As shown in the first column of Fig. 9, without the interaction module, the model does not learn to reconstruct close interaction effectively, even when trained with CHI3D interaction data. By adding the proposed interaction module, in the second column, the relative position and orientation of the interacting people are improved, and the penetration is reduced. Note that if we turn off the interaction module via the proposed flow control, the results will become similar to the first column. Finally, training with both CHI3D and HI4D leads to better results." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.534, + 0.279, + 0.551 + ], + "angle": 0, + "content": "8. Experiment Details" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.56, + 0.194, + 0.574 + ], + "angle": 0, + "content": "8.1. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.583, + 0.483, + 0.718 + ], + "angle": 0, + "content": "The training set of the image model includes BEDLAM [4], AIC [68], InstaVariety [24], HI4D [71], CHI3D [17], AGORA [46], 3DPW [65], COCO [35], and MPII [39], with the sampling rate of \\(\\{0.2, 0.2, 0.3, 0.08, 0.08, 0.06, 0.06, 0.01, 0.01\\}\\). All input images are padded and resized to \\(896 \\times 896\\). During training, we employ rotation and color jitter augmentation. For PromptHMR-Vid, we use BEDLAM and 3DPW datasets following [53, 54]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.483, + 0.825 + ], + "angle": 0, + "content": "To use datasets with different annotations for training, we adopt different losses described in Sec.3.5 of the main paper. For the ones (e.g. BEDLAM, AGORA, CHI3D, HI4D) with ground truth SMPL/SMPL-X annotations, we employ all loss items. While on AIC, InstaVariety, and 3DPW, we drop the translation loss. On COCO and MPII, we only compute 2D keypoint reprojection loss." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.826, + 0.484, + 0.902 + ], + "angle": 0, + "content": "We generate the whole-body bounding boxes by projecting the ground-truth SMPL-X meshes onto the image plane. To generate the face bounding boxes, we project the head vertices. To generate truncated boxes, we take groups of keypoints (e.g. upper body keypoints) and compute their" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.157, + 0.905, + 0.186 + ], + "angle": 0, + "content": "bounding boxes. Gaussian noise is then added to both corners." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.188, + 0.907, + 0.309 + ], + "angle": 0, + "content": "On BEDLAM, AGORA, and AIC, we follow SHAPY [8] to compute the shape attribute scores. During training, we compose a shape description for each instance, such as \"a tall and broad-shoulder female\" with a few augmentation rules. Each sentence will randomly sample 1-3 top attributes. The gender information is augmented with synonyms, such as \"female\", \"woman\", \"girl\", etc." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.32, + 0.649, + 0.334 + ], + "angle": 0, + "content": "8.2. Architecture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.343, + 0.907, + 0.478 + ], + "angle": 0, + "content": "We adopt the ViT-L [11], pretrained by DINOv2 [44], as our image encoder. We use an input image size of 896 and a patch size of 14, leading to the same spatial resolution as the recent Sapiens models [26]. The text encoder is from MetaCLIP [69]. The SMPL-X decoder consists of 3 attention blocks with an embedding dimension of 1024. From the output tokens \\((T_{smpl}^{\\prime}\\) and \\(T_{depth}^{\\prime}\\)), we use separate 2-layer MLPs to regress \\(\\theta\\), \\(\\beta\\), \\(p_{xy}\\) and \\(p_z\\) as introduced in Sec.3.2." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.489, + 0.618, + 0.505 + ], + "angle": 0, + "content": "8.3. Training" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.513, + 0.906, + 0.603 + ], + "angle": 0, + "content": "We train the PromptHMR image model using 8 H100 GPUs, with a batch size of 96 (12 images on each GPU). We use AdamW with a learning rate of 1e-5 for the image encoder, a learning rate of 3e-5 for the prompt encoder, and the SMPL-X decoder, \\(\\beta_{1}\\) of 0.9, \\(\\beta_{2}\\) of 0.999, and a weight decay of 5e-5." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.604, + 0.906, + 0.65 + ], + "angle": 0, + "content": "The losses presented in Sec.3.5 are weighted differently. For \\(\\mathcal{L}_{2D},\\mathcal{L}_{3D},\\mathcal{L}_{\\mathrm{SMPL}},\\mathcal{L}_V\\) and \\(\\mathcal{L}_{trans}\\), the weights are set to \\(\\{50.0,5.0,1.0,1.0,10.0\\}\\) respectively." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.67, + 0.907, + 0.761 + ], + "angle": 0, + "content": "PromptHMR-Vid We train the PromptHMR video model on 2 H100 GPUs with a batch size of 512 samples consisting of 120 frames each. We use AdamW with a learning rate of 2e-4 and a weight decay of 5e-5. We use the same losses as the image-based version in addition to binary cross-entropy loss for joint contact predictions." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.771, + 0.603, + 0.786 + ], + "angle": 0, + "content": "8.4. Metric" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.905, + 0.825 + ], + "angle": 0, + "content": "In this section, we provide more details on the evaluation metric used in Sec.4 of the main paper." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.906, + 0.901 + ], + "angle": 0, + "content": "Mean Per Joint Position Error (MPJPE) is calculated by aligning the 3D joints obtained from SMPL-X with the ground truth at the pelvis before computing the mean square error. For historical reasons, different datasets use a different set of joints. Additionally, the pelvis definition could" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.098, + 0.082, + 0.879, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.711, + 0.908, + 0.754 + ], + "angle": 0, + "content": "Figure 9. Ablation of interaction module. When fine-tuning the image model on CHI3D, adding the interaction module improves two-person interaction reconstruction on HI4D, which demonstrates the out-of-domain generalization ability of interaction prompting. Fine-tuning on both CHI3D and HI4D further improves results." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.772, + 0.487, + 0.894 + ], + "angle": 0, + "content": "be different. To evaluate methods that predict SMPL-X on the datasets with SMPL labels, it's customary to convert the SMPL-X vertices to SMPL vertices and use a joint regressor on the converted vertices to obtain the 3D joints comparable to the labels. Note that all the above choices could alter the results and sometimes produce large \"artificial\" improvements. So we strictly follow the most recent methods in the evaluation procedure. It's reported in the unit of mm." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.772, + 0.909, + 0.832 + ], + "angle": 0, + "content": "Per Vertex error (PVE) computes mean square error on the vertices after pelvis alignment. Compared to MPJPE, it measures the combined pose and shape error. It's reported in the unit of mm." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Procrustes-aligned MPJPE (PA-MPJPE) performs general Procrustes alignment on the 3D joints before computing MPJPE. It measures purely the local articulated pose error. It's reported in the unit of mm." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.181 + ], + "angle": 0, + "content": "Paired PA-MPJPE (Pair-PA-MPJPE) aligns two people as a whole with the ground truth before computing MPJPE. In addition to per-person error, it also measures the error in the relative position and orientation of the two people. It's used in HI4D and CHI3D to evaluate interaction reconstruction. It's reported in the unit of mm." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.182, + 0.482, + 0.242 + ], + "angle": 0, + "content": "World-aligned \\(\\mathbf{MPJPE}_{100}\\) (WA-MPJPE\\(_{100}\\)) measures the world-grounded motion accuracy. It aligns a segment of 100 frames of predictions with the ground truth before computing MPJPE. It's reported in the unit of mm." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.243, + 0.482, + 0.317 + ], + "angle": 0, + "content": "World \\(\\mathrm{MPJPE}_{100}\\) (\\(\\mathbf{W} - \\mathbf{MPJPE}_{100}\\)) is similar to WA-MPJPE but only aligns the first two frames of the 100-frame segment. Therefore, it provides a better measurement of the drifting in the direction and scale of the trajectories. It's reported in the unit of mm." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.318, + 0.482, + 0.378 + ], + "angle": 0, + "content": "Root Trajectory Error (RTE) measures the accuracy of the whole trajectory including the scale. 
It performs rigid alignment on the trajectory of the root and computes the mean square error. It's reported in the unit of \\(\\%\\)" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.379, + 0.482, + 0.424 + ], + "angle": 0, + "content": "Motion Jitter (Jitter) uses finite difference to compute the jerk \\((3^{rd}\\) derivative) on the 3D joints. It measures rapid abrupt changes. It's reported in the unit of \\(10m / s^3\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.424, + 0.482, + 0.498 + ], + "angle": 0, + "content": "Foot Skating measures erroneous foot sliding. It thresholds the velocity of the ground truth foot vertices to compute contact frames, and calculates the displacement on the predicted foot vertices during contact. It's reported in the unit of \\( \\text{mm} \\)." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_origin.pdf b/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..724d89f6d7c79e75c9b6e25de16ab3d0f6b3c72b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97c349504e3934e27bc9e91bb0e53e30f2a2157f21da7f546628fe7936d36a69 +size 7573447 diff --git a/data/2025/2504_06xxx/2504.06397/full.md b/data/2025/2504_06xxx/2504.06397/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7041ce594de57d4ba30f88f670f398ac01503d44 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/full.md @@ -0,0 +1,455 @@ +# PromptHMR: Promptable Human Mesh Recovery + +Yufu Wang $^{1,4}$ Yu Sun $^{1}$ Priyanka Patel $^{1}$ Kostas Daniilidis $^{4,5}$ + +Michael J. Black1,2 Muhammed Kocabas1,2,3 + +$^{1}$ Meshcapade $^{2}$ MPI for Intelligent Systems $^{3}$ ETH Zürich $^{4}$ University of Pennsylvania $^{5}$ Archimedes + +https://yufu-wang.github.io/phmr-page + +![](images/9270978d52c1cf9dadd732232e763d94d0b8368cc6d89f133ad0335f742112de.jpg) +image $\downarrow$ box prompts + +![](images/201617bcc60b7ea35f45b6e032b28f2e5f14eeb391b77e8238450717c31c8156.jpg) +image ↓ box prompts + +![](images/f6ba05c518e44eb65aca5d52635611bd11aa77d87bb42893fe1dfbd26de0b8ec.jpg) +image ↓ masks +PromptHMR + +![](images/02daf49395bd2b890a48cabb843ab75004decfd12c4023d5ee0b4d1251cfd2f6.jpg) +image box+text +PromptHMR + +![](images/4dd0a14461f0856bedb6eedd29c14f07ab7a15e151b7f5a8f9b9fc20f3634e8e.jpg) +PromptHMR + +![](images/756ef6ee3dce2b408d2c590ea77778c0e6c4634ab245800eabe66fc8221dff3c.jpg) +PromptHMR +Figure 1. PromptHMR is a promptable human pose and shape (HPS) estimation method that processes images with spatial or semantic prompts. It takes "side information" readily available from vision-language models or user input to improve the accuracy and robustness of 3D HPS. PromptHMR recovers human pose and shape from spatial prompts such as (a) face bounding boxes, (b) partial or complete person detection boxes, or (c) segmentation masks. It refines its predictions using semantic prompts such as (c) person-person interaction labels for close contact scenarios, or (d) natural language descriptions of body shape to improve body shape predictions. Both image and video versions of PromptHMR achieve state-of-the-art accuracy. 
+ +![](images/7c2f9735852252556d12572991c936f8502ca7b7ceaa2c90fbd13948e95061f2.jpg) + +![](images/9e599b70e43046dd11573a4225224c1af4fd86746468273033a022f96c5b671e.jpg) + +# Abstract + +Human pose and shape (HPS) estimation presents challenges in diverse scenarios such as crowded scenes, person-person interactions, and single-view reconstruction. Existing approaches lack mechanisms to incorporate auxiliary "side information" that could enhance reconstruction accuracy in such challenging scenarios. Furthermore, the most accurate methods rely on cropped person detections and cannot exploit scene context while methods that process the whole image often fail to detect people and are less accurate than methods that use crops. While recent language-based methods explore HPS reasoning through large language or vision-language models, their metric accuracy is well below + +the state of the art. In contrast, we present PromptHMR, a transformer-based promptable method that reformulates HPS estimation through spatial and semantic prompts. Our method processes full images to maintain scene context and accepts multiple input modalities: spatial prompts like bounding boxes and masks, and semantic prompts like language descriptions or interaction labels. PromptHMR demonstrates robust performance across challenging scenarios: estimating people from bounding boxes as small as faces in crowded scenes, improving body shape estimation through language descriptions, modeling person-person interactions, and producing temporally coherent motions in videos. Experiments on benchmarks show that PromptHMR achieves state-of-the-art performance while offering flexible prompt-based control over the HPS estimation process. + +# 1. Introduction + +The estimation of 3D human pose and shape (HPS) is classically viewed as regressing the parameters of shape and pose from pixels. In particular, most methods take a tightly cropped image of a person and output the pose and shape in camera coordinates. While the accuracy of such methods has increased rapidly, they do not address the whole problem. In particular, an HPS method should be able to take an image or video containing complex human-human and human-scene interactions, return the parameters of every person in the scene, and place these people in a consistent global coordinate frame. + +Our key observation is that the classical "pixels to parameters" formulation of the problem is too narrow. Today, we have large vision-language foundation models (VLMs) that understand a great deal about images and what people are doing in them. What these models lack, however, is an understanding of 3D human pose and shape. Recent work [10, 16] has tried to bring together VLMs and 3D HPS but with 3D accuracy well below the best classical methods. + +Consequently, we need to think about the problem in a different way and ask whether we can exploit readily available side information (e.g. provided by a VLM) to improve 3D HPS regression robustness, usefulness, and accuracy. To that end, we develop a novel "promptable" HPS architecture called PromptHMR. Consider the sample images shown in Fig. 1. In crowded scenes, existing person detection methods struggle, while face detection methods remain reliable. When people closely interact, their body parts overlap and occlude each other, introducing ambiguity in pose estimation. Moreover, 3D body shape estimation from monocular views is challenging due to perspective ambiguity. 
In all these cases, we can extract cues, or prompts, that provide "side information" that can help an HPS method better analyze the scene. PromptHMR formalizes this intuition by combining image evidence with different types of spatial and semantic information that can come from either humans or AI systems such as VLMs.

Specifically, our approach combines three key components: (1) a vision transformer that extracts features from high-resolution full images to preserve scene context, (2) a multi-modal prompt encoder that processes spatial and semantic inputs, and (3) a transformer decoder that attends to both prompt and image tokens to generate SMPL-X [47] body parameters. This design addresses the limitations of cropped-image HPS methods by processing full images using side information in the form of prompts. It addresses the challenges that full-image HPS methods have in detecting all people in a scene by accepting readily available bounding boxes. Last, our method incorporates auxiliary semantic information through text descriptions or interaction labels.

By combining spatial and semantic prompting, our method offers a powerful and versatile approach to 3D HPS estimation from the whole image. At test time, we show that this promptable structure (1) can take various bounding boxes or segmentation masks to recover full-body HPS in a robust way, (2) can improve its body shape predictions by using textual descriptions as input, (3) is capable of modeling person-person close interaction directly in the regression process, and (4) uses full image context to reconstruct people coherently in the camera space and the world space. Our model can handle video by incorporating temporal transformer layers at the SMPL-X decoding phase, yielding temporally stable and smooth motions. Last, following TRAM [67], we combine the temporal version of our model with metric SLAM to estimate human motion in world coordinates.

We make several key design choices that make PromptHMR successful. To achieve robustness to different spatial inputs, we train our model by simulating noisy full-body and face-region bounding boxes. For improved body shape estimation, we leverage SHAPY [8] to generate automatic body shape descriptions for training samples and process them with a pretrained text encoder [50]. To enhance person-person interaction reconstruction, we use segmentation masks as more precise spatial prompts and develop person-person attention layers that operate between prompted people, producing coherent reconstructions of close interactions. Through random masking of different input types during training, our model learns to work with any combination of prompts at test time.

Quantitative experiments on the EMDB [25], 3DPW [65], RICH [21], Hi4D [71], CHI3D [17] and HBW [8] benchmark datasets demonstrate that our method outperforms state-of-the-art (SOTA) approaches and strong baselines. We also provide many qualitative examples of in-the-wild images and videos that illustrate the robustness and generalization of PromptHMR.

By moving away from the pure pixels-to-parameters approach, PromptHMR not only achieves a new SOTA, but also shows a new way of improving both accuracy and robustness by leveraging side information that is easily available. One can think of this as a collaboration between VLMs, which know a lot about people in images but not in 3D, and a metric regressor that knows a lot about 3D humans but not about the semantics of what they do.
We show that this combination has significant upside potential to increase both generality and accuracy. Our code and model are available for research purposes.

# 2. Related Work

Human pose and shape estimation from images. Existing methods for human pose and shape (HPS) estimation can be broadly categorized into two main approaches. The first [6, 18, 23, 29, 30, 32-34, 40, 66, 73] uses a tightly cropped image of an individual as input, and estimates pose and shape in camera coordinates. While effective for isolated individuals, this approach discards scene context that is essential to resolve human pose in cases of occlusion, severe overlap and close interaction in multi-person scenes [17, 71].

The second category [2, 22, 57, 59–61] builds upon object detection frameworks [5, 13] to jointly detect humans and estimate their pose and shape parameters. Having access to the entire image, these methods can better perceive occluded individuals and infer depth relationships, but they often suffer from detection failures due to the difficulty in simultaneously performing detection and reconstruction. Our "promptable" architecture leverages detection box prompts to resolve such conflicts while having access to the entire scene context.

Human pose and shape estimation from video. Methods for human motion estimation from video can also be divided into two main categories. The first [7, 24, 28, 38, 58] focuses on estimating smooth human motion in camera space. These methods build upon single-person HPS estimation approaches [23, 32] by adding temporal layers during the SMPL decoding phase to introduce temporal coherence.

More recent methods estimate human motion in world coordinates from videos captured with dynamic cameras. These methods follow a two-stage approach, first estimating camera motion using SLAM techniques [19, 20, 41, 42, 62, 63], and then leveraging human motion priors to optimize the human world motion [31, 70, 72]. Others [53, 54] learn temporal models to directly regress human world motion from image and camera features. Still others [67, 74] use monocular metric depth estimation to solve for the scale of camera motion and transform human motion from camera space to world coordinates.

In our approach, we extend PromptHMR to video by taking the SMPL-X output tokens and utilizing a temporal transformer module to estimate temporally stable and smooth human motion and translation in camera space. We follow TRAM [67] to transform human motion to world coordinates due to its simplicity and effectiveness.

Semantic reasoning about 3D humans in images. Recent methods explore combining different types of semantic information, such as language descriptions and knowledge of person-person interactions, to improve reasoning about 3D humans from images and videos. For example, ChatPose [16] follows the common approach of visual language models (VLMs) [36] by fine-tuning a large language model (LLM) with a combination of images and tokens to estimate SMPL parameters. In a similar direction, PoseEmbroider [10] is a multi-modal framework that aligns image, 3D pose, and text representations in a shared latent space. While ChatPose focuses on combining high-level scene reasoning with 3D HPS, PoseEmbroider exploits detailed language descriptions of human pose. While promising, neither method achieves SOTA accuracy on the HPS task. Note that many other methods relate language to human pose or motion, without considering images [1, 9, 37, 48, 64], but these are outside our scope.
+ +Additionally, several methods [8, 49, 55] focus on modeling the relationship between SMPL body shape and natural language descriptions. These methods show that language descriptions and images can provide complementary information to solve this task. Other approaches, such as BUDDI [43] and ProsePose [56], address the challenge of estimating person-person interactions. BUDDI is an optimization-based approach that leverages diffusion model as a prior over interacting people, while ProsePose queries a VLM to estimate contact points on the human body surface and uses these contact points to guide an optimization process for improving human interaction. + +Overall, methods like ChatPose [16] and PoseEmbroider [10] are promising steps toward jointly learning the relationship between vision, language, and 3D humans, but their understanding of 3D humans remains limited, as indicated by their relatively low 3D pose accuracy. Meanwhile, SHAPY [8], BodyShapeGPT [49], and BodyTalk [55] focus solely on exploring the relationship between SMPL body shape and natural language. BUDDI and ProsePose are post-processing approaches for interaction that do not directly reason using image information. + +Our approach addresses the limitations of these methods by training a single model capable of flexible prompting that achieves state-of-the-art (SOTA) performance, not only on standard HPS benchmarks but also on benchmarks tailored to body shape and person-person interaction. + +# 3. Method + +Given an image $I$ containing $N$ people and a set of prompts, our main goal is to recover the pose, shape, and locations of the people in the camera space to form a coherent human-centric 3D scene. Figure 2 shows an overview. + +# 3.1. Promptable mesh regression + +We adopt SMPL-X [47] to represent each person $i$ in the 3D space, including the orientation $\phi_i \in \mathbb{R}^3$ , local body pose $\theta_i \in \mathbb{R}^{22 \times 3}$ , shape $\beta_i \in \mathbb{R}^{10}$ , and translation $\tau_i \in \mathbb{R}^3$ in the camera space. We do not include face and hand parameters in this work. Each human $H_i$ is mapped to a 3D mesh with the differentiable SMPL-X layer. + +$$ +H _ {i} = \left\{\phi_ {i}, \theta_ {i}, \beta_ {i}, \tau_ {i} \right\}. \tag {1} +$$ + +Each person can be prompted with spatial and semantic prompts. Spatial prompts include a bounding box $b_{i} \in \mathbb{R}^{2 \times 2}$ (the two corners) and a segmentation mask $m_{i} \in \mathbb{R}^{h \times w}$ . Semantic prompts consist of text and two-person interaction labels. The text prompt is the CLIP embedding $t_{i}$ of a sentence describing the body shape. The interaction prompt is a binary variable $k_{i}$ indicating whether two + +![](images/5052c9e8ab63f19eb48f844b7af74060fa66875a0636e9684ea47923970d46f8.jpg) +Figure 2. Method overview. PromptHMR estimates SMPL-X parameters for each person in an image based on various types of prompts, such as boxes, language descriptions, and person-person interaction cues. Given an image and prompts, we utilize a vision transformer to generate image embeddings and mask and prompt encoders to map different types of prompts to tokens. Optionally, camera intrinsics can be embedded along with the image embeddings. The image embeddings and prompt tokens are then fed to the SMPL-X decoder. The SMPL-X decoder is a transformer-based module that attends to both the image and prompt tokens to estimate SMPL-X parameters. 
Note that the language and interaction prompts are optional, but providing them enhances the accuracy of the estimated SMPL-X parameters.

people are in close contact. While semantic prompts are optional, each human needs at least one spatial prompt to be reconstructed. Overall, the input prompts are represented as $P_{i}$:

$$
P_{i} \subseteq \left\{ b_{i}, m_{i}, t_{i}, k_{i} \right\} \tag{2}
$$

$$
b_{i} \in P_{i} \ \text{or} \ m_{i} \in P_{i}.
$$

Promptable human mesh recovery (PromptHMR) is defined as a learnable function that maps an image and a set of prompts to a set of 3D humans

$$
f: \left(I, \left\{ P_{i} \right\}_{i=1}^{N}\right) \rightarrow \left\{ H_{i} \right\}_{i=1}^{N}. \tag{3}
$$

This task definition integrates all available contexts to locate and reconstruct prompted humans in the image.

# 3.2. Model

Image encoder. The image is first encoded as tokens by a vision transformer (ViT) encoder from DINOv2 [12, 44]:

$$
F = \operatorname{Encoder}(I). \tag{4}
$$

To ensure sufficient resolution for modeling humans at both near and far distances, we use $896 \times 896$ images. The encoder is run once per frame regardless of the number of people prompted. When camera intrinsics are provided, we add positional encoding of the camera rays to the image tokens to make them camera-aware [2, 15].

Mask encoder. When available, masks are first processed by an encoder consisting of strided convolutional layers that downsample the masks. The output mask features are added to the image tokens. If no mask is provided, a learned "no mask" token is added instead:

$$
F_{i} = \operatorname{Encoder}_{\mathrm{m}}\left(m_{i}\right) + F. \tag{5}
$$

Prompt encoder. The prompt encoder consists of a set of transformations that map different types of prompts to token vectors of the same dimension. When a prompt is not available, it is replaced with a learned null token.

For bounding boxes, we encode $b_{i}$ using positional encoding summed with learned embeddings to form the box prompt tokens $T_{bi} = \mathrm{PE}(b_{i})$, with $T_{bi} \in \mathbb{R}^{2 \times d}$. We design different box transformations during training to allow the model to use different boxes as a human identifier. In the training phase, each instance is prompted with either a whole-body bounding box, a face bounding box, or a truncated box covering part of the body. Gaussian noise is added to both corners. At inference time, the model accepts boxes without needing to know the box types.

Language is a natural way to supply semantic information, and in this paper, we use language to supplement spatial prompts with information on body shape. A sentence such as "a muscular and tall male" is encoded with the CLIP text encoder $T_{ti} = \mathrm{CLIP}(t_i)$, with $T_{ti} \in \mathbb{R}^{d}$. To generate paired (image, text) data, we run SHAPY's [8] shape-to-attribute method on the ground truth shape parameters to obtain shape attribute scores and randomly pick a subset of top attributes to form a sentence.

The interaction prompt $k_{i}$ passes through the prompt encoder without modification and directly switches on or off the cross-person attention that is described in Sec. 3.3.

SMPL-X decoder. The SMPL-X decoder appends two query tokens $T_{\mathrm{smpl}}, T_{\mathrm{depth}}$ to the prompt tokens $T_{bi}, T_{ti}$ to form the person-specific prompt $T_i \in \mathbb{R}^{5 \times d}$.
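
To make the shape bookkeeping behind the person-specific prompt $T_i \in \mathbb{R}^{5 \times d}$ concrete, the following is a minimal PyTorch-style sketch of how box, text, and query tokens could be assembled. All module names, dimensions, and the choice of a linear layer as a stand-in for the positional encoding are illustrative assumptions, not the released implementation.

```python
import torch
import torch.nn as nn

class PromptTokenizer(nn.Module):
    """Illustrative sketch: maps one person's prompts to 5 tokens of dimension d.

    Token layout (one possible choice): 2 box-corner tokens, 1 text token,
    1 SMPL query token, 1 depth query token.
    """
    def __init__(self, d: int = 1024, clip_dim: int = 512):
        super().__init__()
        self.pos_enc = nn.Linear(2, d)                     # stand-in for a positional encoding of box corners
        self.box_embed = nn.Parameter(torch.zeros(2, d))   # learned embeddings for the two corners
        self.null_text = nn.Parameter(torch.zeros(1, d))   # learned null token when no text prompt is given
        self.text_proj = nn.Linear(clip_dim, d)            # projects a CLIP sentence embedding to dimension d
        self.query_smpl = nn.Parameter(torch.zeros(1, d))  # T_smpl query token
        self.query_depth = nn.Parameter(torch.zeros(1, d)) # T_depth query token

    def forward(self, box_xyxy: torch.Tensor, text_emb: torch.Tensor | None) -> torch.Tensor:
        # box_xyxy: (2, 2) normalized corner coordinates; text_emb: (clip_dim,) or None
        box_tokens = self.pos_enc(box_xyxy) + self.box_embed           # (2, d)
        text_token = (self.null_text if text_emb is None
                      else self.text_proj(text_emb).unsqueeze(0))      # (1, d)
        return torch.cat(
            [box_tokens, text_token, self.query_smpl, self.query_depth], dim=0)  # (5, d)

# Example: one person prompted with a noisy whole-body box and no text description.
tokenizer = PromptTokenizer()
T_i = tokenizer(torch.tensor([[0.1, 0.2], [0.6, 0.9]]), text_emb=None)
print(T_i.shape)  # torch.Size([5, 1024])
```

In the actual model the learned tokens are trained jointly with the decoder; the sketch only illustrates how optional prompts reduce to fixed-size token sets.
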
Finally, we use a standard transformer decoder and two MLP heads to produce the final output

$$
T_{smpl}^{\prime}, T_{depth}^{\prime} = \operatorname{Decoder}(F_{i}, T_{i})
$$

$$
\phi_{i}, \theta_{i}, \beta_{i} = \operatorname{Head}_{smpl}\left(T_{smpl}^{\prime}\right) \tag{6}
$$

$$
\tau_{i} = \operatorname{Head}_{depth}\left(T_{depth}^{\prime}\right).
$$

The transformer consists of three attention blocks. Each block applies self-attention on the tokens, cross-person attention (described in Sec. 3.3), and then two-way cross-attention between the tokens and the image embeddings [27]. The self-attention and cross-attention with the image are applied to each prompted person independently. We use separate tokens $T_{\mathrm{smpl}}$ and $T_{\mathrm{depth}}$ to make the location representation invariant to the 3D human pose and shape representation.

Regressing the location of the human in the camera space is much more challenging than in most prior work that models humans in a cropped image space. Therefore, we do not regress $\tau$ directly. We regress the focal-length-normalized 2D translation $p_{xy} \in \mathbb{R}^2$ and inverse depth $p_z \in \mathbb{R}$, and then transform them to $\tau$ as follows

$$
t_{xy} = \frac{p_{xy}}{p_{z}}, \quad t_{z} = \frac{1}{p_{z}} \times \frac{f}{f_{c}}, \quad \tau = [t_{xy}, t_{z}], \tag{7}
$$

where $f$ is the ground truth or estimated focal length of the image, and $f_{c}$ is the canonical focal length. Predicting the normalized inverse depth follows the recent monocular depth literature [51] and is also intuitive since the inverse depth is linearly related to the size of the human in the image. Predicting $p_{xy}$ is equivalent to predicting the 2D location of the human in a normalized image plane.

# 3.3. Two-person interaction

We introduce promptable layers in the decoder to model two-person interaction. We describe the case where there are two people in the image, but the implementation can extend to model an interacting pair in a larger group.

The promptability is modeled as a flow control with a residual connection (Fig. 3). Specifically, if two humans are interacting (as indicated by $k_{i}$), their query tokens pass through an additional self-attention layer; otherwise, non-interacting humans skip this layer.

Applying attention to every person often creates unnecessary dependencies in crowded scenes, and there is limited training data for large-group scenarios. However, there is high-quality data featuring two-person social interactions. By making the interaction layers promptable, we mitigate data diversity issues and increase flexibility, regardless of the number of people in the scene.

Our proposed interaction layer uses a standard self-attention mechanism. First, we add positional encodings to the query tokens to distinguish the two individuals. The encoded tokens then go through a self-attention layer, whose output is combined with the original tokens via a residual connection. Our experiments demonstrate that including these interaction layers significantly improves inter-person pose accuracy in two-person interaction benchmarks.

# 3.4. PromptHMR video version

In addition to the single-image variant of PromptHMR, we train an extended version that processes videos to estimate human motion in world coordinates. To achieve this, we introduce a simple and efficient temporal transformer module.
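
Before describing the video model in detail, a minimal sketch of the promptable cross-person attention from Sec. 3.3 (also depicted in Fig. 3 below) may be helpful: when the interaction prompt is off, the tokens pass through unchanged; when it is on, the two people's query tokens attend to each other and the result is added back residually. All names and dimensions are illustrative assumptions, not the released implementation.

```python
import torch
import torch.nn as nn

class CrossPersonAttention(nn.Module):
    """Illustrative promptable interaction layer (a sketch, not the actual code)."""
    def __init__(self, d: int = 1024, n_heads: int = 8):
        super().__init__()
        self.person_pe = nn.Parameter(torch.zeros(2, 1, d))   # distinguishes person A and person B
        self.attn = nn.MultiheadAttention(d, n_heads, batch_first=True)

    def forward(self, tokens: torch.Tensor, interacting: bool) -> torch.Tensor:
        # tokens: (2, n_tokens, d) -- the query tokens of the two prompted people
        if not interacting:
            return tokens                                      # flow control: skip the layer entirely
        x = tokens + self.person_pe                            # add person-identity encodings
        joint = x.reshape(1, -1, x.shape[-1])                  # stack both people's tokens into one sequence
        out, _ = self.attn(joint, joint, joint)                # standard self-attention across the pair
        return tokens + out.reshape_as(tokens)                 # residual connection

# Example: two people, 5 prompt/query tokens each.
layer = CrossPersonAttention()
tokens = torch.randn(2, 5, 1024)
assert layer(tokens, interacting=False).equal(tokens)          # prompt off: identity
print(layer(tokens, interacting=True).shape)                   # torch.Size([2, 5, 1024])
```

The residual-plus-skip design is what lets the same weights serve both interacting and non-interacting people at test time.
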
Given a monocular video sequence $\{I^t\}_{t=0}^T$, we first run

![](images/8c6c59d99ccd878b1a6ed924cf39f0de820cb6afdd9d730c0a6e96d7833acb42.jpg)
Figure 3. SMPL-X decoder. The top row shows one attention block in the decoder. The cross-person interaction module can be turned on/off. The bottom row shows the cross-person attention.

PromptHMR to obtain per-subject SMPL-X decoder output tokens $T_{\mathrm{smpl}}^{\prime}$ and $T_{\mathrm{depth}}^{\prime}$, assuming that the subject identities are provided with the prompts. These tokens, along with the positional encoding of time $t$, are fed to a decoder-only temporal transformer module with twelve attention blocks. The output tokens are converted to SMPL-X parameters $\phi_t, \theta_t, \beta_t$, translation $\tau_t$, and joint contact probabilities $c_t$. The contact probabilities indicate whether a given joint is in contact with the ground plane, similar to [52-54].

To obtain results in world coordinates, we adopt the approach from TRAM [67]. Specifically, we use DROID-SLAM [62] and a monocular metric depth estimation model, ZoeDepth [3], to estimate camera motion in metric world coordinates. The translation parameters $\tau_{t}$ are then transformed to world coordinates using the estimated camera motion. To refine the human trajectory and mitigate foot-skating artifacts, we leverage the estimated contact probabilities and run a fast postprocessing step that optimizes the contact joints to have zero velocity.

# 3.5. Losses

PromptHMR is trained with a combination of 2D and 3D losses, following traditional HMR methods [23, 32]:

$$
\mathcal{L} = \lambda_{1} \mathcal{L}_{2D} + \lambda_{2} \mathcal{L}_{3D} + \lambda_{3} \mathcal{L}_{\mathrm{SMPL}} + \lambda_{4} \mathcal{L}_{V} + \lambda_{5} \mathcal{L}_{t},
$$

with each term calculated as

$$
\mathcal{L}_{2D} = \left\| \hat{\mathcal{J}}_{2D} - \Pi\left(\mathcal{J}_{3D}\right) \right\|_{F}^{2}
$$

$$
\mathcal{L}_{3D} = \left\| \hat{\mathcal{J}}_{3D} - \mathcal{J}_{3D} \right\|_{F}^{2}
$$

$$
\mathcal{L}_{\mathrm{SMPL}} = \left\| \hat{\Theta} - \Theta \right\|_{2}^{2}
$$

$$
\mathcal{L}_{V} = \left\| \hat{V} - V \right\|_{F}^{2}
$$

$$
\mathcal{L}_{t} = \left\| \hat{p}_{xy} - p_{xy} \right\|_{F}^{2} + \left\| \hat{p}_{z} - p_{z} \right\|_{F}^{2}
$$

where $\mathcal{J}_{3D}$ and $V$ are the 3D joints and vertices of the SMPL-X model, with the hat operator denoting the ground truth. $\Pi$ is the camera reprojection operator. Additionally, on datasets with ground truth translation labels, we supervise the normalized translation $p_{xy}$ and inverse depth $p_z$.

# 4. Experiments

Datasets. We train PromptHMR with standard datasets: BEDLAM [4], AGORA [46], 3DPW [65], COCO [35], and MPII [39]. Following 4DHumans, we add AIC [68] and InstaVariety [24] as in-the-wild data, with pseudo-ground truth from CamSMPLify [45]. Additionally, we add CHI3D [17] and HI4D [71] to enable learning two-person interaction, following the train-test splits from BUDDI [43]. Including CHI3D and HI4D does not improve performance on other benchmarks.

Implementation. We train PromptHMR with AdamW with a batch size of 96 images of resolution $896 \times 896$. We use a learning rate of $1e^{-5}$ for the image encoder and $3e^{-5}$ for the prompt encoder and the SMPL-X decoder, with a weight decay of $5e^{-5}$.
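
As a rough sketch of the optimizer configuration described above, assuming the image encoder, prompt encoder, and SMPL-X decoder are exposed as submodules with these illustrative names:

```python
import torch
import torch.nn as nn

class DummyPromptHMR(nn.Module):
    """Stand-in with the three submodules named in the text (illustrative only)."""
    def __init__(self):
        super().__init__()
        self.image_encoder = nn.Linear(8, 8)
        self.prompt_encoder = nn.Linear(8, 8)
        self.smplx_decoder = nn.Linear(8, 8)

def build_optimizer(model: nn.Module) -> torch.optim.AdamW:
    # Per-module learning rates as reported above; one weight decay shared by all groups.
    param_groups = [
        {"params": model.image_encoder.parameters(), "lr": 1e-5},
        {"params": model.prompt_encoder.parameters(), "lr": 3e-5},
        {"params": model.smplx_decoder.parameters(), "lr": 3e-5},
    ]
    return torch.optim.AdamW(param_groups, weight_decay=5e-5)

optimizer = build_optimizer(DummyPromptHMR())
print([g["lr"] for g in optimizer.param_groups])  # [1e-05, 3e-05, 3e-05]
```
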
The training converges within 350K steps. + +Evaluation. We evaluate camera space reconstruction accuracy on 3DPW [65], EMDB [25] and RICH [21], using MPJPE, Procrustes-aligned MPJPE (PA-MPJPE) and Per Vertex Error (PVE) [23]. We evaluate inter-person accuracy on HI4D and CHI3D by Pair-PA-MPJPE, which aligns the two people as a whole with the ground truth [43]. + +To evaluate world-grounded motion on EMDB with PromptHMR video (PromptHMR-vid), we compute World-aligned MPJPE $(\mathrm{WA - MPJPE}_{100})$ , World MPJPE $(\mathrm{W - MPJPE}_{100})$ and Root Translation Error (RTE in %) [54, 70]. + +# 4.1. Reconstruction accuracy + +For camera space reconstruction, as shown in Table 1, PromptHMR and PromptHMR-Vid demonstrate state-of-the-art performance, matching crop-based methods while achieving better results than other full-image methods. PromptHMR and CameraHMR use the same training data and have similar performance, which validates that this prompt-based approach can achieve metrically accurate results. For representative results, see Fig. 7, where PromptHMR recovers coherent 3D scenes of people. + +For interaction reconstruction, PromptHMR achieves good accuracy as indicated in Table 2. Compared to BUDDI which is also trained on CHI3D and HI4D, our method achieves better overall accuracy on per-person and interperson metrics. We show qualitative results in Fig. 8. As a monocular regression method, PromptHMR still cannot avoid interpenetration between closely interacting people. + +PromptHMR-Vid achieves SOTA performance among methods that estimate human motion in world coordinates, as shown in Table 4. Unlike TRAM, we estimate the joint contact probabilities similar to [53, 54]. Therefore, we achieve lower foot skating than TRAM, even though we use the same metric SLAM method to transform motion in camera space to world coordinates. Please refer to our supplementary material (SupMat) for qualitative results of PromptHMR-Vid. + +![](images/bed63959a43ef9b6517723265c8d98562acb316fc42046804d36f64950b13e1e.jpg) +Figure 4. Effect of box prompts. Our method remains stable with different boxes, including noisy truncated boxes. + +![](images/d460b7604ea14b3e3eeca7c6c5ccbdf17a95d7dc5d115e02e0e0b1efc67f4b85.jpg) +Figure 5. Effect of mask prompts. Results are from the same model with different prompt inputs. Masks are better for close interaction scenarios where boxes are ambiguous. + +![](images/2964c98ed8bbefebb6c7c00d61ec76077da35ec78188b8515b97045c789f956e.jpg) +Figure 6. Effect of shape prompts. Compared to the baseline that does not incorporate shape description during training and testing, the model with shape prompts has better accuracy on HBW, especially in ambiguous images. + +# 4.2. Effect of multimodal prompts + +We conduct qualitative and quantitative evaluations of the multimodal prompts. For efficient ablation, we train models with $448 \times 448$ input resolution and select the best model within 150K steps of training. + +For box prompts, as shown in rows 3-4 of Fig. 7, our method is able to take a combination of different boxes from in-the-wild images to reconstruct crowded scenes. Figure 4 also shows an example with varying box inputs. PromptHMR remains stable when the boxes change and uses full image context to reconstruct the human even when the boxes are truncated. + +The mask prompt is more effective than boxes when people closely overlap (Fig. 5), as boxes are ambiguous in such + +

| | Models | 3DPW (14) PA-MPJPE | MPJPE | PVE | EMDB (24) PA-MPJPE | MPJPE | PVE | RICH (24) PA-MPJPE | MPJPE | PVE |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| cropped image | CLIFF* [33] | 43.0 | 69.0 | 81.2 | 68.3 | 103.3 | 123.7 | 68.1 | 103.3 | 128.0 |
| | HMR2.0a [18] | 44.4 | 69.8 | 82.2 | 61.5 | 97.8 | 120.0 | 60.7 | 98.3 | 120.8 |
| | TokenHMR [14] | 44.3 | 71.0 | 84.6 | 55.6 | 91.7 | 109.4 | - | - | - |
| | CameraHMR [45] | 35.1 | 56.0 | 65.9 | 43.3 | 70.2 | 81.7 | 34.0 | 55.7 | 64.4 |
| full image | BEV [60] | 46.9 | 78.5 | 92.3 | 70.9 | 112.2 | 133.4 | - | - | - |
| | Multi-HMR* [2] | 45.9 | 73.1 | 87.1 | 50.1 | 81.6 | 95.7 | 46.3 | 73.8 | 83.0 |
| | PromptHMR* | 36.6 | 58.7 | 69.4 | 41.0 | 71.7 | 84.5 | 37.3 | 56.6 | 65.5 |
| video | WHAM [54] | 37.5 | 59.8 | 71.5 | 52.0 | 81.6 | 96.9 | 44.3 | 80.0 | 91.2 |
| | TRAM [67] | 35.6 | 59.3 | 69.6 | 45.7 | 74.4 | 86.6 | - | - | - |
| | GVHMR [53] | 37.0 | 56.6 | 68.7 | 44.5 | 74.2 | 85.9 | 39.5 | 66.0 | 74.4 |
| | PromptHMR-Vid | 35.5 | 56.9 | 67.3 | 40.1 | 68.1 | 79.2 | 37.0 | 57.4 | 65.8 |

Table 1. Comparison of mesh reconstruction on the 3DPW, EMDB and RICH datasets, with the number of joints in parentheses. $\star$ denotes methods that use ground truth focal length during inference. Note that we remove the test-time flip augmentation from all of the video methods to ensure a fair comparison. All metrics are in mm.

| Models | HI4D (14) PA-MPJPE | MPJPE | Pair-PA-MPJPE | CHI3D (14) PA-MPJPE | MPJPE | Pair-PA-MPJPE |
| --- | --- | --- | --- | --- | --- | --- |
| BEV* [60] | 81 | - | 136 | 51 | - | 96 |
| BUDDI [43] | 73 | - | 98 | 47 | - | 68 |
| Multi-HMR* [2] | 49.8 | 67.8 | 80.6 | 31.7 | 54.0 | 100.0 |
| PromptHMR* | 39.2 | 63.9 | 78.1 | 27.2 | 48.0 | 58.5 |
| PromptHMR | 30.1 | 39.6 | 39.5 | 24.7 | 46.5 | 45.3 |

Table 2. Comparison on interaction reconstruction. PromptHMR is more accurate in per-person and inter-person accuracy. * denotes a method or baseline that is not trained on HI4D or CHI3D. All metrics are in mm. The impact of HI4D and the interaction prompt are evaluated in Table 5.

| Train w/ text | Test w/ text | HBW Height | Chest | Waist | Hip | P2P-20k |
| --- | --- | --- | --- | --- | --- | --- |
| × | × | 69 | 51 | 88 | 63 | 26 |
| ✓ | × | 69 | 48 | 86 | 60 | 26 |
| ✓ | ✓ | 62 | 43 | 76 | 58 | 24 |

cases. The ablation on HI4D (rows 1-2 in Tab. 5) shows that using masks as the spatial prompt improves accuracy.

Experiments on the HBW validation set (Tab. 3) show that text prompts effectively improve shape accuracy when used during both training and testing. Moreover, training with shape descriptions alone provides an accuracy boost even if prompts are not given at test time. As illustrated in Fig. 6, text prompts provide notable improvements, especially when large perspective effects create ambiguity.

Table 3. Ablation of shape prompts using text. Training with shape prompts improves shape accuracy. Using shape prompts during inference further improves shape accuracy. The ablation study is conducted with a $448 \times 448$ model. Errors are in mm.

| Models | EMDB-2 (24) WA-MPJPE100 | W-MPJPE100 | RTE | Jitter | Foot Skating |
| --- | --- | --- | --- | --- | --- |
| WHAM [54] | 135.6 | 354.8 | 6.0 | 22.5 | 4.4 |
| TRAM [67] | 76.4 | 222.4 | 1.4 | 18.5 | 23.4 |
| GVHMR [53] | 111.0 | 276.5 | 2.0 | 16.7 | 3.5 |
| PromptHMR-Vid | 71.0 | 216.5 | 1.3 | 16.3 | 3.5 |

Table 4. Evaluation of motion in world coordinates. PromptHMR-Vid combined with metric SLAM from TRAM [67] surpasses SOTA methods at predicting human motion in world coordinates.

| Trained w/ Mask | Trained w/ Interaction | Trained w/ HI4D | HI4D (14) PA-MPJPE | MPJPE | Pair-PA-MPJPE |
| --- | --- | --- | --- | --- | --- |
| × | × | × | 47.0 | 71.4 | 87.2 |
| ✓ | × | × | 43.4 | 60.5 | 83.0 |
| × | ✓ | × | 43.7 | 61.3 | 73.0 |
| × | × | ✓ | 36.3 | 49.4 | 52.6 |
| ✓ | ✓ | ✓ | 36.5 | 47.1 | 47.9 |

Table 5. Ablation on the interaction prompt. The interaction module improves the inter-person reconstruction metric Pair-PA-MPJPE on HI4D, especially when the method does not include HI4D in training. The ablation is conducted with a $448 \times 448$ model. All metrics are in mm.

For interaction prompts, we show an ablation in Table 5. The proposed interaction module is beneficial and largely improves inter-person accuracy on HI4D even without HI4D training, indicating out-of-domain generalization. When trained on HI4D, the interaction module does not improve per-person PA-MPJPE but still improves inter-person Pair-PA-MPJPE. Please refer to our SupMat for more qualitative results on interaction prompts.

![](images/f8f9cf2acc709924ae2bab9efea5c107298d4e5de4209433bf74abca05fca567.jpg)
Figure 7. Qualitative comparison: Multi-HMR vs PromptHMR. Our model can recover coherent 3D scenes of people. In crowded scenes, face detection provides reliable box prompts for our model. Please zoom in to see the details.

![](images/793495b0ca62fcf69d0eb4f71001d3888dbb2dafe49de988b7bd3fb3108bb57d.jpg)
Figure 8. Qualitative results. PromptHMR recovers coherent two-person close interaction. Despite suffering from some interpenetration, the relative positions of the interacting people are accurately recovered. More examples are provided in the Supplementary.

# 5. Limitations

We see PromptHMR as a step towards a holistic perception model for 3D humans, but several limitations need to be addressed in future work. Currently, the shape description and interaction prompts are not automatically generated and need to be supplied by the user. Future work should explore how to effectively integrate our promptable model with VLMs to automate prompting. We show how semantic prompts can improve reconstruction accuracy, but many other potential types of side information such as action descriptions, 3D scene context, or body measurements may provide additional benefits in different scenarios.

# 6. Conclusion

We have presented PromptHMR, a promptable HPS estimation approach that leverages full image context with spatial and semantic prompts to infer 3D humans in the scene. Our method demonstrates state-of-the-art accuracy across diverse benchmarks and generalizes well in the wild. Our experiments show that incorporating diverse input information through flexible prompting enables robustness and adaptability in challenging scenarios.

Acknowledgement. The authors would like to thank Yan Zhang, Yao Feng, and Nitin Saini for their suggestions. The majority of the work was done when Yufu was an intern at Meshcapade. Yufu and Kostas thank the support of NSF NCS-FO 2124355, NSF FRR 2220868, and NSF IISRI 2212433.

Disclosure. While MJB is a co-founder and Chief Scientist at Meshcapade, his research in this project was performed solely at, and funded solely by, the Max Planck Society.

# References

[1] Nikos Athanasiou, Alpar Ceske, Markos Diomataris, Michael J. Black, and Gül Varol. MotionFix: Text-driven 3D human motion editing. In SIGGRAPH Asia, 2024. 3
[2] Fabien Baradel, Matthieu Armando, Salma Galaoui, Romain Brégier, Philippe Weinzaepfel, Grégory Rogez, and Thomas Lucas. Multi-HMR: Multi-person whole-body human mesh recovery in a single shot. European Conference on Computer Vision, 2024. 3, 4, 7
[3] Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias Müller. ZoeDepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288, 2023.
5 +[4] Michael J Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. BEDLAM: A synthetic dataset of bodies exhibiting detailed lifelike animated motion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8726-8737, 2023. 6, 1 +[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229, 2020. 3 +[6] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2Mesh: Graph convolutional network for 3D human pose and mesh recovery from a 2D human pose. In European Conference on Computer Vision, pages 769-787. Springer, 2020. 2 +[7] Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Beyond static features for temporally consistent 3D human pose and shape from a video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1964-1973, 2021. 3 +[8] Vasileios Choutas, Lea Müller, Chun-Hao P. Huang, Siyu Tang, Dimitrios Tzionas, and Michael J. Black. Accurate 3D body shape regression using metric and semantic attributes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2718-2728, 2022. 2, 3, 4, 1 +[9] Ginger Delmas, Philippe Weinzaepfel, Francesc Moreno-Noguer, and Grégory Rogez. PoseFix: correcting 3D human poses with natural language. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15018-15028, 2023. 3 +[10] Ginger Delmas, Philippe Weinzaepfel, Francesc Moreno-Noguer, and Grégory Rogez. Posembroider: Towards a + +3D, visual, semantic-aware human pose representation. In European Conference on Computer Vision, 2024. 2, 3 +[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1 +[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. 4 +[13] Kaiwen Duan, Song Bai, Lingxi Xie, Honggang Qi, Qingming Huang, and Qi Tian. Centernet: Keypoint triplets for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6569-6578, 2019. 3 +[14] Sai Kumar Dwivedi, Yu Sun, Priyanka Patel, Yao Feng, and Michael J Black. TokenHMR: Advancing human mesh recovery with a tokenized pose representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1323-1333, 2024. 7 +[15] Jose M. Facil, Benjamin Ummenhofer, Huizhong Zhou, Luis Montesano, Thomas Brox, and Javier Civera. CAM-Convs: Camera-aware multi-scale convolutions for single-view depth. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 11818-11827, 2019. 4 +[16] Yao Feng, Jing Lin, Sai Kumar Dwivedi, Yu Sun, Priyanka Patel, and Michael J. Black. ChatPose: Chatting about 3D human pose. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 2, 3 +[17] Mihai Fieraru, Mihai Zanfir, Elisabeta Oneata, Alin-Ionut Popa, Vlad Olaru, and Cristian Sminchisescu. 
Reconstructing three-dimensional models of interacting humans. arXiv preprint arXiv:2308.01854, 2023. 2, 3, 6, 1 +[18] Shubham Goel, Georgios Pavlakos, Jathushan Rajasegaran, Angjoo Kanazawa, and Jitendra Malik. Reconstructing and tracking humans with transformers. Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2, 7 +[19] Dorian F Henning, Tristan Laidlow, and Stefan Leutenegger. BodySLAM: joint camera localisation, mapping, and human motion tracking. In European Conference on Computer Vision, pages 656-673. Springer, 2022. 3 +[20] Dorian F Henning, Christopher Choi, Simon Schaefer, and Stefan Leutenegger. BodySLAM++: Fast and tightly-coupled visual-inertial camera and human motion tracking. In IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3781-3788. IEEE, 2023. 3 +[21] Chun-Hao P Huang, Hongwei Yi, Markus Höschle, Matvey Safroshkin, Tsvetelina Alexiadis, Senya Polikovsky, Daniel Scharstein, and Michael J Black. Capturing and inferring dense full-body human-scene contact. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13274-13285, 2022. 2, 6 + +[22] Wen Jiang, Nikos Kolotouros, Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Coherent reconstruction of multiple humans from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5579-5588, 2020. 3 +[23] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7122-7131, 2018. 2, 3, 5, 6 +[24] Angjoo Kanazawa, Jason Y Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5614-5623, 2019. 3, 6, 1 +[25] Manuel Kaufmann, Jie Song, Chen Guo, Kaiyue Shen, Tianjian Jiang, Chengcheng Tang, Juan José Zárate, and Otmar Hilliges. EMDB: The electromagnetic database of global 3d human pose and shape in the wild. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14632-14643, 2023. 2, 6 +[26] Rawal Khirodkar, Timur Bagautdinov, Julieta Martinez, Su Zhaoen, Austin James, Peter Selednik, Stuart Anderson, and Shunsuke Saito. Sapiens: Foundation for human vision models. In European Conference on Computer Vision, pages 206-228. Springer, 2025. 1 +[27] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 4 +[28] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. VIBE: Video inference for human body pose and shape estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5253-5263, 2020. 3 +[29] Muhammed Kocabas, Chun-Hao P Huang, Otmar Hilliges, and Michael J Black. PARE: Part attention regressor for 3D human body estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11127-11137, 2021. 2 +[30] Muhammed Kocabas, Chun-Hao P. Huang, Joachim Tesch, Lea Müller, Otmar Hilliges, and Michael J. Black. SPEC: Seeing people in the wild with an estimated camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11035-11045, 2021. 
2 +[31] Muhammed Kocabas, Ye Yuan, Pavlo Molchanov, Yunrong Guo, Michael J Black, Otmar Hilliges, Jan Kautz, and Umar Iqbal. PACE: Human and camera motion estimation from inthe-wild videos. In International Conference on 3D Vision, pages 397-408, 2024. 3 +[32] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2252-2261, 2019. 2, 3, 5 +[33] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. CLIFF: Carrying location information + +in full frames into human pose and shape estimation. In European Conference on Computer Vision, pages 590-606. Springer, 2022. 7 +[34] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12939-12948, 2021. 2 +[35] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision, pages 740-755. Springer, 2014. 6, 1 +[36] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning, 2023. 3 +[37] Thomas Lucas, Fabien Baradel, Philippe Weinzaepfel, and Grégory Rogez. Posegpt: Quantization-based 3d human motion generation and forecasting. In European Conference on Computer Vision, pages 417-435, 2022. 3 +[38] Zhengyi Luo, S. Alireza Golestaneh, and Kris M. Kitani. 3d human motion estimation via motion compression and refinement. In Proceedings of the Asian Conference on Computer Vision, 2020. 3 +[39] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved cnn supervision. In International Conference on 3D Vision, pages 506-516. IEEE, 2017. 6, 1 +[40] Gyeongsik Moon and Kyoung Mu Lee. I2L-MeshNet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single RGB image. In European Conference on Computer Vision, pages 752-768. Springer, 2020. 2 +[41] Raul Mur-Artal and Juan D Tardós. ORB-SLAM: An opensource slam system for monocular, stereo, and RGB-D cameras. IEEE Transactions on Robotics, 33(5):1255-1262, 2017. 3 +[42] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. ORB-SLAM: A versatile and accurate monocular SLAM system. IEEE Transactions on Robotics, 31(5):1147-1163, 2015. 3 +[43] Lea Müller, Vickie Ye, Georgios Pavlakos, Michael J. Black, and Angjoo Kanazawa. Generative proxemics: A prior for 3D social interaction from images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 3, 6, 7 +[44] Maxime Oquab, Timothée Darcet, Theo Moutakanni, Huy V. Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Russell Howes, Po-Yao Huang, Hu Xu, Vasu Sharma, Shang-Wen Li, Wojciech Galuba, Mike Rabbat, Mido Assran, Nicolas Ballas, Gabriel Synnaeve, Ishan Misra, Herve Jegou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. DINoV2: Learning robust visual features without supervision, 2023. 4, 1 +[45] Priyanka Patel and Michael J. Black. Camerahrm: Aligning people with perspective. International Conference on 3D Vision (3DV), 2025. 6, 7 + +[46] Priyanka Patel, Chun-Hao P Huang, Joachim Tesch, David T Hoffmann, Shashank Tripathi, and Michael J Black. 
AGORA: Avatars in geography optimized for regression analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13468-13478, 2021. 6, 1 +[47] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3D hands, face, and body from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10975-10985, 2019. 2, 3 +[48] Mathis Petrovich, Michael J Black, and Gül Varol. Action-conditioned 3D human motion synthesis with transformer VAE. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10985-10995, 2021. 3 +[49] Baldomero R. Árbol and Dan Casas. BodyShapeGPT: SMPL body shape manipulation with LLMs. In European Conference on Computer Vision Workshops, 2024. 3 +[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 2 +[51] Rene Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(3):1623-1637, 2022. 5 +[52] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J Guibas. HUMOR: 3D human motion model for robust pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11488-11499, 2021. 5 +[53] Zehong Shen, Huajin Pi, Yan Xia, Zhi Cen, Sida Peng, Zechen Hu, Hujun Bao, Ruizhen Hu, and Xiaowei Zhou. World-grounded human motion recovery via gravity-view coordinates. In SIGGRAPH Asia, 2024. 3, 6, 7, 1 +[54] Soyong Shin, Juyong Kim, Eni Halilaj, and Michael J Black. WHAM: Reconstructing world-grounded humans with accurate 3D motion. arXiv preprint arXiv:2312.07531, 2023. 3, 5, 6, 7, 1 +[55] Stephan Streuber, M Alejandra Quiros-Ramirez, Matthew Q Hill, Carina A Hahn, Silvia Zuffi, Alice O'Toole, and Michael J Black. Body talk: Crowdshaping realistic 3D avatars with words. ACM TOG, 35(4):1-14, 2016. 3 +[56] Sanjay Subramanian, Evonne Ng, Lea Müller, Dan Klein, Shiry Ginosar, and Trevor Darrell. Pose priors from language models. arXiv preprint arXiv:2405.03689, 2024. 3 +[57] Qingping Sun, Yanjun Wang, Ailing Zeng, Wanqi Yin, Chen Wei, Wenjia Wang, Haiyi Mei, Chi-Sing Leung, Ziwei Liu, Lei Yang, and Zhongang Cai. AiOS: All-in-one-stage expressive human pose and shape estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 1834-1843, 2024. 3 +[58] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a + +skeleton-disentangled representation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 3 +[59] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J Black, and Tao Mei. Monocular, one-stage, regression of multiple 3D people. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11179-11188, 2021. 3 +[60] Yu Sun, Wu Liu, Qian Bao, Yili Fu, Tao Mei, and Michael J Black. Putting people in their place: Monocular regression of 3D people in depth. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13243-13252, 2022. 
7 +[61] Yu Sun, Qian Bao, Wu Liu, Tao Mei, and Michael J Black. TRACE: 5D temporal regression of avatars with dynamic cameras in 3D environments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8856-8866, 2023. 3 +[62] Zachary Teed and Jia Deng. DRPOID-SLAM: Deep visual slam for monocular, stereo, and RGB-D cameras. Advances in Neural Information Processing Systems, 34:16558-16569, 2021. 3, 5 +[63] Zachary Teed, Lahav Lipson, and Jia Deng. Deep patch visual odometry. Advances in Neural Information Processing Systems, 36, 2024. 3 +[64] Guy Tevet, Sigal Raab, Brian Gordon, Yoni Shafir, Daniel Cohen-or, and Amit Haim Bermano. Human motion diffusion model. In International Conference on Learning Representations, 2023. 3 +[65] Timo Von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In European Conference on Computer Vision, pages 601-617, 2018. 2, 6, 1 +[66] Yufu Wang and Kostas Daniilidis. ReFit: Recurrent fitting network for 3D human recovery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14644-14654, 2023. 2 +[67] Yufu Wang, Ziyun Wang, Lingjie Liu, and Kostas Daniilidis. TRAM: Global trajectory and motion of 3d humans from inthe-wild videos. In European Conference on Computer Vision, 2024. 2, 3, 5, 7 +[68] Jiahong Wu, He Zheng, Bo Zhao, Yixin Li, Baoming Yan, Rui Liang, Wenjia Wang, Shipei Zhou, Guosen Lin, Yanwei Fu, et al. AI challenger: A large-scale dataset for going deeper in image understanding. arXiv preprint arXiv:1711.06475, 2017. 6, 1 +[69] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. arXiv preprint arXiv:2309.16671, 2023. 1 +[70] Vickie Ye, Georgios Pavlakos, Jitendra Malik, and Angjoo Kanazawa. Decoupling human and camera motion from videos in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21222-21232, 2023. 3, 6 +[71] Yifei Yin, Chen Guo, Manuel Kaufmann, Juan Jose Zarate, Jie Song, and Otmar Hilliges. Hi4D: 4D instance segmentation of close human interaction. In Proceedings of + +the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17016-17027, 2023. 2, 3, 6, 1 +[72] Ye Yuan, Umar Iqbal, Pavlo Molchanov, Kris Kitani, and Jan Kautz. GLAMR: Global occlusion-aware human mesh recovery with dynamic cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11038-11049, 2022. 3 +[73] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. PyMAF: 3D human pose and shape regression with pyramidal mesh alignment feedback loop. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11446-11456, 2021. 2 +[74] Yizhou Zhao, Tuanfeng Yang Wang, Bhiksha Raj, Min Xu, Jimei Yang, and Chun-Hao Paul Huang. Synergistic global-space camera and human reconstruction from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1216-1226, 2024. 3 + +# PromptHMR: Promptable Human Mesh Recovery Supplementary Material + +# 7. Additional Results + +In this section, we demonstrate more qualitative results to show the effects of interaction prompting and the video module. Please refer to the supplementary video to see the results from PromptHMR-Vid. + +# 7.1. 
Interaction Prompting + +We perform qualitative and quantitative ablation studies of interaction prompting on the HI4D dataset. In Tab. 5 of the main paper, we demonstrate that introducing interaction prompting improves the quantitative results on HI4D. In Fig. 9, we present more qualitative results to show the effect of the interaction module. As shown in the first column of Fig. 9, without the interaction module, the model does not learn to reconstruct close interaction effectively, even when trained with CHI3D interaction data. By adding the proposed interaction module, in the second column, the relative position and orientation of the interacting people are improved, and the penetration is reduced. Note that if we turn off the interaction module via the proposed flow control, the results become similar to the first column. Finally, training with both CHI3D and HI4D leads to better results. + +# 8. Experiment Details + +# 8.1. Datasets + +The training set of the image model includes BEDLAM [4], AIC [68], InstaVariety [24], HI4D [71], CHI3D [17], AGORA [46], 3DPW [65], COCO [35], and MPII [39], with sampling rates of $\{0.2, 0.2, 0.3, 0.08, 0.08, 0.06, 0.06, 0.01, 0.01\}$. All input images are padded and resized to $896 \times 896$. During training, we employ rotation and color jitter augmentation. For PromptHMR-Vid, we use the BEDLAM and 3DPW datasets following [53, 54]. + +To use datasets with different annotations for training, we adopt the different losses described in Sec. 3.5 of the main paper. For the datasets with ground-truth SMPL/SMPL-X annotations (e.g. BEDLAM, AGORA, CHI3D, HI4D), we employ all loss terms. On AIC, InstaVariety, and 3DPW, we drop the translation loss. On COCO and MPII, we only compute the 2D keypoint reprojection loss. + +We generate the whole-body bounding boxes by projecting the ground-truth SMPL-X meshes onto the image plane. To generate the face bounding boxes, we project the head vertices. To generate truncated boxes, we take groups of keypoints (e.g. upper-body keypoints) and compute their bounding boxes. Gaussian noise is then added to both corners. + +On BEDLAM, AGORA, and AIC, we follow SHAPY [8] to compute the shape attribute scores. During training, we compose a shape description for each instance, such as "a tall and broad-shoulder female", using a few augmentation rules. Each sentence randomly samples 1-3 of the top attributes. The gender information is augmented with synonyms, such as "female", "woman", "girl", etc. + +# 8.2. Architecture + +We adopt ViT-L [11], pretrained by DINOv2 [44], as our image encoder. We use an input image size of 896 and a patch size of 14, leading to the same spatial resolution as the recent Sapiens models [26]. The text encoder is from MetaCLIP [69]. The SMPL-X decoder consists of 3 attention blocks with an embedding dimension of 1024. From the output tokens ($T_{smpl}^{\prime}$ and $T_{depth}^{\prime}$), we use separate 2-layer MLPs to regress $\theta$, $\beta$, $p_{xy}$, and $p_z$ as introduced in Sec. 3.2. + +# 8.3. Training + +We train the PromptHMR image model using 8 H100 GPUs, with a batch size of 96 (12 images on each GPU). We use AdamW with a learning rate of 1e-5 for the image encoder, a learning rate of 3e-5 for the prompt encoder and the SMPL-X decoder, $\beta_{1}$ of 0.9, $\beta_{2}$ of 0.999, and a weight decay of 5e-5. + +The losses presented in Sec. 3.5 are weighted differently: for $\mathcal{L}_{2D}$, $\mathcal{L}_{3D}$, $\mathcal{L}_{\mathrm{SMPL}}$, $\mathcal{L}_V$, and $\mathcal{L}_{trans}$, the weights are set to $\{50.0, 5.0, 1.0, 1.0, 10.0\}$, respectively.
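To make the training recipe above concrete, here is a minimal PyTorch sketch of how the per-module learning rates and the loss weighting could be wired together. The module names, feature sizes, and loss keys are illustrative placeholders, not the released PromptHMR code.

```python
import torch
import torch.nn as nn

# Stand-in modules: the real encoders and decoder are transformers; these
# names and sizes are placeholders chosen only for illustration.
image_encoder = nn.Linear(1024, 1024)
prompt_encoder = nn.Linear(256, 1024)
smplx_decoder = nn.Linear(1024, 22 * 3 + 10 + 3)

# AdamW with per-module parameter groups, mirroring the hyperparameters above:
# lr 1e-5 for the image encoder, lr 3e-5 for the prompt encoder and the
# SMPL-X decoder, betas (0.9, 0.999), and weight decay 5e-5.
optimizer = torch.optim.AdamW(
    [
        {"params": image_encoder.parameters(), "lr": 1e-5},
        {"params": prompt_encoder.parameters(), "lr": 3e-5},
        {"params": smplx_decoder.parameters(), "lr": 3e-5},
    ],
    betas=(0.9, 0.999),
    weight_decay=5e-5,
)

def weighted_total_loss(losses: dict) -> torch.Tensor:
    """Combine per-term losses with the weights quoted above (L2D, L3D, LSMPL, LV, Ltrans)."""
    weights = {"l_2d": 50.0, "l_3d": 5.0, "l_smpl": 1.0, "l_verts": 1.0, "l_trans": 10.0}
    return sum(weights[name] * value for name, value in losses.items())
```

Per-group learning rates override the optimizer-level default, while the betas and weight decay are shared by all three groups.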
PromptHMR-Vid. We train the PromptHMR video model on 2 H100 GPUs with a batch size of 512 samples consisting of 120 frames each. We use AdamW with a learning rate of 2e-4 and a weight decay of 5e-5. We use the same losses as the image-based version, in addition to a binary cross-entropy loss for the joint contact predictions. + +![](images/0ce1f2959db11ef87e142908954c88a9bbc66d4fd12da3a4358a967120790538.jpg) +Figure 9. Ablation of interaction module. When fine-tuning the image model on CHI3D, adding the interaction module improves two-person interaction reconstruction on HI4D, which demonstrates the out-of-domain generalization ability of interaction prompting. Fine-tuning on both CHI3D and HI4D further improves results. + +# 8.4. Metrics + +In this section, we provide more details on the evaluation metrics used in Sec. 4 of the main paper. + +Mean Per Joint Position Error (MPJPE) is calculated by aligning the 3D joints obtained from SMPL-X with the ground truth at the pelvis before computing the mean square error. For historical reasons, different datasets use different sets of joints. Additionally, the pelvis definition could be different. To evaluate methods that predict SMPL-X on datasets with SMPL labels, it's customary to convert the SMPL-X vertices to SMPL vertices and use a joint regressor on the converted vertices to obtain 3D joints comparable to the labels. Note that all the above choices could alter the results and sometimes produce large "artificial" improvements, so we strictly follow the most recent methods in the evaluation procedure. It's reported in the unit of mm. + +Per-Vertex Error (PVE) computes the mean square error on the vertices after pelvis alignment. Compared to MPJPE, it measures the combined pose and shape error. It's reported in the unit of mm. + +Procrustes-aligned MPJPE (PA-MPJPE) performs general Procrustes alignment on the 3D joints before computing MPJPE. It measures purely the local articulated pose error. It's reported in the unit of mm. + +Paired PA-MPJPE (Pair-PA-MPJPE) aligns two people as a whole with the ground truth before computing MPJPE. In addition to per-person error, it also measures the error in the relative position and orientation of the two people. It's used in HI4D and CHI3D to evaluate interaction reconstruction. It's reported in the unit of mm. + +World-aligned MPJPE$_{100}$ (WA-MPJPE$_{100}$) measures the world-grounded motion accuracy. It aligns a segment of 100 frames of predictions with the ground truth before computing MPJPE. It's reported in the unit of mm. + +World MPJPE$_{100}$ (W-MPJPE$_{100}$) is similar to WA-MPJPE$_{100}$ but only aligns the first two frames of the 100-frame segment. Therefore, it provides a better measurement of the drift in the direction and scale of the trajectories. It's reported in the unit of mm. + +Root Trajectory Error (RTE) measures the accuracy of the whole trajectory, including the scale. It performs rigid alignment on the trajectory of the root and computes the mean square error. It's reported in the unit of $\%$. + +Motion Jitter (Jitter) uses finite differences to compute the jerk ($3^{rd}$ derivative) of the 3D joints. It measures rapid, abrupt changes. It's reported in the unit of $10\,m/s^3$. + +Foot Skating measures erroneous foot sliding.
It thresholds the velocity of the ground truth foot vertices to compute contact frames, and calculates the displacement on the predicted foot vertices during contact. It's reported in the unit of $\text{mm}$ . \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06397/images/00757eb75783a41f1cbdfaa4aff4d3041cf964409225395beaa02325bd9fd550.jpg b/data/2025/2504_06xxx/2504.06397/images/00757eb75783a41f1cbdfaa4aff4d3041cf964409225395beaa02325bd9fd550.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7056487f971d8a683ffef2dc6b426961d320c82 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/00757eb75783a41f1cbdfaa4aff4d3041cf964409225395beaa02325bd9fd550.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:445b5588c573c1fa92d481332a106130c8352e48cda1f9814cfb6d1b67c9ae0a +size 4029 diff --git a/data/2025/2504_06xxx/2504.06397/images/02daf49395bd2b890a48cabb843ab75004decfd12c4023d5ee0b4d1251cfd2f6.jpg b/data/2025/2504_06xxx/2504.06397/images/02daf49395bd2b890a48cabb843ab75004decfd12c4023d5ee0b4d1251cfd2f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9a096084fd43a0c117f916fb903f80f4d2269432 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/02daf49395bd2b890a48cabb843ab75004decfd12c4023d5ee0b4d1251cfd2f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79db44bd46de54b4e49a409ee6be36086f90a3cf165b88609a1ac07819e0bdef +size 19137 diff --git a/data/2025/2504_06xxx/2504.06397/images/0308ef773eef4ff74a60a1ebfdf87d0d192464007bb0372b2e2e0590beaefe8b.jpg b/data/2025/2504_06xxx/2504.06397/images/0308ef773eef4ff74a60a1ebfdf87d0d192464007bb0372b2e2e0590beaefe8b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..171eea20ed0872c550ad7a6a0dc52d3130197a69 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/0308ef773eef4ff74a60a1ebfdf87d0d192464007bb0372b2e2e0590beaefe8b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17bc811efa74e898465a554baa549a0b9f5e590328f0f355e08a3dd58d4e771d +size 28917 diff --git a/data/2025/2504_06xxx/2504.06397/images/0395bd1361ba1d9d5b420e84266a4d0dd1fc97cfc2cb0a78aedcf34a8c0f4e4f.jpg b/data/2025/2504_06xxx/2504.06397/images/0395bd1361ba1d9d5b420e84266a4d0dd1fc97cfc2cb0a78aedcf34a8c0f4e4f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5ef9edf2f2a28826d6d4c1a86129dff6f9f1bd5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/0395bd1361ba1d9d5b420e84266a4d0dd1fc97cfc2cb0a78aedcf34a8c0f4e4f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da4a0acb5fe1c1e92203d43a7e37fccece5089b30c536e33291e4378079eeb96 +size 3246 diff --git a/data/2025/2504_06xxx/2504.06397/images/0ce1f2959db11ef87e142908954c88a9bbc66d4fd12da3a4358a967120790538.jpg b/data/2025/2504_06xxx/2504.06397/images/0ce1f2959db11ef87e142908954c88a9bbc66d4fd12da3a4358a967120790538.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9161863e67bceb14cfee14028304f3c53615aaf7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/0ce1f2959db11ef87e142908954c88a9bbc66d4fd12da3a4358a967120790538.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:991236c476c9f6c7aa899259e5a87d8b21b37c85a3238f2823c0b81ff760c153 +size 211451 diff --git a/data/2025/2504_06xxx/2504.06397/images/10b66dfbccf02f42f4e3b9a2ffd895e955ecf5bab5cfa84ddc478ead506cd34f.jpg 
b/data/2025/2504_06xxx/2504.06397/images/10b66dfbccf02f42f4e3b9a2ffd895e955ecf5bab5cfa84ddc478ead506cd34f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62c08e793cbaa1cd1b1468102b736fb1856074ea --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/10b66dfbccf02f42f4e3b9a2ffd895e955ecf5bab5cfa84ddc478ead506cd34f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6433f9a816a526837cb4481c934dcc8d0942d5d5363cbc36c522e373260b8918 +size 100568 diff --git a/data/2025/2504_06xxx/2504.06397/images/201617bcc60b7ea35f45b6e032b28f2e5f14eeb391b77e8238450717c31c8156.jpg b/data/2025/2504_06xxx/2504.06397/images/201617bcc60b7ea35f45b6e032b28f2e5f14eeb391b77e8238450717c31c8156.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3ff63870eed8ddc2e54353e35d971487e44297a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/201617bcc60b7ea35f45b6e032b28f2e5f14eeb391b77e8238450717c31c8156.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b44172d9ec3c0ec2fe2cfdb4e86f1942443bc056ab8d980e49cfa28fd7f1688 +size 24698 diff --git a/data/2025/2504_06xxx/2504.06397/images/2964c98ed8bbefebb6c7c00d61ec76077da35ec78188b8515b97045c789f956e.jpg b/data/2025/2504_06xxx/2504.06397/images/2964c98ed8bbefebb6c7c00d61ec76077da35ec78188b8515b97045c789f956e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7c3e47a6a1c90ce60756b328995699e79a1a3b8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/2964c98ed8bbefebb6c7c00d61ec76077da35ec78188b8515b97045c789f956e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eee4bd4a138aade5da8b54f767ad93299771409448241688bd4e48a19ec9e6f7 +size 63540 diff --git a/data/2025/2504_06xxx/2504.06397/images/2a3130479b6a0ee73927f3bbb9dfd224f660b0a3f08b9aa59d0f91bf3942754e.jpg b/data/2025/2504_06xxx/2504.06397/images/2a3130479b6a0ee73927f3bbb9dfd224f660b0a3f08b9aa59d0f91bf3942754e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13247fd18a2c62871e0091e269589b3c0bd3371d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/2a3130479b6a0ee73927f3bbb9dfd224f660b0a3f08b9aa59d0f91bf3942754e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03a42a5aedd511700a40c3d5736d9c0d66a7dbee647eaaf9d90958298681f48a +size 2294 diff --git a/data/2025/2504_06xxx/2504.06397/images/2b22fb35cc8785c31aa5d8369e5105b52049a14b0fb90e57307f5f1a0ac3546b.jpg b/data/2025/2504_06xxx/2504.06397/images/2b22fb35cc8785c31aa5d8369e5105b52049a14b0fb90e57307f5f1a0ac3546b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f972b4a051466f530aa1c3f17c19ad83e0d677f9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/2b22fb35cc8785c31aa5d8369e5105b52049a14b0fb90e57307f5f1a0ac3546b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a573d43b4f9e794919bbdf977b6854cada5ddf6b3bf160c0066e042bca0f8e1d +size 5665 diff --git a/data/2025/2504_06xxx/2504.06397/images/4dd0a14461f0856bedb6eedd29c14f07ab7a15e151b7f5a8f9b9fc20f3634e8e.jpg b/data/2025/2504_06xxx/2504.06397/images/4dd0a14461f0856bedb6eedd29c14f07ab7a15e151b7f5a8f9b9fc20f3634e8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f75d2642dbeeb34547128c19ab2c9e9bd117edbb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/4dd0a14461f0856bedb6eedd29c14f07ab7a15e151b7f5a8f9b9fc20f3634e8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4b14138197cbccdaef9b78c7c543621552ef97cdf5e7d0b283c9a02df744c141 +size 15972 diff --git a/data/2025/2504_06xxx/2504.06397/images/5052c9e8ab63f19eb48f844b7af74060fa66875a0636e9684ea47923970d46f8.jpg b/data/2025/2504_06xxx/2504.06397/images/5052c9e8ab63f19eb48f844b7af74060fa66875a0636e9684ea47923970d46f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c98ed74213f8e8ff65d406355a3af85cbff9685 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/5052c9e8ab63f19eb48f844b7af74060fa66875a0636e9684ea47923970d46f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:916cb34ca9fe81dd39a4b98c112087d82888b277be8c32d38397fb659d37d753 +size 60811 diff --git a/data/2025/2504_06xxx/2504.06397/images/54b008e16b117a177ad8cc7b9a849aecad5a5a01d169fab867b30a16c81170d6.jpg b/data/2025/2504_06xxx/2504.06397/images/54b008e16b117a177ad8cc7b9a849aecad5a5a01d169fab867b30a16c81170d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8eaf9cc6603f226957e39075b90cebe0bebbbfa --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/54b008e16b117a177ad8cc7b9a849aecad5a5a01d169fab867b30a16c81170d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c3214d41028c4d3fd2d2dae2795cabe61b36ca1623f0c69038f8925960903fb +size 3513 diff --git a/data/2025/2504_06xxx/2504.06397/images/56545b98164f3d44d755701f2cd0c4d315ce4d8ea2e750b11b796f77a9f6c710.jpg b/data/2025/2504_06xxx/2504.06397/images/56545b98164f3d44d755701f2cd0c4d315ce4d8ea2e750b11b796f77a9f6c710.jpg new file mode 100644 index 0000000000000000000000000000000000000000..986cea21e2db1fc20723bf1b25810a20d57e86f2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/56545b98164f3d44d755701f2cd0c4d315ce4d8ea2e750b11b796f77a9f6c710.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45b31edeb888ae8b307f6d295725595bb14fe8bbcb7dd5d55e896290bd8dc691 +size 21434 diff --git a/data/2025/2504_06xxx/2504.06397/images/756ef6ee3dce2b408d2c590ea77778c0e6c4634ab245800eabe66fc8221dff3c.jpg b/data/2025/2504_06xxx/2504.06397/images/756ef6ee3dce2b408d2c590ea77778c0e6c4634ab245800eabe66fc8221dff3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c6d888bc40efa44fb4ff02e96d997c8e51de367 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/756ef6ee3dce2b408d2c590ea77778c0e6c4634ab245800eabe66fc8221dff3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86c5022c290effb8498df32fd91251de7b0eeaee3a0d538c4627592983b2a3cf +size 14953 diff --git a/data/2025/2504_06xxx/2504.06397/images/786e6190b2e7d369c95bb59543041ca8d17a5c27883afb76fcfa7fd758c59a17.jpg b/data/2025/2504_06xxx/2504.06397/images/786e6190b2e7d369c95bb59543041ca8d17a5c27883afb76fcfa7fd758c59a17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91715ff37034e57530f143acaed69dd9be59eb44 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/786e6190b2e7d369c95bb59543041ca8d17a5c27883afb76fcfa7fd758c59a17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29e5b9168d0370e1ad25dd22a643fa655ee23db599e09b9f258b87d63a62ce5c +size 23753 diff --git a/data/2025/2504_06xxx/2504.06397/images/793495b0ca62fcf69d0eb4f71001d3888dbb2dafe49de988b7bd3fb3108bb57d.jpg b/data/2025/2504_06xxx/2504.06397/images/793495b0ca62fcf69d0eb4f71001d3888dbb2dafe49de988b7bd3fb3108bb57d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bfcd1d02285f23e63e56c312d1cdcdda83ba87f --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06397/images/793495b0ca62fcf69d0eb4f71001d3888dbb2dafe49de988b7bd3fb3108bb57d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16d266068beb290f49c585e99ef6cb282c678a537987959349f81da8aa532a70 +size 77898 diff --git a/data/2025/2504_06xxx/2504.06397/images/7c2f9735852252556d12572991c936f8502ca7b7ceaa2c90fbd13948e95061f2.jpg b/data/2025/2504_06xxx/2504.06397/images/7c2f9735852252556d12572991c936f8502ca7b7ceaa2c90fbd13948e95061f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..596f21a518104ba80705e6d6feb07848a98f0b85 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/7c2f9735852252556d12572991c936f8502ca7b7ceaa2c90fbd13948e95061f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a23498ea1723afeee9851a11186d81de02fe173a999db22b73928a62405cc7a4 +size 5243 diff --git a/data/2025/2504_06xxx/2504.06397/images/83a4c889e13f5a2cefc9fd875f9ef084a162f18ba23dbdac01b8922c059b7608.jpg b/data/2025/2504_06xxx/2504.06397/images/83a4c889e13f5a2cefc9fd875f9ef084a162f18ba23dbdac01b8922c059b7608.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8dd993904257408e0bcefc8abcae327942352274 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/83a4c889e13f5a2cefc9fd875f9ef084a162f18ba23dbdac01b8922c059b7608.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81c65e9ea5c22281039f099833e88a81e18036cec04806de79c915f0e5c43b96 +size 4343 diff --git a/data/2025/2504_06xxx/2504.06397/images/8a24309b5b6b128e1816351cbe5a8342b038c3d351e595a976a66e3fb2c9de9a.jpg b/data/2025/2504_06xxx/2504.06397/images/8a24309b5b6b128e1816351cbe5a8342b038c3d351e595a976a66e3fb2c9de9a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..190cf90a3cb54aa308e11e057a8328b631ecc5b1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/8a24309b5b6b128e1816351cbe5a8342b038c3d351e595a976a66e3fb2c9de9a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2c5ff5cf1483674b445a0f5521fc7d737129a6589c8103c0444b2586794109f +size 3141 diff --git a/data/2025/2504_06xxx/2504.06397/images/8c6c59d99ccd878b1a6ed924cf39f0de820cb6afdd9d730c0a6e96d7833acb42.jpg b/data/2025/2504_06xxx/2504.06397/images/8c6c59d99ccd878b1a6ed924cf39f0de820cb6afdd9d730c0a6e96d7833acb42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..60d292ced2f25480c9422242215c9e817ac7ba71 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/8c6c59d99ccd878b1a6ed924cf39f0de820cb6afdd9d730c0a6e96d7833acb42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4f858f6a9f39cb2159bcacc0ccf1516ce48a8de7a6a6ea5d24bd856de2dafee +size 28437 diff --git a/data/2025/2504_06xxx/2504.06397/images/8dc9c8e1f968d7bd44a9a10dc3858f4276ea423d4a79dd669471b9a3517f9c4f.jpg b/data/2025/2504_06xxx/2504.06397/images/8dc9c8e1f968d7bd44a9a10dc3858f4276ea423d4a79dd669471b9a3517f9c4f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7d0bdc7b52a279ab9ae7999053a15d6298d4cc3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/8dc9c8e1f968d7bd44a9a10dc3858f4276ea423d4a79dd669471b9a3517f9c4f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:215cc75b9523d31f79afb6d81e3a33287bfbc8cc627097e341d50c3fd438407d +size 4377 diff --git a/data/2025/2504_06xxx/2504.06397/images/9270978d52c1cf9dadd732232e763d94d0b8368cc6d89f133ad0335f742112de.jpg 
b/data/2025/2504_06xxx/2504.06397/images/9270978d52c1cf9dadd732232e763d94d0b8368cc6d89f133ad0335f742112de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01d108dd46515170f3cd721a11dba3de2425e076 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/9270978d52c1cf9dadd732232e763d94d0b8368cc6d89f133ad0335f742112de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16b40a7c50315a11b015d45f640c8b8cce8ed61b550c1778972421582ccdd3bf +size 25599 diff --git a/data/2025/2504_06xxx/2504.06397/images/9b61919247d1a98b64ccde133d822184e564cd1b957aa34fb2292469afd31d76.jpg b/data/2025/2504_06xxx/2504.06397/images/9b61919247d1a98b64ccde133d822184e564cd1b957aa34fb2292469afd31d76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bce9b9f734a3ea9e189c423b20c190274361647 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/9b61919247d1a98b64ccde133d822184e564cd1b957aa34fb2292469afd31d76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c569174de604768e4f9ae7671cf2a5739c4deb2ebb369cc92d4fc403cd3fdf9 +size 3776 diff --git a/data/2025/2504_06xxx/2504.06397/images/9e599b70e43046dd11573a4225224c1af4fd86746468273033a022f96c5b671e.jpg b/data/2025/2504_06xxx/2504.06397/images/9e599b70e43046dd11573a4225224c1af4fd86746468273033a022f96c5b671e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b23d8a201e150cac992b229313253e1625f3b34 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/9e599b70e43046dd11573a4225224c1af4fd86746468273033a022f96c5b671e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a1694b99b05e927e045b12d0810ca7a884ef128e1db2c727756fdd373b4d408 +size 5051 diff --git a/data/2025/2504_06xxx/2504.06397/images/b8ababd12ee3e02589e61d1e939e094be5aef68d93977f7e94d2939122583623.jpg b/data/2025/2504_06xxx/2504.06397/images/b8ababd12ee3e02589e61d1e939e094be5aef68d93977f7e94d2939122583623.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98d867f822af6a0920bdefa9c1ceceeb6a7486c4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/b8ababd12ee3e02589e61d1e939e094be5aef68d93977f7e94d2939122583623.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c7dc5fa7cc9040124f6a2703f84d05957c3b44e5ff3aa9d71036629cb1819fc +size 5596 diff --git a/data/2025/2504_06xxx/2504.06397/images/bed63959a43ef9b6517723265c8d98562acb316fc42046804d36f64950b13e1e.jpg b/data/2025/2504_06xxx/2504.06397/images/bed63959a43ef9b6517723265c8d98562acb316fc42046804d36f64950b13e1e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ba8893d34464fba8b9ea6abba54f46e889b8698 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/bed63959a43ef9b6517723265c8d98562acb316fc42046804d36f64950b13e1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7675b80aab2a624d8fbb3bbe94306af70a40b3f3f7ca46d97f537761d64903b9 +size 22772 diff --git a/data/2025/2504_06xxx/2504.06397/images/c07db1a1be1b7572ff0cb1e6b085d4d5921d6ebfd8b78c274643e0514daa440c.jpg b/data/2025/2504_06xxx/2504.06397/images/c07db1a1be1b7572ff0cb1e6b085d4d5921d6ebfd8b78c274643e0514daa440c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1949448f5b63bd16310197a8f81ba5894d00a29 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/c07db1a1be1b7572ff0cb1e6b085d4d5921d6ebfd8b78c274643e0514daa440c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4bcaca53d343b112c6216d29fcaa097a04b76071c46ca799bee63162014e7c43 +size 2520 diff --git a/data/2025/2504_06xxx/2504.06397/images/c1fa09e71427461b1e5ebaacdbf8c8294d86641a77d6f9e8be765e32ec0660a4.jpg b/data/2025/2504_06xxx/2504.06397/images/c1fa09e71427461b1e5ebaacdbf8c8294d86641a77d6f9e8be765e32ec0660a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11ac48a1e757d82b1b84ab08c95872da8c975d3c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/c1fa09e71427461b1e5ebaacdbf8c8294d86641a77d6f9e8be765e32ec0660a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37616ed6a19c0e84b028f4cc90d6044930a127e967d887f87bd7ef65093e73d7 +size 27934 diff --git a/data/2025/2504_06xxx/2504.06397/images/d10637f2cdf89f6480d91704e0fbdcfddc438a2a133c102c5510813375e9c9f7.jpg b/data/2025/2504_06xxx/2504.06397/images/d10637f2cdf89f6480d91704e0fbdcfddc438a2a133c102c5510813375e9c9f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a1c85f70f8e6edde5381f846df082ebfced7803 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/d10637f2cdf89f6480d91704e0fbdcfddc438a2a133c102c5510813375e9c9f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a852ccff74781b9acd41f61f94d0609a908b6a67815185170f1c68f1f8aecc4f +size 3440 diff --git a/data/2025/2504_06xxx/2504.06397/images/d2b6b299311d4fd2290f405fa713aa218f885e6511c964db332d366023fac5df.jpg b/data/2025/2504_06xxx/2504.06397/images/d2b6b299311d4fd2290f405fa713aa218f885e6511c964db332d366023fac5df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..244b79b8aeb1c679484dd97fe577467db720dcca --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/d2b6b299311d4fd2290f405fa713aa218f885e6511c964db332d366023fac5df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd82ca8ac69aa43e50e907fcf9efdeca14794891ff6f0b47f8b95b6e262a3476 +size 3292 diff --git a/data/2025/2504_06xxx/2504.06397/images/d460b7604ea14b3e3eeca7c6c5ccbdf17a95d7dc5d115e02e0e0b1efc67f4b85.jpg b/data/2025/2504_06xxx/2504.06397/images/d460b7604ea14b3e3eeca7c6c5ccbdf17a95d7dc5d115e02e0e0b1efc67f4b85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93c5d64e7a3615a5e4ca0f51ac3d588eea2c8add --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/d460b7604ea14b3e3eeca7c6c5ccbdf17a95d7dc5d115e02e0e0b1efc67f4b85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dd327553305cf58e9264ac106f0637c123d5a38bd38691ab2398ff179af9699 +size 20385 diff --git a/data/2025/2504_06xxx/2504.06397/images/de787743b2bb7ce67c4891b0fbfa4cdc20aa290b5623a0ace587eccb9ae2782e.jpg b/data/2025/2504_06xxx/2504.06397/images/de787743b2bb7ce67c4891b0fbfa4cdc20aa290b5623a0ace587eccb9ae2782e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe5f5f761fc30f6eaee638cc8811da992b634278 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/de787743b2bb7ce67c4891b0fbfa4cdc20aa290b5623a0ace587eccb9ae2782e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5419d14663e1e6c957c3b22946c9028be0902406e1ee90b40d246d2da7f106e +size 3808 diff --git a/data/2025/2504_06xxx/2504.06397/images/e220cdee247a380a95636ef96ef2843380003bc7c28c9936654f9da4a430c1d9.jpg b/data/2025/2504_06xxx/2504.06397/images/e220cdee247a380a95636ef96ef2843380003bc7c28c9936654f9da4a430c1d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66b4fc91e40c5254b63977f91709ddb68a98e294 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06397/images/e220cdee247a380a95636ef96ef2843380003bc7c28c9936654f9da4a430c1d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cbe82715e9c7f8dfc419d79ec6dfa186093a1e478a7171540a18d65dc49a64b +size 2857 diff --git a/data/2025/2504_06xxx/2504.06397/images/e34939023297e096bd1e236cab0e739bc791d6b9177d30fef27d56dfbf3b1f69.jpg b/data/2025/2504_06xxx/2504.06397/images/e34939023297e096bd1e236cab0e739bc791d6b9177d30fef27d56dfbf3b1f69.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bc79018665c30604c4a1c9a8be78066538a04bd --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/e34939023297e096bd1e236cab0e739bc791d6b9177d30fef27d56dfbf3b1f69.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d48f5334cf84dd545d4093ebf6d431778031c21397785ab6a5384ad5c7ca6c9 +size 4313 diff --git a/data/2025/2504_06xxx/2504.06397/images/f6ba05c518e44eb65aca5d52635611bd11aa77d87bb42893fe1dfbd26de0b8ec.jpg b/data/2025/2504_06xxx/2504.06397/images/f6ba05c518e44eb65aca5d52635611bd11aa77d87bb42893fe1dfbd26de0b8ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..357e7bb692093b06a2ab9d3cc72e864cbe02eaf8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/f6ba05c518e44eb65aca5d52635611bd11aa77d87bb42893fe1dfbd26de0b8ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fce8cc41dcd7797db6442593a74cfa7e766e3f476091509fa31554c18ff8e17 +size 7985 diff --git a/data/2025/2504_06xxx/2504.06397/images/f8f9cf2acc709924ae2bab9efea5c107298d4e5de4209433bf74abca05fca567.jpg b/data/2025/2504_06xxx/2504.06397/images/f8f9cf2acc709924ae2bab9efea5c107298d4e5de4209433bf74abca05fca567.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a6dfce6b5159d797d1c175ab46700573c4f5c2c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/images/f8f9cf2acc709924ae2bab9efea5c107298d4e5de4209433bf74abca05fca567.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6b545bc2dedd99fb4dfa365fe502f60eac8070f862f2a3f6fc22ecc8a0c5bf4 +size 245818 diff --git a/data/2025/2504_06xxx/2504.06397/layout.json b/data/2025/2504_06xxx/2504.06397/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c1448efa9fc9cec1bc3fbb6ef5de1311ed5fe1e3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06397/layout.json @@ -0,0 +1,10719 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 149, + 103, + 462, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 103, + 462, + 121 + ], + "spans": [ + { + "bbox": [ + 149, + 103, + 462, + 121 + ], + "type": "text", + "content": "PromptHMR: Promptable Human Mesh Recovery" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "spans": [ + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "type": "text", + "content": "Yufu Wang" + }, + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "type": "inline_equation", + "content": "^{1,4}" + }, + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "type": "text", + "content": " Yu Sun" + }, + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "type": "text", + "content": " Priyanka Patel" + }, + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 146, + 142, + 463, 
+ 157 + ], + "type": "text", + "content": " Kostas Daniilidis" + }, + { + "bbox": [ + 146, + 142, + 463, + 157 + ], + "type": "inline_equation", + "content": "^{4,5}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 192, + 157, + 417, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 157, + 417, + 171 + ], + "spans": [ + { + "bbox": [ + 192, + 157, + 417, + 171 + ], + "type": "text", + "content": "Michael J. Black1,2 Muhammed Kocabas1,2,3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "spans": [ + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "text", + "content": " Meshcapade " + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "text", + "content": " MPI for Intelligent Systems " + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "text", + "content": " ETH Zürich " + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "text", + "content": " University of Pennsylvania " + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 66, + 171, + 541, + 185 + ], + "type": "text", + "content": " Archimedes" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 169, + 186, + 438, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 186, + 438, + 200 + ], + "spans": [ + { + "bbox": [ + 169, + 186, + 438, + 200 + ], + "type": "text", + "content": "https://yufu-wang.github.io/phmr-page" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 78, + 212, + 207, + 304 + ], + "blocks": [ + { + "bbox": [ + 78, + 212, + 207, + 304 + ], + "lines": [ + { + "bbox": [ + 78, + 212, + 207, + 304 + ], + "spans": [ + { + "bbox": [ + 78, + 212, + 207, + 304 + ], + "type": "image", + "image_path": "9270978d52c1cf9dadd732232e763d94d0b8368cc6d89f133ad0335f742112de.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 306, + 198, + 317 + ], + "lines": [ + { + "bbox": [ + 107, + 306, + 198, + 317 + ], + "spans": [ + { + "bbox": [ + 107, + 306, + 198, + 317 + ], + "type": "text", + "content": "image " + }, + { + "bbox": [ + 107, + 306, + 198, + 317 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 107, + 306, + 198, + 317 + ], + "type": "text", + "content": " box prompts" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 214, + 213, + 352, + 305 + ], + "blocks": [ + { + "bbox": [ + 214, + 213, + 352, + 305 + ], + "lines": [ + { + "bbox": [ + 214, + 213, + 352, + 305 + ], + "spans": [ + { + "bbox": [ + 214, + 213, + 352, + 305 + ], + "type": "image", + "image_path": "201617bcc60b7ea35f45b6e032b28f2e5f14eeb391b77e8238450717c31c8156.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 244, + 307, + 335, + 317 + ], + "lines": [ + { + "bbox": [ + 244, + 307, + 335, + 317 + ], + "spans": [ + { + "bbox": [ + 244, + 307, 
+ 335, + 317 + ], + "type": "text", + "content": "image ↓ box prompts" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 359, + 213, + 429, + 305 + ], + "blocks": [ + { + "bbox": [ + 359, + 213, + 429, + 305 + ], + "lines": [ + { + "bbox": [ + 359, + 213, + 429, + 305 + ], + "spans": [ + { + "bbox": [ + 359, + 213, + 429, + 305 + ], + "type": "image", + "image_path": "f6ba05c518e44eb65aca5d52635611bd11aa77d87bb42893fe1dfbd26de0b8ec.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 307, + 428, + 316 + ], + "lines": [ + { + "bbox": [ + 361, + 307, + 428, + 316 + ], + "spans": [ + { + "bbox": [ + 361, + 307, + 428, + 316 + ], + "type": "text", + "content": "image ↓ masks" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 363, + 323, + 428, + 338 + ], + "lines": [ + { + "bbox": [ + 363, + 323, + 428, + 338 + ], + "spans": [ + { + "bbox": [ + 363, + 323, + 428, + 338 + ], + "type": "text", + "content": "PromptHMR" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 444, + 213, + 527, + 305 + ], + "blocks": [ + { + "bbox": [ + 444, + 213, + 527, + 305 + ], + "lines": [ + { + "bbox": [ + 444, + 213, + 527, + 305 + ], + "spans": [ + { + "bbox": [ + 444, + 213, + 527, + 305 + ], + "type": "image", + "image_path": "02daf49395bd2b890a48cabb843ab75004decfd12c4023d5ee0b4d1251cfd2f6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 307, + 529, + 316 + ], + "lines": [ + { + "bbox": [ + 452, + 307, + 529, + 316 + ], + "spans": [ + { + "bbox": [ + 452, + 307, + 529, + 316 + ], + "type": "text", + "content": "image box+text" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 454, + 323, + 520, + 337 + ], + "lines": [ + { + "bbox": [ + 454, + 323, + 520, + 337 + ], + "spans": [ + { + "bbox": [ + 454, + 323, + 520, + 337 + ], + "type": "text", + "content": "PromptHMR" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 78, + 352, + 206, + 445 + ], + "blocks": [ + { + "bbox": [ + 110, + 323, + 176, + 338 + ], + "lines": [ + { + "bbox": [ + 110, + 323, + 176, + 338 + ], + "spans": [ + { + "bbox": [ + 110, + 323, + 176, + 338 + ], + "type": "text", + "content": "PromptHMR" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 352, + 206, + 445 + ], + "lines": [ + { + "bbox": [ + 78, + 352, + 206, + 445 + ], + "spans": [ + { + "bbox": [ + 78, + 352, + 206, + 445 + ], + "type": "image", + "image_path": "4dd0a14461f0856bedb6eedd29c14f07ab7a15e151b7f5a8f9b9fc20f3634e8e.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 214, + 353, + 352, + 445 + ], + "blocks": [ + { + "bbox": [ + 249, + 323, + 314, + 336 + ], + "lines": [ + { + "bbox": [ + 249, + 323, + 314, + 336 + ], + "spans": [ + { + "bbox": [ + 249, + 323, + 314, + 336 + ], + "type": "text", + "content": "PromptHMR" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 214, + 353, + 352, + 445 + ], + "lines": [ + { + "bbox": [ + 214, + 353, + 352, + 445 + ], + "spans": [ + { + "bbox": [ + 214, + 353, + 352, + 445 + ], + "type": "image", + 
"image_path": "756ef6ee3dce2b408d2c590ea77778c0e6c4634ab245800eabe66fc8221dff3c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 451, + 555, + 518 + ], + "lines": [ + { + "bbox": [ + 54, + 451, + 555, + 518 + ], + "spans": [ + { + "bbox": [ + 54, + 451, + 555, + 518 + ], + "type": "text", + "content": "Figure 1. PromptHMR is a promptable human pose and shape (HPS) estimation method that processes images with spatial or semantic prompts. It takes \"side information\" readily available from vision-language models or user input to improve the accuracy and robustness of 3D HPS. PromptHMR recovers human pose and shape from spatial prompts such as (a) face bounding boxes, (b) partial or complete person detection boxes, or (c) segmentation masks. It refines its predictions using semantic prompts such as (c) person-person interaction labels for close contact scenarios, or (d) natural language descriptions of body shape to improve body shape predictions. Both image and video versions of PromptHMR achieve state-of-the-art accuracy." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 359, + 368, + 430, + 445 + ], + "blocks": [ + { + "bbox": [ + 359, + 368, + 430, + 445 + ], + "lines": [ + { + "bbox": [ + 359, + 368, + 430, + 445 + ], + "spans": [ + { + "bbox": [ + 359, + 368, + 430, + 445 + ], + "type": "image", + "image_path": "7c2f9735852252556d12572991c936f8502ca7b7ceaa2c90fbd13948e95061f2.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 444, + 367, + 527, + 445 + ], + "blocks": [ + { + "bbox": [ + 444, + 367, + 527, + 445 + ], + "lines": [ + { + "bbox": [ + 444, + 367, + 527, + 445 + ], + "spans": [ + { + "bbox": [ + 444, + 367, + 527, + 445 + ], + "type": "image", + "image_path": "9e599b70e43046dd11573a4225224c1af4fd86746468273033a022f96c5b671e.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 152, + 539, + 200, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 539, + 200, + 552 + ], + "spans": [ + { + "bbox": [ + 152, + 539, + 200, + 552 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 55, + 570, + 297, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 297, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 297, + 713 + ], + "type": "text", + "content": "Human pose and shape (HPS) estimation presents challenges in diverse scenarios such as crowded scenes, person-person interactions, and single-view reconstruction. Existing approaches lack mechanisms to incorporate auxiliary \"side information\" that could enhance reconstruction accuracy in such challenging scenarios. Furthermore, the most accurate methods rely on cropped person detections and cannot exploit scene context while methods that process the whole image often fail to detect people and are less accurate than methods that use crops. 
While recent language-based methods explore HPS reasoning through large language or vision-language models, their metric accuracy is well below" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 540, + 556, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 556, + 721 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 556, + 721 + ], + "type": "text", + "content": "the state of the art. In contrast, we present PromptHMR, a transformer-based promptable method that reformulates HPS estimation through spatial and semantic prompts. Our method processes full images to maintain scene context and accepts multiple input modalities: spatial prompts like bounding boxes and masks, and semantic prompts like language descriptions or interaction labels. PromptHMR demonstrates robust performance across challenging scenarios: estimating people from bounding boxes as small as faces in crowded scenes, improving body shape estimation through language descriptions, modeling person-person interactions, and producing temporally coherent motions in videos. Experiments on benchmarks show that PromptHMR achieves state-of-the-art performance while offering flexible prompt-based control over the HPS estimation process." + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 202, + 36, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 202, + 36, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 202, + 36, + 559 + ], + "type": "text", + "content": "arXiv:2504.06397v2 [cs.CV] 24 May 2025" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 136, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 136, + 84 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 136, + 84 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 294, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 294, + 222 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 294, + 222 + ], + "type": "text", + "content": "The estimation of 3D human pose and shape (HPS) is classically viewed as regressing the parameters of shape and pose from pixels. In particular, most methods take a tightly cropped image of a person and output the pose and shape in camera coordinates. While the accuracy of such methods has increased rapidly, they do not address the whole problem. In particular, an HPS method should be able to take an image or video containing complex human-human and human-scene interactions, return the parameters of every person in the scene, and place these people in a consistent global coordinate frame." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 223, + 294, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 223, + 294, + 318 + ], + "spans": [ + { + "bbox": [ + 55, + 223, + 294, + 318 + ], + "type": "text", + "content": "Our key observation is that the classical \"pixels to parameters\" formulation of the problem is too narrow. Today, we have large vision-language foundation models (VLMs) that understand a great deal about images and what people are doing in them. What these models lack, however, is an understanding of 3D human pose and shape. Recent work [10, 16] has tried to bring together VLMs and 3D HPS but with 3D accuracy well below the best classical methods." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 319, + 295, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 319, + 295, + 533 + ], + "spans": [ + { + "bbox": [ + 55, + 319, + 295, + 533 + ], + "type": "text", + "content": "Consequently, we need to think about the problem in a different way and ask whether we can exploit readily available side information (e.g. provided by a VLM) to improve 3D HPS regression robustness, usefulness, and accuracy. To that end, we develop a novel \"promptable\" HPS architecture called PromptHMR. Consider the sample images shown in Fig. 1. In crowded scenes, existing person detection methods struggle, while face detection methods remain reliable. When people closely interact, their body parts overlap and occlude each other, introducing ambiguity in pose estimation. Moreover, 3D body shape estimation from monocular views is challenging due to perspective ambiguity. In all these cases, we can extract cues, or prompts, that provide \"side information\" that can help an HPS method better analyze the scene. PromptHMR formalizes this intuition by combining image evidence with different types of spatial and semantic information that can come from either humans or AI systems such as VLMs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 534, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 534, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 534, + 295, + 689 + ], + "type": "text", + "content": "Specifically, our approach combines three key components: (1) a vision transformer that extracts features from high-resolution full images to preserve scene context, (2) a multi-modal prompt encoder that processes spatial and semantic inputs, and (3) a transformer decoder that attends to both prompt and image tokens to generate SMPL-X [47] body parameters. This design addresses the limitations of cropped-image HPS methods by processing full images using side information in the form of prompts. It addresses the challenges that full-image HPS methods have in detecting all people in a scene by accepting readily available bounding boxes. Last, our method incorporates auxiliary semantic information through text descriptions or interaction labels." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "By combining spatial and semantic prompting, our method offers a powerful and versatile approach to 3D HPS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 553, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 239 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 239 + ], + "type": "text", + "content": "estimation from the whole image. At test time, we show that this promptable structure (1) can take various bounding boxes or segmentation masks to recover full body HPS in a robust way, (2) improve its body shape predictions by using textual descriptions as input, (3) is capable of modeling person-person close interaction directly in the regression process, and (4) uses full image context to reconstruct people coherently in the camera space and the world space. Our model can handle video by incorporating temporal transformer layers at the SMPL-X decoding phase, yielding temporally stable and smooth motions. 
Last, following TRAM [67], we combine the temporal version of our model with metric SLAM to estimate human motion in world coordinates." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 239, + 553, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 239, + 553, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 239, + 553, + 407 + ], + "type": "text", + "content": "We make several key design choices that make PromptHMR successful. To achieve robustness to different spatial inputs, we train our model by simulating noisy full-body and face-region bounding boxes. For improved body shape estimation, we leverage SHAPY [8] to generate automatic body shape descriptions for training samples and process them with a pretrained text encoder [50]. To enhance person-person interaction reconstruction, we use segmentation masks as more precise spatial prompts and develop person-person attention layers that operate between prompted people, producing coherent reconstructions of close interactions. Through random masking of different input types during training, our model learns to work with any combination of prompts at test time." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 407, + 553, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 407, + 553, + 491 + ], + "spans": [ + { + "bbox": [ + 313, + 407, + 553, + 491 + ], + "type": "text", + "content": "Quantitative experiments on the EMDB [25], 3DPW [65], RICH [21], Hi4D [71], CHI3D [17] and HBW [8] benchmark datasets demonstrate that our method outperforms state-of-the-art (SOTA) approaches and strong baselines. We also provide many qualitative examples of in-the-wild images and videos that illustrate the robustness and generalization of PromptHMR." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 491, + 553, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 491, + 553, + 623 + ], + "spans": [ + { + "bbox": [ + 313, + 491, + 553, + 623 + ], + "type": "text", + "content": "By moving away from the pure pixels-to-parameters approach, PromptHMR not only achieves a new SOTA, it shows a new way of improving both accuracy and robustness by leveraging side information that is easily available. One can think of this as a collaboration between VLMs, which know a lot about people in images but not in 3D, and a metric regressor that knows a lot about 3D humans but not about the semantics of what they do. We show that this combination has significant upside potential to increase both generality and accuracy. Our code and model are available for research purposes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 633, + 400, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 633, + 400, + 645 + ], + "spans": [ + { + "bbox": [ + 313, + 633, + 400, + 645 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 654, + 553, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 553, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 553, + 715 + ], + "type": "text", + "content": "Human pose and shape estimation from images. Existing methods for human pose and shape (HPS) estimation can be broadly categorized into two main approaches. 
The first [6, 18, 23, 29, 30, 32-34, 40, 66, 73] uses a tightly cropped image of an individual as input, and estimates" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "content": "pose and shape in camera coordinates. While effective for isolated individuals, this approach discards scene context that is essential to resolve human pose in cases of occlusion, severe overlap and close interaction in multi-person scenes [17, 71]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 133, + 294, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 133, + 294, + 242 + ], + "spans": [ + { + "bbox": [ + 55, + 133, + 294, + 242 + ], + "type": "text", + "content": "The second category [2, 22, 57, 59–61] build upon object detection frameworks [5, 13] to jointly detect humans and estimate their pose and shape parameters. Having access to the entire image, they can better perceive occluded individuals and infer depth relationships, but they often suffer from detection failures due to the difficulty in simultaneously performing detection and reconstruction. Our \"promptable\" architecture leverages detection box prompts to resolve such conflicts while having access to the entire scene context." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 243, + 294, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 243, + 294, + 327 + ], + "spans": [ + { + "bbox": [ + 55, + 243, + 294, + 327 + ], + "type": "text", + "content": "Human pose and shape estimation from video. Methods for human motion estimation from video can also be divided into two main categories. The first [7, 24, 28, 38, 58] focuses on estimating smooth human motion in camera space. These methods build upon single-person HPS estimation approaches [23, 32] by adding temporal layers during the SMPL decoding phase to introduce temporal coherence." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 327, + 294, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 327, + 294, + 460 + ], + "spans": [ + { + "bbox": [ + 55, + 327, + 294, + 460 + ], + "type": "text", + "content": "More recent methods estimate human motion in world coordinates from videos captured with dynamic cameras. These methods follow a two-stage approach, first estimating camera motion using SLAM techniques [19, 20, 41, 42, 62, 63], and then leveraging human motion priors to optimize the human world motion [31, 70, 72]. Others [53, 54] learn temporal models to directly regress human world motion from image and camera features. Still others [67, 74] use monocular metric depth estimation to solve for the scale of camera motion and transform human motion from camera space to world coordinates." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 460, + 294, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 460, + 294, + 533 + ], + "spans": [ + { + "bbox": [ + 55, + 460, + 294, + 533 + ], + "type": "text", + "content": "In our approach, we extend PromptHMR to video by taking the SMPL-X output tokens and utilizing a temporal transformer module to estimate temporally stable and smooth human motion and translation in camera space. 
We follow TRAM [67] to transform human motion to world coordinates due to its simplicity and effectiveness." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "type": "text", + "content": "Semantic reasoning about 3D humans in images. Recent methods explore combining different types of semantic information, such as language descriptions and knowledge of person-person interactions, to improve reasoning about 3D humans from images and videos. For example, ChatPose [16] follows the common approach of visual language models (VLMs) [36] by fine-tuning a large language model (LLM) with a combination of images and tokens to estimate SMPL parameters. In a similar direction, PoseEmbroider [10] is a multi-modal framework that aligns image, 3D pose, and text representations in a shared latent space. While ChatPose focuses on combining high-level scene reasoning with 3D HPS, PoseEmbroider exploits detailed language descriptions of human pose. While promising, neither method achieves SOTA accuracy on the HPS task. Note" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 553, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 108 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 108 + ], + "type": "text", + "content": "that many other methods relate language to human pose or motion, without considering images [1, 9, 37, 48, 64], but these are outside our scope." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 108, + 553, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 108, + 553, + 251 + ], + "spans": [ + { + "bbox": [ + 313, + 108, + 553, + 251 + ], + "type": "text", + "content": "Additionally, several methods [8, 49, 55] focus on modeling the relationship between SMPL body shape and natural language descriptions. These methods show that language descriptions and images can provide complementary information to solve this task. Other approaches, such as BUDDI [43] and ProsePose [56], address the challenge of estimating person-person interactions. BUDDI is an optimization-based approach that leverages diffusion model as a prior over interacting people, while ProsePose queries a VLM to estimate contact points on the human body surface and uses these contact points to guide an optimization process for improving human interaction." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 251, + 553, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 251, + 553, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 251, + 553, + 372 + ], + "type": "text", + "content": "Overall, methods like ChatPose [16] and PoseEmbroider [10] are promising steps toward jointly learning the relationship between vision, language, and 3D humans, but their understanding of 3D humans remains limited, as indicated by their relatively low 3D pose accuracy. Meanwhile, SHAPY [8], BodyShapeGPT [49], and BodyTalk [55] focus solely on exploring the relationship between SMPL body shape and natural language. BUDDI and ProsePose are post-processing approaches for interaction that do not directly reason using image information." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 372, + 553, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 372, + 553, + 431 + ], + "spans": [ + { + "bbox": [ + 313, + 372, + 553, + 431 + ], + "type": "text", + "content": "Our approach addresses the limitations of these methods by training a single model capable of flexible prompting that achieves state-of-the-art (SOTA) performance, not only on standard HPS benchmarks but also on benchmarks tailored to body shape and person-person interaction." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 441, + 370, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 441, + 370, + 452 + ], + "spans": [ + { + "bbox": [ + 314, + 441, + 370, + 452 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 460, + 553, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 460, + 553, + 509 + ], + "spans": [ + { + "bbox": [ + 313, + 460, + 553, + 509 + ], + "type": "text", + "content": "Given an image " + }, + { + "bbox": [ + 313, + 460, + 553, + 509 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 313, + 460, + 553, + 509 + ], + "type": "text", + "content": " containing " + }, + { + "bbox": [ + 313, + 460, + 553, + 509 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 460, + 553, + 509 + ], + "type": "text", + "content": " people and a set of prompts, our main goal is to recover the pose, shape, and locations of the people in the camera space to form a coherent human-centric 3D scene. Figure 2 shows an overview." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 514, + 470, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 470, + 528 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 470, + 528 + ], + "type": "text", + "content": "3.1. Promptable mesh regression" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "text", + "content": "We adopt SMPL-X [47] to represent each person " + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "text", + "content": " in the 3D space, including the orientation " + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "inline_equation", + "content": "\\phi_i \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "text", + "content": ", local body pose " + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "inline_equation", + "content": "\\theta_i \\in \\mathbb{R}^{22 \\times 3}" + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "text", + "content": ", shape " + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "inline_equation", + "content": "\\beta_i \\in \\mathbb{R}^{10}" + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "text", + "content": ", and translation " + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "inline_equation", + "content": "\\tau_i \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "text", + "content": " in the camera space. We do not include face and hand parameters in this work. 
Each human " + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "inline_equation", + "content": "H_i" + }, + { + "bbox": [ + 313, + 533, + 553, + 605 + ], + "type": "text", + "content": " is mapped to a 3D mesh with the differentiable SMPL-X layer." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 390, + 610, + 553, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 610, + 553, + 624 + ], + "spans": [ + { + "bbox": [ + 390, + 610, + 553, + 624 + ], + "type": "interline_equation", + "content": "H _ {i} = \\left\\{\\phi_ {i}, \\theta_ {i}, \\beta_ {i}, \\tau_ {i} \\right\\}. \\tag {1}", + "image_path": "54b008e16b117a177ad8cc7b9a849aecad5a5a01d169fab867b30a16c81170d6.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "text", + "content": "Each person can be prompted with spatial and semantic prompts. Spatial prompts include a bounding box " + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "inline_equation", + "content": "b_{i} \\in \\mathbb{R}^{2 \\times 2}" + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "text", + "content": " (the two corners) and a segmentation mask " + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "inline_equation", + "content": "m_{i} \\in \\mathbb{R}^{h \\times w}" + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "text", + "content": ". Semantic prompts consist of text and two-person interaction labels. The text prompt is the CLIP embedding " + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "inline_equation", + "content": "t_{i}" + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "text", + "content": " of a sentence describing the body shape. The interaction prompt is a binary variable " + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "inline_equation", + "content": "k_{i}" + }, + { + "bbox": [ + 313, + 630, + 555, + 714 + ], + "type": "text", + "content": " indicating whether two" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 72, + 550, + 188 + ], + "blocks": [ + { + "bbox": [ + 58, + 72, + 550, + 188 + ], + "lines": [ + { + "bbox": [ + 58, + 72, + 550, + 188 + ], + "spans": [ + { + "bbox": [ + 58, + 72, + 550, + 188 + ], + "type": "image", + "image_path": "5052c9e8ab63f19eb48f844b7af74060fa66875a0636e9684ea47923970d46f8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 192, + 555, + 258 + ], + "lines": [ + { + "bbox": [ + 55, + 192, + 555, + 258 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 555, + 258 + ], + "type": "text", + "content": "Figure 2. Method overview. PromptHMR estimates SMPL-X parameters for each person in an image based on various types of prompts, such as boxes, language descriptions, and person-person interaction cues. Given an image and prompts, we utilize a vision transformer to generate image embeddings and mask and prompt encoders to map different types of prompts to tokens. Optionally, camera intrinsics can be embedded along with the image embeddings. The image embeddings and prompt tokens are then fed to the SMPL-X decoder. 
The SMPL-X decoder is a transformer-based module that attends to both the image and prompt tokens to estimate SMPL-X parameters. Note that the language and interaction prompts are optional, but providing them enhances the accuracy of the estimated SMPL-X parameters." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 274, + 296, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 274, + 296, + 321 + ], + "spans": [ + { + "bbox": [ + 55, + 274, + 296, + 321 + ], + "type": "text", + "content": "people are in close contact. While semantic prompts are optional, each human needs at least one spatial prompt to be reconstructed. Overall, the input prompts are represented as " + }, + { + "bbox": [ + 55, + 274, + 296, + 321 + ], + "type": "inline_equation", + "content": "P_{i}" + }, + { + "bbox": [ + 55, + 274, + 296, + 321 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 326, + 294, + 344 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 326, + 294, + 344 + ], + "spans": [ + { + "bbox": [ + 133, + 326, + 294, + 344 + ], + "type": "interline_equation", + "content": "P _ {i} \\subseteq \\left\\{b _ {i}, m _ {i}, t _ {i}, k _ {i} \\right\\} \\tag {2}", + "image_path": "d10637f2cdf89f6480d91704e0fbdcfddc438a2a133c102c5510813375e9c9f7.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 342, + 217, + 353 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 342, + 217, + 353 + ], + "spans": [ + { + "bbox": [ + 138, + 342, + 217, + 353 + ], + "type": "interline_equation", + "content": "b _ {i} \\in P _ {i} \\text {o r} m _ {i} \\in P _ {i}", + "image_path": "c07db1a1be1b7572ff0cb1e6b085d4d5921d6ebfd8b78c274643e0514daa440c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 357, + 296, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 357, + 296, + 393 + ], + "spans": [ + { + "bbox": [ + 55, + 357, + 296, + 393 + ], + "type": "text", + "content": "Promptable human mesh recovery (PromptHMR) is defined as a learnable function that maps an image and a set of prompts to a set of 3D humans" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 397, + 294, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 397, + 294, + 412 + ], + "spans": [ + { + "bbox": [ + 115, + 397, + 294, + 412 + ], + "type": "interline_equation", + "content": "f: \\left(I, \\left\\{P _ {i} \\right\\} _ {i = 1} ^ {N}\\right)\\rightarrow \\left\\{H _ {i} \\right\\} _ {i = 1} ^ {N}. \\tag {3}", + "image_path": "e34939023297e096bd1e236cab0e739bc791d6b9177d30fef27d56dfbf3b1f69.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 416, + 296, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 416, + 296, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 416, + 296, + 441 + ], + "type": "text", + "content": "This task definition integrates all available contexts to locate and reconstruct prompted humans in the image." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 445, + 108, + 457 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 445, + 108, + 457 + ], + "spans": [ + { + "bbox": [ + 55, + 445, + 108, + 457 + ], + "type": "text", + "content": "3.2. 
Model" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 464, + 296, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 464, + 296, + 488 + ], + "spans": [ + { + "bbox": [ + 55, + 464, + 296, + 488 + ], + "type": "text", + "content": "Image encoder. The image is first encoded as tokens by a vision transformer (ViT) encoder from DINOv2 [12, 44]:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 137, + 493, + 294, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 493, + 294, + 506 + ], + "spans": [ + { + "bbox": [ + 137, + 493, + 294, + 506 + ], + "type": "interline_equation", + "content": "F = \\operatorname {E n c o d e r} (I), \\tag {4}", + "image_path": "8a24309b5b6b128e1816351cbe5a8342b038c3d351e595a976a66e3fb2c9de9a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 511, + 296, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 511, + 296, + 582 + ], + "spans": [ + { + "bbox": [ + 55, + 511, + 296, + 582 + ], + "type": "text", + "content": "To ensure sufficient resolution for modeling humans at both near and far distances, we use " + }, + { + "bbox": [ + 55, + 511, + 296, + 582 + ], + "type": "inline_equation", + "content": "896 \\times 896" + }, + { + "bbox": [ + 55, + 511, + 296, + 582 + ], + "type": "text", + "content": " images. The encoder is run once per frame regardless of the number of people prompted. When camera intrinsics are provided, we add positional encoding of the camera rays to the image tokens to make them camera-aware [2, 15]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 583, + 296, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 583, + 296, + 642 + ], + "spans": [ + { + "bbox": [ + 55, + 583, + 296, + 642 + ], + "type": "text", + "content": "Mask encoder. When available, masks are first processed by an encoder consisting of stripped convolutional layers that downsample the masks. The output mask features are added to the image tokens. If no mask is provided, a learned \"no mask\" token is added instead." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 647, + 294, + 661 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 647, + 294, + 661 + ], + "spans": [ + { + "bbox": [ + 121, + 647, + 294, + 661 + ], + "type": "interline_equation", + "content": "F _ {i} = \\operatorname {E n c o d e r} _ {\\mathrm {m}} \\left(m _ {i}\\right) + F. \\tag {5}", + "image_path": "9b61919247d1a98b64ccde133d822184e564cd1b957aa34fb2292469afd31d76.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "type": "text", + "content": "Prompt encoder. The prompt encoder consists of a set of transformations that map different types of prompts to token vectors of the same dimension. When a prompt is not available, it is replaced with a learned null token." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "spans": [ + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "type": "text", + "content": "For bounding boxes, we encode " + }, + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "type": "inline_equation", + "content": "b_{i}" + }, + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "type": "text", + "content": " using positional encoding summed with learned embeddings to form the box prompt tokens " + }, + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "type": "inline_equation", + "content": "T_{bi} = \\mathrm{PE}(b_{i})" + }, + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "type": "inline_equation", + "content": "T_{bi} \\in \\mathbb{R}^{2 \\times d}" + }, + { + "bbox": [ + 313, + 274, + 555, + 393 + ], + "type": "text", + "content": ". We design different box transformations during training to allow the model to use different boxes as a human identifier. In the training phase, each instance is prompted with either a whole-body bounding box, a face bounding box, or a truncated box covering part of the body. Gaussian noise is added to both corners. At inference time, the model accepts boxes without needing to know the box types." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 393, + 554, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 393, + 554, + 501 + ], + "spans": [ + { + "bbox": [ + 313, + 393, + 554, + 501 + ], + "type": "text", + "content": "Language is a natural way to supply semantic information, and in this paper, we use language to supplement spatial prompts with information on body shape. A sentence such as \"a muscular and tall male\" is encoded with the CLIP text encoder " + }, + { + "bbox": [ + 313, + 393, + 554, + 501 + ], + "type": "inline_equation", + "content": "T_{ti} = \\mathrm{CLIP}(t_i)" + }, + { + "bbox": [ + 313, + 393, + 554, + 501 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 313, + 393, + 554, + 501 + ], + "type": "inline_equation", + "content": "T_{ti} \\in \\mathbb{R}^{d}" + }, + { + "bbox": [ + 313, + 393, + 554, + 501 + ], + "type": "text", + "content": ". To generate paired (image, text) data, we run SHAPY's [8] shape-to-attribute method on the ground truth shape parameters to obtain shape attribute scores and randomly pick a subset of top attributes to form a sentence." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 501, + 554, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 501, + 554, + 536 + ], + "spans": [ + { + "bbox": [ + 313, + 501, + 554, + 536 + ], + "type": "text", + "content": "The interaction prompt " + }, + { + "bbox": [ + 313, + 501, + 554, + 536 + ], + "type": "inline_equation", + "content": "k_{i}" + }, + { + "bbox": [ + 313, + 501, + 554, + 536 + ], + "type": "text", + "content": " passes through the prompt encoder without modification and directly switches on-off the cross-person attention that is described in Sec. 3.3." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "spans": [ + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "type": "text", + "content": "SMPL-X decoder. 
The SMPL-X decoder appends two query tokens " + }, + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{spl}}, T_{\\mathrm{depth}}" + }, + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "type": "text", + "content": " with the prompt tokens " + }, + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "type": "inline_equation", + "content": "T_{bi}, T_{ti}" + }, + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "type": "text", + "content": " to form the person-specific prompt " + }, + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "type": "inline_equation", + "content": "T_i \\in \\mathbb{R}^{5 \\times d}" + }, + { + "bbox": [ + 313, + 536, + 554, + 572 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 573, + 554, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 573, + 554, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 573, + 554, + 597 + ], + "type": "text", + "content": "Finally, we use a standard transformer decoder and two MLP heads to produce the final output" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 358, + 601, + 497, + 617 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 601, + 497, + 617 + ], + "spans": [ + { + "bbox": [ + 358, + 601, + 497, + 617 + ], + "type": "interline_equation", + "content": "T _ {s m p l} ^ {\\prime}, T _ {d e p t h} ^ {\\prime} = \\mathrm {D e c o d e r} (F _ {i}, T _ {i})", + "image_path": "8dc9c8e1f968d7bd44a9a10dc3858f4276ea423d4a79dd669471b9a3517f9c4f.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 378, + 619, + 553, + 632 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 619, + 553, + 632 + ], + "spans": [ + { + "bbox": [ + 378, + 619, + 553, + 632 + ], + "type": "interline_equation", + "content": "\\phi_ {i}, \\theta_ {i}, \\beta_ {i} = \\operatorname {H e a d} _ {s m p l} \\left(T _ {s m p l} ^ {\\prime}\\right) \\tag {6}", + "image_path": "83a4c889e13f5a2cefc9fd875f9ef084a162f18ba23dbdac01b8922c059b7608.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 406, + 635, + 508, + 649 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 635, + 508, + 649 + ], + "spans": [ + { + "bbox": [ + 406, + 635, + 508, + 649 + ], + "type": "interline_equation", + "content": "\\tau_ {i} = \\operatorname {H e a d} _ {d e p t h} (T _ {d e p t h} ^ {\\prime}).", + "image_path": "d2b6b299311d4fd2290f405fa713aa218f885e6511c964db332d366023fac5df.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "type": "text", + "content": "The transformer consists of three attention blocks. Each block applies self-attention on the tokens, cross-person attention (described in Sec. 3.3), and then two-way cross-attention between the tokens and the image embeddings [27]. 
The self-attention and cross-attention with the" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "content": "image are applied to each prompted person independently. We use separate tokens " + }, + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{spl}}" + }, + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{depth}}" + }, + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "content": " to make the location representation invariant to the 3D human pose and shape representation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "spans": [ + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "text", + "content": "Regressing the location of the human in the camera space is much more challenging than most prior work that models humans in a cropped image space. Therefore, we do not regress " + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "text", + "content": " directly. We regress focal length normalized 2D translation " + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "inline_equation", + "content": "p_{xy} \\in \\mathbb{R}^2" + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "text", + "content": " and inverse depth " + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "inline_equation", + "content": "p_z \\in \\mathbb{R}" + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "text", + "content": ", and then transform them to " + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 55, + 121, + 295, + 192 + ], + "type": "text", + "content": " as follows" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 86, + 198, + 295, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 198, + 295, + 224 + ], + "spans": [ + { + "bbox": [ + 86, + 198, + 295, + 224 + ], + "type": "interline_equation", + "content": "t _ {x y} = \\frac {p _ {x y}}{p _ {z}} \\quad t _ {z} = \\frac {1}{p _ {z}} \\times \\frac {f}{f _ {c}} \\quad \\tau = [ t _ {x y}, t z ], \\tag {7}", + "image_path": "2b22fb35cc8785c31aa5d8369e5105b52049a14b0fb90e57307f5f1a0ac3546b.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 231, + 296, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 231, + 296, + 316 + ], + "spans": [ + { + "bbox": [ + 55, + 231, + 296, + 316 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 231, + 296, + 316 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 55, + 231, + 296, + 316 + ], + "type": "text", + "content": " is the ground truth or estimated focal length of the image, and " + }, + { + "bbox": [ + 55, + 231, + 296, + 316 + ], + "type": "inline_equation", + "content": "f_{c}" + }, + { + "bbox": [ + 55, + 231, + 296, + 
316 + ], + "type": "text", + "content": " is the canonical focal length. Predicting the normalized inverse depth follows the recent monocular depth literature [51] and is also intuitive since the inverse depth is linearly related to the size of the human in the image. Predicting " + }, + { + "bbox": [ + 55, + 231, + 296, + 316 + ], + "type": "inline_equation", + "content": "p_{xy}" + }, + { + "bbox": [ + 55, + 231, + 296, + 316 + ], + "type": "text", + "content": " is equivalent to predicting the 2D location of the human in a normalized image plane." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 322, + 187, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 322, + 187, + 335 + ], + "spans": [ + { + "bbox": [ + 55, + 322, + 187, + 335 + ], + "type": "text", + "content": "3.3. Two-person interaction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 340, + 295, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 340, + 295, + 389 + ], + "spans": [ + { + "bbox": [ + 55, + 340, + 295, + 389 + ], + "type": "text", + "content": "We introduce promptable layers in the decoder to model two-person interaction. We describe the case where there are two people in the image, but the implementation can extend to model an interacting pair in a larger group." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 388, + 295, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 388, + 295, + 449 + ], + "spans": [ + { + "bbox": [ + 55, + 388, + 295, + 449 + ], + "type": "text", + "content": "The promptability is modeled as a flow control with a residual connection (Fig. 3). Specifically, if two humans are interacting (as indicated by " + }, + { + "bbox": [ + 55, + 388, + 295, + 449 + ], + "type": "inline_equation", + "content": "k_{i}" + }, + { + "bbox": [ + 55, + 388, + 295, + 449 + ], + "type": "text", + "content": "), their query tokens pass through an additional self-attention layer; otherwise, non-interacting humans skip this." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 449, + 295, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 449, + 295, + 532 + ], + "spans": [ + { + "bbox": [ + 55, + 449, + 295, + 532 + ], + "type": "text", + "content": "Applying attention to every person often creates unnecessary dependency in crowded scenes, and there is limited training data for large-group scenarios. However, there is high-quality data featuring two-person social interactions. By making the interaction layers promptable, we mitigate data diversity issues and increase flexibility, regardless of the number of people in the scene." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 532, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 532, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 532, + 295, + 628 + ], + "type": "text", + "content": "Our proposed interaction layer uses a standard self-attention mechanism. First, we add positional encodings to the query tokens to distinguish the two individuals. The encoded tokens then go through a self-attention layer, whose output is combined with the original tokens via a residual connection. Our experiments demonstrate that including these interaction layers significantly improves inter-person pose accuracy in two-person interaction benchmarks." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 635, + 205, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 635, + 205, + 647 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 205, + 647 + ], + "type": "text", + "content": "3.4. PromptHMR video version" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 653, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 295, + 715 + ], + "type": "text", + "content": "In addition to the single-image variant of PromptHMR, we train an extended version that processes videos to estimate human motion in world coordinates. To achieve this, we introduce a simple and efficient temporal transformer module. Given a monocular video sequence " + }, + { + "bbox": [ + 55, + 653, + 295, + 715 + ], + "type": "inline_equation", + "content": "\\{I^t\\}_{t=0}^T" + }, + { + "bbox": [ + 55, + 653, + 295, + 715 + ], + "type": "text", + "content": ", we first run" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 316, + 72, + 550, + 159 + ], + "blocks": [ + { + "bbox": [ + 316, + 72, + 550, + 159 + ], + "lines": [ + { + "bbox": [ + 316, + 72, + 550, + 159 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 550, + 159 + ], + "type": "image", + "image_path": "8c6c59d99ccd878b1a6ed924cf39f0de820cb6afdd9d730c0a6e96d7833acb42.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 164, + 554, + 198 + ], + "lines": [ + { + "bbox": [ + 313, + 164, + 554, + 198 + ], + "spans": [ + { + "bbox": [ + 313, + 164, + 554, + 198 + ], + "type": "text", + "content": "Figure 3. SMPL-X decoder. The top row shows one attention block in the decoder. The cross-person interaction module can be turned on/off. The bottom row shows the cross-person attention." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "spans": [ + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "text", + "content": "PromptHMR to obtain per-subject SMPL-X decoder output tokens " + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{mpl}}^{\\prime}" + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{depth}}^{\\prime}" + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "text", + "content": ", assuming that the subject identities are provided with the prompts. These tokens, along with the positional encoding of time " + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "text", + "content": ", are fed to a decoder-only temporal transformer module with twelve attention blocks. 
The output tokens are converted to SMPL-X parameters " + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "inline_equation", + "content": "\\phi_t, \\theta_t, \\beta_t" + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "text", + "content": ", translation " + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "inline_equation", + "content": "\\tau_t" + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "text", + "content": ", and joint contact probabilities " + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "inline_equation", + "content": "c_t" + }, + { + "bbox": [ + 313, + 213, + 554, + 322 + ], + "type": "text", + "content": ". The contact probabilities indicate whether a given joint is in contact with the ground plane similar to [52-54]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 323, + 554, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 323, + 554, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 323, + 554, + 443 + ], + "type": "text", + "content": "To obtain results in world coordinates, we adopt the approach from TRAM [67]. Specifically, we use DROID-SLAM [62] and a monocular metric depth estimation model, ZoeDepth [3], to estimate camera motion in metric world coordinates. The translation parameters " + }, + { + "bbox": [ + 313, + 323, + 554, + 443 + ], + "type": "inline_equation", + "content": "\\tau_{t}" + }, + { + "bbox": [ + 313, + 323, + 554, + 443 + ], + "type": "text", + "content": " are then transformed to world coordinates using the estimated camera motion. To refine the human trajectory and mitigate foot-skating artifacts, we leverage the estimated contact probabilities and run a fast postprocessing that optimizes the contact joints to have zero velocity." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 455, + 368, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 455, + 368, + 466 + ], + "spans": [ + { + "bbox": [ + 314, + 455, + 368, + 466 + ], + "type": "text", + "content": "3.5. 
Losses" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 475, + 554, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 554, + 499 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 554, + 499 + ], + "type": "text", + "content": "PromptHMR is trained with a combination of 2D and 3D losses, following traditional HMR methods [23, 32]:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 327, + 511, + 541, + 525 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 511, + 541, + 525 + ], + "spans": [ + { + "bbox": [ + 327, + 511, + 541, + 525 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\lambda_ {1} \\mathcal {L} _ {2 D} + \\lambda_ {2} \\mathcal {L} _ {3 D} + \\lambda_ {3} \\mathcal {L} _ {\\mathrm {S M P L}} + \\lambda_ {4} \\mathcal {L} _ {V} + \\lambda_ {5} \\mathcal {L} _ {t}", + "image_path": "b8ababd12ee3e02589e61d1e939e094be5aef68d93977f7e94d2939122583623.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 536, + 430, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 536, + 430, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 536, + 430, + 548 + ], + "type": "text", + "content": "with each term calculated as" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 365, + 560, + 477, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 560, + 477, + 574 + ], + "spans": [ + { + "bbox": [ + 365, + 560, + 477, + 574 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {2 D} = \\left\\| \\hat {\\mathcal {J}} _ {2 D} - \\Pi \\left(\\mathcal {J} _ {3 D}\\right) \\right\\| _ {F} ^ {2}", + "image_path": "de787743b2bb7ce67c4891b0fbfa4cdc20aa290b5623a0ace587eccb9ae2782e.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 365, + 577, + 462, + 591 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 577, + 462, + 591 + ], + "spans": [ + { + "bbox": [ + 365, + 577, + 462, + 591 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {3 D} = \\left\\| \\hat {\\mathcal {J}} _ {3 D} - \\mathcal {J} _ {3 D} \\right\\| _ {F} ^ {2}", + "image_path": "0395bd1361ba1d9d5b420e84266a4d0dd1fc97cfc2cb0a78aedcf34a8c0f4e4f.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 353, + 594, + 440, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 594, + 440, + 609 + ], + "spans": [ + { + "bbox": [ + 353, + 594, + 440, + 609 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {S M P L}} = | | \\hat {\\Theta} - \\Theta | | _ {2} ^ {2}", + "image_path": "e220cdee247a380a95636ef96ef2843380003bc7c28c9936654f9da4a430c1d9.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 369, + 612, + 441, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 612, + 441, + 625 + ], + "spans": [ + { + "bbox": [ + 369, + 612, + 441, + 625 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {V} = \\left| \\left| \\hat {V} - V \\right| \\right| _ {F} ^ {2}", + "image_path": "2a3130479b6a0ee73927f3bbb9dfd224f660b0a3f08b9aa59d0f91bf3942754e.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 371, + 628, + 516, + 642 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 628, + 516, + 642 + ], + "spans": [ + { + "bbox": [ + 371, + 628, + 516, + 642 + ], + "type": "interline_equation", + "content": "\\mathcal {L} 
_ {t} = \\left| \\left| \\hat {p} _ {x y} - p _ {x y} \\right| \\right| _ {F} ^ {2} + \\left| \\left| \\hat {p} _ {z} - p _ {z} \\right| \\right| _ {F} ^ {2}", + "image_path": "00757eb75783a41f1cbdfaa4aff4d3041cf964409225395beaa02325bd9fd550.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{J}_{3D}" + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "text", + "content": " are the 3D joints and vertices of the SMPL-X model, with the hat operator denoting the ground truth. " + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "inline_equation", + "content": "\\Pi" + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "text", + "content": " is the camera reprojection operator. Additionally, on datasets with ground truth translation labels, we supervise the normalized translation " + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "inline_equation", + "content": "p_{xy}" + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "text", + "content": " and inverse depth " + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "inline_equation", + "content": "p_z" + }, + { + "bbox": [ + 313, + 653, + 554, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 71, + 137, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 71, + 137, + 85 + ], + "spans": [ + { + "bbox": [ + 55, + 71, + 137, + 85 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 296, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 199 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 199 + ], + "type": "text", + "content": "Datasets. We train PromptHMR with standard datasets: BEDLAM [4], AGORA [46], 3DPW [65], COCO [35], and MPII [39]. Following 4DHumans, we add AIC [68] and InstaVariety [24] as in-the-wild data, with pseudoground truth from CamSMPLify [45]. Additionally, we add CHI3D [17] and HI4D [71] to enable learning two-person interaction following the train-test splits from BUDDI [43]. Including CHI3D and HI4D does not improve performance on other benchmarks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "spans": [ + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "text", + "content": "Implementation. We train PromptHMR with AdamW with a batch size of 96 images of resolution " + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "inline_equation", + "content": "896 \\times 896" + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "text", + "content": ". 
We use a learning rate of " + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "inline_equation", + "content": "1e^{-5}" + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "text", + "content": " for the image encoder and " + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "inline_equation", + "content": "3e^{-5}" + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "text", + "content": " for the prompt encoder and the SMPL-X decoder, with a weight decay of " + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "inline_equation", + "content": "5e^{-5}" + }, + { + "bbox": [ + 55, + 201, + 296, + 261 + ], + "type": "text", + "content": ". The training converges within 350K steps." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 262, + 296, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 262, + 296, + 333 + ], + "spans": [ + { + "bbox": [ + 55, + 262, + 296, + 333 + ], + "type": "text", + "content": "Evaluation. We evaluate camera space reconstruction accuracy on 3DPW [65], EMDB [25] and RICH [21], using MPJPE, Procrustes-aligned MPJPE (PA-MPJPE) and Per Vertex Error (PVE) [23]. We evaluate inter-person accuracy on HI4D and CHI3D by Pair-PA-MPJPE, which aligns the two people as a whole with the ground truth [43]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 335, + 296, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 335, + 296, + 383 + ], + "spans": [ + { + "bbox": [ + 55, + 335, + 296, + 383 + ], + "type": "text", + "content": "To evaluate world-grounded motion on EMDB with PromptHMR video (PromptHMR-vid), we compute World-aligned MPJPE " + }, + { + "bbox": [ + 55, + 335, + 296, + 383 + ], + "type": "inline_equation", + "content": "(\\mathrm{WA - MPJPE}_{100})" + }, + { + "bbox": [ + 55, + 335, + 296, + 383 + ], + "type": "text", + "content": ", World MPJPE " + }, + { + "bbox": [ + 55, + 335, + 296, + 383 + ], + "type": "inline_equation", + "content": "(\\mathrm{W - MPJPE}_{100})" + }, + { + "bbox": [ + 55, + 335, + 296, + 383 + ], + "type": "text", + "content": " and Root Translation Error (RTE in %) [54, 70]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 393, + 194, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 393, + 194, + 406 + ], + "spans": [ + { + "bbox": [ + 55, + 393, + 194, + 406 + ], + "type": "text", + "content": "4.1. Reconstruction accuracy" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 412, + 296, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 412, + 296, + 520 + ], + "spans": [ + { + "bbox": [ + 55, + 412, + 296, + 520 + ], + "type": "text", + "content": "For camera space reconstruction, as shown in Table 1, PromptHMR and PromptHMR-Vid demonstrate state-of-the-art performance, matching crop-based methods while achieving better results than other full-image methods. PromptHMR and CameraHMR use the same training data and have similar performance, which validates that this prompt-based approach can achieve metrically accurate results. For representative results, see Fig. 7, where PromptHMR recovers coherent 3D scenes of people." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 521, + 296, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 521, + 296, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 521, + 296, + 605 + ], + "type": "text", + "content": "For interaction reconstruction, PromptHMR achieves good accuracy as indicated in Table 2. Compared to BUDDI which is also trained on CHI3D and HI4D, our method achieves better overall accuracy on per-person and interperson metrics. We show qualitative results in Fig. 8. As a monocular regression method, PromptHMR still cannot avoid interpenetration between closely interacting people." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 605, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 296, + 713 + ], + "type": "text", + "content": "PromptHMR-Vid achieves SOTA performance among methods that estimate human motion in world coordinates, as shown in Table 4. Unlike TRAM, we estimate the joint contact probabilities similar to [53, 54]. Therefore, we achieve lower foot skating than TRAM, even though we use the same metric SLAM method to transform motion in camera space to world coordinates. Please refer to our supplementary material (SupMat) for qualitative results of PromptHMR-Vid." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 317, + 70, + 551, + 129 + ], + "blocks": [ + { + "bbox": [ + 317, + 70, + 551, + 129 + ], + "lines": [ + { + "bbox": [ + 317, + 70, + 551, + 129 + ], + "spans": [ + { + "bbox": [ + 317, + 70, + 551, + 129 + ], + "type": "image", + "image_path": "bed63959a43ef9b6517723265c8d98562acb316fc42046804d36f64950b13e1e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 133, + 554, + 155 + ], + "lines": [ + { + "bbox": [ + 313, + 133, + 554, + 155 + ], + "spans": [ + { + "bbox": [ + 313, + 133, + 554, + 155 + ], + "type": "text", + "content": "Figure 4. Effect of box prompts. Our method remains stable with different boxes, including noisy truncated boxes." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 316, + 162, + 551, + 228 + ], + "blocks": [ + { + "bbox": [ + 316, + 162, + 551, + 228 + ], + "lines": [ + { + "bbox": [ + 316, + 162, + 551, + 228 + ], + "spans": [ + { + "bbox": [ + 316, + 162, + 551, + 228 + ], + "type": "image", + "image_path": "d460b7604ea14b3e3eeca7c6c5ccbdf17a95d7dc5d115e02e0e0b1efc67f4b85.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 230, + 554, + 264 + ], + "lines": [ + { + "bbox": [ + 313, + 230, + 554, + 264 + ], + "spans": [ + { + "bbox": [ + 313, + 230, + 554, + 264 + ], + "type": "text", + "content": "Figure 5. Effect of mask prompts. Results are from the same model with different prompt inputs. Masks are better for close interaction scenarios where boxes are ambiguous." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 318, + 266, + 550, + 472 + ], + "blocks": [ + { + "bbox": [ + 318, + 266, + 550, + 472 + ], + "lines": [ + { + "bbox": [ + 318, + 266, + 550, + 472 + ], + "spans": [ + { + "bbox": [ + 318, + 266, + 550, + 472 + ], + "type": "image", + "image_path": "2964c98ed8bbefebb6c7c00d61ec76077da35ec78188b8515b97045c789f956e.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 477, + 554, + 521 + ], + "lines": [ + { + "bbox": [ + 313, + 477, + 554, + 521 + ], + "spans": [ + { + "bbox": [ + 313, + 477, + 554, + 521 + ], + "type": "text", + "content": "Figure 6. Effect of shape prompts. Compared to the baseline that does not incorporate shape description during training and testing, the model with shape prompts has better accuracy on HBW, especially in ambiguous images." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 537, + 476, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 537, + 476, + 551 + ], + "spans": [ + { + "bbox": [ + 313, + 537, + 476, + 551 + ], + "type": "text", + "content": "4.2. Effect of multimodal prompts" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 556, + 554, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 556, + 554, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 556, + 554, + 605 + ], + "type": "text", + "content": "We conduct qualitative and quantitative evaluations of the multimodal prompts. For efficient ablation, we train models with " + }, + { + "bbox": [ + 313, + 556, + 554, + 605 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 313, + 556, + 554, + 605 + ], + "type": "text", + "content": " input resolution and select the best model within 150K steps of training." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 605, + 554, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 554, + 688 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 554, + 688 + ], + "type": "text", + "content": "For box prompts, as shown in rows 3-4 of Fig. 7, our method is able to take a combination of different boxes from in-the-wild images to reconstruct crowded scenes. Figure 4 also shows an example with varying box inputs. PromptHMR remains stable when the boxes change and uses full image context to reconstruct the human even when the boxes are truncated." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "content": "The mask prompt is more effective than boxes when people closely overlap (Fig. 5), as boxes are ambiguous in such" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 94, + 72, + 515, + 251 + ], + "blocks": [ + { + "bbox": [ + 94, + 72, + 515, + 251 + ], + "lines": [ + { + "bbox": [ + 94, + 72, + 515, + 251 + ], + "spans": [ + { + "bbox": [ + 94, + 72, + 515, + 251 + ], + "type": "table", + "html": "
<table><thead><tr><td colspan="2">Models</td><td colspan="3">3DPW (14)</td><td colspan="3">EMDB (24)</td><td colspan="3">RICH (24)</td></tr>
<tr><td colspan="2"></td><td>PA-MPJPE</td><td>MPJPE</td><td>PVE</td><td>PA-MPJPE</td><td>MPJPE</td><td>PVE</td><td>PA-MPJPE</td><td>MPJPE</td><td>PVE</td></tr></thead>
<tbody><tr><td rowspan="4">cropped image</td><td>CLIFF* [33]</td><td>43.0</td><td>69.0</td><td>81.2</td><td>68.3</td><td>103.3</td><td>123.7</td><td>68.1</td><td>103.3</td><td>128.0</td></tr>
<tr><td>HMR2.0a [18]</td><td>44.4</td><td>69.8</td><td>82.2</td><td>61.5</td><td>97.8</td><td>120.0</td><td>60.7</td><td>98.3</td><td>120.8</td></tr>
<tr><td>TokenHMR [14]</td><td>44.3</td><td>71.0</td><td>84.6</td><td>55.6</td><td>91.7</td><td>109.4</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>CameraHMR [45]</td><td>35.1</td><td>56.0</td><td>65.9</td><td>43.3</td><td>70.2</td><td>81.7</td><td>34.0</td><td>55.7</td><td>64.4</td></tr>
<tr><td rowspan="3">full image</td><td>BEV [60]</td><td>46.9</td><td>78.5</td><td>92.3</td><td>70.9</td><td>112.2</td><td>133.4</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Multi-HMR* [2]</td><td>45.9</td><td>73.1</td><td>87.1</td><td>50.1</td><td>81.6</td><td>95.7</td><td>46.3</td><td>73.8</td><td>83.0</td></tr>
<tr><td>PromptHMR*</td><td>36.6</td><td>58.7</td><td>69.4</td><td>41.0</td><td>71.7</td><td>84.5</td><td>37.3</td><td>56.6</td><td>65.5</td></tr>
<tr><td rowspan="4">video</td><td>WHAM [54]</td><td>37.5</td><td>59.8</td><td>71.5</td><td>52.0</td><td>81.6</td><td>96.9</td><td>44.3</td><td>80.0</td><td>91.2</td></tr>
<tr><td>TRAM [67]</td><td>35.6</td><td>59.3</td><td>69.6</td><td>45.7</td><td>74.4</td><td>86.6</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>GVHMR [53]</td><td>37.0</td><td>56.6</td><td>68.7</td><td>44.5</td><td>74.2</td><td>85.9</td><td>39.5</td><td>66.0</td><td>74.4</td></tr>
<tr><td>PromptHMR-Vid</td><td>35.5</td><td>56.9</td><td>67.3</td><td>40.1</td><td>68.1</td><td>79.2</td><td>37.0</td><td>57.4</td><td>65.8</td></tr></tbody></table>
", + "image_path": "10b66dfbccf02f42f4e3b9a2ffd895e955ecf5bab5cfa84ddc478ead506cd34f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 58, + 301, + 292, + 381 + ], + "blocks": [ + { + "bbox": [ + 55, + 256, + 555, + 291 + ], + "lines": [ + { + "bbox": [ + 55, + 256, + 555, + 291 + ], + "spans": [ + { + "bbox": [ + 55, + 256, + 555, + 291 + ], + "type": "text", + "content": "Table 1. Comparison of mesh reconstruction on the 3DPW, EMDB and RICH datasets, with the number of joints in parenthesis. " + }, + { + "bbox": [ + 55, + 256, + 555, + 291 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 55, + 256, + 555, + 291 + ], + "type": "text", + "content": " denotes methods that use ground truth focal length during inference. Note that we remove the test-time flip augmentation from all of the video methods to ensure a fair comparison. All metrics are in mm." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 301, + 292, + 381 + ], + "lines": [ + { + "bbox": [ + 58, + 301, + 292, + 381 + ], + "spans": [ + { + "bbox": [ + 58, + 301, + 292, + 381 + ], + "type": "table", + "html": "
<table><thead><tr><td>Models</td><td colspan="3">HI4D (14)</td><td colspan="3">CHI3D (14)</td></tr>
<tr><td></td><td>PA-MPJPE</td><td>MPJPE</td><td>Pair-PA-MPJPE</td><td>PA-MPJPE</td><td>MPJPE</td><td>Pair-PA-MPJPE</td></tr></thead>
<tbody><tr><td>BEV* [60]</td><td>81</td><td>-</td><td>136</td><td>51</td><td>-</td><td>96</td></tr>
<tr><td>BUDDI [43]</td><td>73</td><td>-</td><td>98</td><td>47</td><td>-</td><td>68</td></tr>
<tr><td>Multi-HMR* [2]</td><td>49.8</td><td>67.8</td><td>80.6</td><td>31.7</td><td>54.0</td><td>100.0</td></tr>
<tr><td>PromptHMR*</td><td>39.2</td><td>63.9</td><td>78.1</td><td>27.2</td><td>48.0</td><td>58.5</td></tr>
<tr><td>PromptHMR</td><td>30.1</td><td>39.6</td><td>39.5</td><td>24.7</td><td>46.5</td><td>45.3</td></tr></tbody></table>
", + "image_path": "0308ef773eef4ff74a60a1ebfdf87d0d192464007bb0372b2e2e0590beaefe8b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 58, + 449, + 291, + 534 + ], + "blocks": [ + { + "bbox": [ + 55, + 385, + 295, + 441 + ], + "lines": [ + { + "bbox": [ + 55, + 385, + 295, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 385, + 295, + 441 + ], + "type": "text", + "content": "Table 2. Comparison on interaction reconstruction. PromptHMR is more accurate in per-person and inter-person accuracy. * denote a method or baseline is not trained on HI4D or CHI3D. All metrics are in mm. The impact of HI4D and the interaction prompt are evaluated in Table 5." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 449, + 291, + 534 + ], + "lines": [ + { + "bbox": [ + 58, + 449, + 291, + 534 + ], + "spans": [ + { + "bbox": [ + 58, + 449, + 291, + 534 + ], + "type": "table", + "html": "
<table><thead><tr><td>Train w/ text</td><td>Test w/ text</td><td colspan="5">HBW</td></tr>
<tr><td></td><td></td><td>Height</td><td>Chest</td><td>Waist</td><td>Hip</td><td>P2P-20k</td></tr></thead>
<tbody><tr><td>×</td><td>×</td><td>69</td><td>51</td><td>88</td><td>63</td><td>26</td></tr>
<tr><td>✓</td><td>×</td><td>69</td><td>48</td><td>86</td><td>60</td><td>26</td></tr>
<tr><td>✓</td><td>✓</td><td>62</td><td>43</td><td>76</td><td>58</td><td>24</td></tr></tbody></table>
", + "image_path": "56545b98164f3d44d755701f2cd0c4d315ce4d8ea2e750b11b796f77a9f6c710.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 605, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 628 + ], + "type": "text", + "content": "cases. Ablation of HI4D (rows 1-2 in Tab. 5) shows that using masks as the spatial prompt improves accuracy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 630, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 630, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 630, + 295, + 715 + ], + "type": "text", + "content": "Experiments on the HBW validation set (Tab. 3) show that text prompts effectively improve shape accuracy when used during both training and testing. Moreover, training with shape descriptions alone provides an accuracy boost even if prompts are not given at test time. As illustrated in Fig. 6, text prompts provide notable improvements, especially when large perspective effects create ambiguity." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 318, + 301, + 550, + 370 + ], + "blocks": [ + { + "bbox": [ + 55, + 540, + 295, + 586 + ], + "lines": [ + { + "bbox": [ + 55, + 540, + 295, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 540, + 295, + 586 + ], + "type": "text", + "content": "Table 3. Ablation of shape prompts using text. Training with shape prompts improves shape accuracy. Using shape prompts during inference further improves shape accuracy. The ablation study is conducted with a " + }, + { + "bbox": [ + 55, + 540, + 295, + 586 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 55, + 540, + 295, + 586 + ], + "type": "text", + "content": " model. Errors are in mm." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 301, + 550, + 370 + ], + "lines": [ + { + "bbox": [ + 318, + 301, + 550, + 370 + ], + "spans": [ + { + "bbox": [ + 318, + 301, + 550, + 370 + ], + "type": "table", + "html": "
<table><thead><tr><td>Models</td><td colspan="5">EMDB-2 (24)</td></tr>
<tr><td></td><td>WA-MPJPE100</td><td>W-MPJPE100</td><td>RTE</td><td>Jitter</td><td>Foot Skating</td></tr></thead>
<tbody><tr><td>WHAM [54]</td><td>135.6</td><td>354.8</td><td>6.0</td><td>22.5</td><td>4.4</td></tr>
<tr><td>TRAM [67]</td><td>76.4</td><td>222.4</td><td>1.4</td><td>18.5</td><td>23.4</td></tr>
<tr><td>GVHMR [53]</td><td>111.0</td><td>276.5</td><td>2.0</td><td>16.7</td><td>3.5</td></tr>
<tr><td>PromptHMR-Vid</td><td>71.0</td><td>216.5</td><td>1.3</td><td>16.3</td><td>3.5</td></tr></tbody></table>
", + "image_path": "786e6190b2e7d369c95bb59543041ca8d17a5c27883afb76fcfa7fd758c59a17.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 317, + 435, + 553, + 535 + ], + "blocks": [ + { + "bbox": [ + 313, + 376, + 555, + 419 + ], + "lines": [ + { + "bbox": [ + 313, + 376, + 555, + 419 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 555, + 419 + ], + "type": "text", + "content": "Table 4. Evaluation of motion in world coordinates. PromptHMR-Vid combined with metric SLAM from TRAM [67] surpasses SOTA methods at predicting human motion in world coordinates." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 435, + 553, + 535 + ], + "lines": [ + { + "bbox": [ + 317, + 435, + 553, + 535 + ], + "spans": [ + { + "bbox": [ + 317, + 435, + 553, + 535 + ], + "type": "table", + "html": "
<table><thead><tr><td colspan="3">Trained with</td><td colspan="3">HI4D (14)</td></tr>
<tr><td>Mask</td><td>Interaction</td><td>HI4D</td><td>PA-MPJPE</td><td>MPJPE</td><td>Pair-PA-MPJPE</td></tr></thead>
<tbody><tr><td>×</td><td>×</td><td>×</td><td>47.0</td><td>71.4</td><td>87.2</td></tr>
<tr><td>✓</td><td>×</td><td>×</td><td>43.4</td><td>60.5</td><td>83.0</td></tr>
<tr><td>×</td><td>✓</td><td>×</td><td>43.7</td><td>61.3</td><td>73.0</td></tr>
<tr><td>×</td><td>×</td><td>✓</td><td>36.3</td><td>49.4</td><td>52.6</td></tr>
<tr><td>✓</td><td>✓</td><td>✓</td><td>36.5</td><td>47.1</td><td>47.9</td></tr></tbody></table>
", + "image_path": "c1fa09e71427461b1e5ebaacdbf8c8294d86641a77d6f9e8be765e32ec0660a4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 541, + 555, + 596 + ], + "lines": [ + { + "bbox": [ + 313, + 541, + 555, + 596 + ], + "spans": [ + { + "bbox": [ + 313, + 541, + 555, + 596 + ], + "type": "text", + "content": "Table 5. Ablation on interaction prompt. The interaction module improves inter-person reconstruction metrics Pair-PA-MPJPE on HI4D, especially when the method does not include HI4D in training. Ablation is conducted with a " + }, + { + "bbox": [ + 313, + 541, + 555, + 596 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 313, + 541, + 555, + 596 + ], + "type": "text", + "content": " model. All metrics are in mm." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": "For interaction prompts, we show an ablation in Table 5. The proposed interaction module is beneficial and largely improves inter-person accuracy on HI4D even without HI4D training, indicating out-off-domain generalization. When trained on HI4D, the interaction module does not improve per-person PA-MPJPE but still improves interperson Pair-PA-MPJPE. Please refer to our SupMat for more qualitative results on interaction prompts." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 71, + 549, + 350 + ], + "blocks": [ + { + "bbox": [ + 59, + 71, + 549, + 350 + ], + "lines": [ + { + "bbox": [ + 59, + 71, + 549, + 350 + ], + "spans": [ + { + "bbox": [ + 59, + 71, + 549, + 350 + ], + "type": "image", + "image_path": "f8f9cf2acc709924ae2bab9efea5c107298d4e5de4209433bf74abca05fca567.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 350, + 555, + 374 + ], + "lines": [ + { + "bbox": [ + 55, + 350, + 555, + 374 + ], + "spans": [ + { + "bbox": [ + 55, + 350, + 555, + 374 + ], + "type": "text", + "content": "Figure 7. Qualitative comparison: Multi-HMR vs PromptHMR. Our model can recover coherent 3D scenes of people. In crowded scenes, face detection provides reliable box prompts for our model. Please zoom in to see the details." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 384, + 539, + 536 + ], + "blocks": [ + { + "bbox": [ + 59, + 384, + 539, + 536 + ], + "lines": [ + { + "bbox": [ + 59, + 384, + 539, + 536 + ], + "spans": [ + { + "bbox": [ + 59, + 384, + 539, + 536 + ], + "type": "image", + "image_path": "793495b0ca62fcf69d0eb4f71001d3888dbb2dafe49de988b7bd3fb3108bb57d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 539, + 555, + 562 + ], + "lines": [ + { + "bbox": [ + 55, + 539, + 555, + 562 + ], + "spans": [ + { + "bbox": [ + 55, + 539, + 555, + 562 + ], + "type": "text", + "content": "Figure 8. Qualitative results. PromptHMR recovers coherent two-person close interaction. Despite suffering from some interpenetration, the relative positions of the interacting people are accurately recovered. More examples are provided in the Supplementary." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 575, + 130, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 575, + 130, + 588 + ], + "spans": [ + { + "bbox": [ + 55, + 575, + 130, + 588 + ], + "type": "text", + "content": "5. Limitations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 594, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 296, + 715 + ], + "type": "text", + "content": "We see PromptHMR as a step towards a holistic perception model for 3D humans, but several limitations need to be addressed in future work. Currently, the shape description and interaction prompts are not automatically generated and need to be supplied by the user. Future work should explore how to effectively integrate our promptable model with VLMs to automate prompting. We show how semantic prompts can improve reconstruction accuracy, but many other potential types of side information such as action descriptions, 3D scene context, or body measurements may" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 577, + 512, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 577, + 512, + 588 + ], + "spans": [ + { + "bbox": [ + 313, + 577, + 512, + 588 + ], + "type": "text", + "content": "provide additional benefits in different scenarios." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 597, + 388, + 610 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 597, + 388, + 610 + ], + "spans": [ + { + "bbox": [ + 313, + 597, + 388, + 610 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": "We have presented PromptHMR, a promptable HPS estimation approach that leverages full image context with spatial and semantic prompts to infer 3D humans in the scene. Our method demonstrates state-of-the-art accuracy across diverse benchmarks and generalizes well in the wild. Our experiments show that incorporating diverse input information through flexible prompting enables robustness and adaptability in challenging scenarios." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 143 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 143 + ], + "type": "text", + "content": "Acknowledgement. The authors would like to thank Yan Zhang, Yao Feng, and Nitin Saini for their suggestions. The majority of the work was done when Yufu was an intern at Meshcapade. Yufu and Kostas thank the support of NSF NCS-FO 2124355, NSF FRR 2220868, and NSF IISRI 2212433." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 144, + 294, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 294, + 192 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 294, + 192 + ], + "type": "text", + "content": "Disclosure. 
While MJB is a co-founder and Chief Scientist at Meshcapade, his research in this project was performed solely at, and funded solely by, the Max Planck Society." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 202, + 115, + 215 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 202, + 115, + 215 + ], + "spans": [ + { + "bbox": [ + 56, + 202, + 115, + 215 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 222, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 222, + 295, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 222, + 295, + 255 + ], + "spans": [ + { + "bbox": [ + 61, + 222, + 295, + 255 + ], + "type": "text", + "content": "[1] Nikos Athanasiou, Alpar Ceske, Markos Diomataris, Michael J. Black, and Gül Varol. MotionFix: Text-driven 3D human motion editing. In SIGGRAPH Asia, 2024. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 256, + 296, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 256, + 296, + 310 + ], + "spans": [ + { + "bbox": [ + 61, + 256, + 296, + 310 + ], + "type": "text", + "content": "[2] Fabien Baradel, Matthieu Armando, Salma Galaoui, Romain Brégier, Philippe Weinzaepfel, Grégory Rogez, and Thomas Lucas. Multi-HMR: Multi-person whole-body human mesh recovery in a single shot. European Conference on Computer Vision, 2024. 3, 4, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 312, + 296, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 312, + 296, + 355 + ], + "spans": [ + { + "bbox": [ + 61, + 312, + 296, + 355 + ], + "type": "text", + "content": "[3] Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, and Matthias Müller. ZoeDepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288, 2023. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 357, + 295, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 357, + 295, + 411 + ], + "spans": [ + { + "bbox": [ + 61, + 357, + 295, + 411 + ], + "type": "text", + "content": "[4] Michael J Black, Priyanka Patel, Joachim Tesch, and Jinlong Yang. BEDLAM: A synthetic dataset of bodies exhibiting detailed lifelike animated motion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8726-8737, 2023. 6, 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 413, + 295, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 413, + 295, + 456 + ], + "spans": [ + { + "bbox": [ + 62, + 413, + 295, + 456 + ], + "type": "text", + "content": "[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229, 2020. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 458, + 294, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 458, + 294, + 511 + ], + "spans": [ + { + "bbox": [ + 62, + 458, + 294, + 511 + ], + "type": "text", + "content": "[6] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2Mesh: Graph convolutional network for 3D human pose and mesh recovery from a 2D human pose. In European Conference on Computer Vision, pages 769-787. Springer, 2020. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 514, + 295, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 514, + 295, + 567 + ], + "spans": [ + { + "bbox": [ + 62, + 514, + 295, + 567 + ], + "type": "text", + "content": "[7] Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Beyond static features for temporally consistent 3D human pose and shape from a video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1964-1973, 2021. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 569, + 295, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 569, + 295, + 634 + ], + "spans": [ + { + "bbox": [ + 62, + 569, + 295, + 634 + ], + "type": "text", + "content": "[8] Vasileios Choutas, Lea Müller, Chun-Hao P. Huang, Siyu Tang, Dimitrios Tzionas, and Michael J. Black. Accurate 3D body shape regression using metric and semantic attributes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2718-2728, 2022. 2, 3, 4, 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 636, + 295, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 636, + 295, + 690 + ], + "spans": [ + { + "bbox": [ + 62, + 636, + 295, + 690 + ], + "type": "text", + "content": "[9] Ginger Delmas, Philippe Weinzaepfel, Francesc Moreno-Noguer, and Grégory Rogez. PoseFix: correcting 3D human poses with natural language. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15018-15028, 2023. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "type": "text", + "content": "[10] Ginger Delmas, Philippe Weinzaepfel, Francesc Moreno-Noguer, and Grégory Rogez. Posembroider: Towards a" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "3D, visual, semantic-aware human pose representation. In European Conference on Computer Vision, 2024. 2, 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 96, + 553, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 553, + 160 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 553, + 160 + ], + "type": "text", + "content": "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 
1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 162, + 553, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 162, + 553, + 226 + ], + "spans": [ + { + "bbox": [ + 316, + 162, + 553, + 226 + ], + "type": "text", + "content": "[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 228, + 553, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 228, + 553, + 281 + ], + "spans": [ + { + "bbox": [ + 316, + 228, + 553, + 281 + ], + "type": "text", + "content": "[13] Kaiwen Duan, Song Bai, Lingxi Xie, Honggang Qi, Qingming Huang, and Qi Tian. Centernet: Keypoint triplets for object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6569-6578, 2019. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 282, + 553, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 282, + 553, + 337 + ], + "spans": [ + { + "bbox": [ + 316, + 282, + 553, + 337 + ], + "type": "text", + "content": "[14] Sai Kumar Dwivedi, Yu Sun, Priyanka Patel, Yao Feng, and Michael J Black. TokenHMR: Advancing human mesh recovery with a tokenized pose representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1323-1333, 2024. 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 338, + 553, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 338, + 553, + 403 + ], + "spans": [ + { + "bbox": [ + 316, + 338, + 553, + 403 + ], + "type": "text", + "content": "[15] Jose M. Facil, Benjamin Ummenhofer, Huizhong Zhou, Luis Montesano, Thomas Brox, and Javier Civera. CAM-Convs: Camera-aware multi-scale convolutions for single-view depth. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 11818-11827, 2019. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 404, + 553, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 404, + 553, + 448 + ], + "spans": [ + { + "bbox": [ + 316, + 404, + 553, + 448 + ], + "type": "text", + "content": "[16] Yao Feng, Jing Lin, Sai Kumar Dwivedi, Yu Sun, Priyanka Patel, and Michael J. Black. ChatPose: Chatting about 3D human pose. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 449, + 553, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 449, + 553, + 492 + ], + "spans": [ + { + "bbox": [ + 316, + 449, + 553, + 492 + ], + "type": "text", + "content": "[17] Mihai Fieraru, Mihai Zanfir, Elisabeta Oneata, Alin-Ionut Popa, Vlad Olaru, and Cristian Sminchisescu. Reconstructing three-dimensional models of interacting humans. arXiv preprint arXiv:2308.01854, 2023. 
2, 3, 6, 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 494, + 553, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 494, + 553, + 547 + ], + "spans": [ + { + "bbox": [ + 316, + 494, + 553, + 547 + ], + "type": "text", + "content": "[18] Shubham Goel, Georgios Pavlakos, Jathushan Rajasegaran, Angjoo Kanazawa, and Jitendra Malik. Reconstructing and tracking humans with transformers. Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 2, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 548, + 553, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 548, + 553, + 592 + ], + "spans": [ + { + "bbox": [ + 316, + 548, + 553, + 592 + ], + "type": "text", + "content": "[19] Dorian F Henning, Tristan Laidlow, and Stefan Leutenegger. BodySLAM: joint camera localisation, mapping, and human motion tracking. In European Conference on Computer Vision, pages 656-673. Springer, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 593, + 553, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 593, + 553, + 647 + ], + "spans": [ + { + "bbox": [ + 316, + 593, + 553, + 647 + ], + "type": "text", + "content": "[20] Dorian F Henning, Christopher Choi, Simon Schaefer, and Stefan Leutenegger. BodySLAM++: Fast and tightly-coupled visual-inertial camera and human motion tracking. In IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3781-3788. IEEE, 2023. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 647, + 555, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 647, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 647, + 555, + 713 + ], + "type": "text", + "content": "[21] Chun-Hao P Huang, Hongwei Yi, Markus Höschle, Matvey Safroshkin, Tsvetelina Alexiadis, Senya Polikovsky, Daniel Scharstein, and Michael J Black. Capturing and inferring dense full-body human-scene contact. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13274-13285, 2022. 2, 6" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 714 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 297, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 297, + 127 + ], + "type": "text", + "content": "[22] Wen Jiang, Nikos Kolotouros, Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Coherent reconstruction of multiple humans from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5579-5588, 2020. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 129, + 296, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 296, + 184 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 296, + 184 + ], + "type": "text", + "content": "[23] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7122-7131, 2018. 
2, 3, 5, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 186, + 296, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 186, + 296, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 186, + 296, + 239 + ], + "type": "text", + "content": "[24] Angjoo Kanazawa, Jason Y Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5614-5623, 2019. 3, 6, 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 241, + 296, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 241, + 296, + 308 + ], + "spans": [ + { + "bbox": [ + 56, + 241, + 296, + 308 + ], + "type": "text", + "content": "[25] Manuel Kaufmann, Jie Song, Chen Guo, Kaiyue Shen, Tianjian Jiang, Chengcheng Tang, Juan José Zárate, and Otmar Hilliges. EMDB: The electromagnetic database of global 3d human pose and shape in the wild. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14632-14643, 2023. 2, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 308, + 296, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 308, + 296, + 363 + ], + "spans": [ + { + "bbox": [ + 56, + 308, + 296, + 363 + ], + "type": "text", + "content": "[26] Rawal Khirodkar, Timur Bagautdinov, Julieta Martinez, Su Zhaoen, Austin James, Peter Selednik, Stuart Anderson, and Shunsuke Saito. Sapiens: Foundation for human vision models. In European Conference on Computer Vision, pages 206-228. Springer, 2025. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 365, + 296, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 296, + 409 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 296, + 409 + ], + "type": "text", + "content": "[27] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 410, + 296, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 410, + 296, + 464 + ], + "spans": [ + { + "bbox": [ + 56, + 410, + 296, + 464 + ], + "type": "text", + "content": "[28] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. VIBE: Video inference for human body pose and shape estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5253-5263, 2020. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 466, + 296, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 466, + 296, + 521 + ], + "spans": [ + { + "bbox": [ + 56, + 466, + 296, + 521 + ], + "type": "text", + "content": "[29] Muhammed Kocabas, Chun-Hao P Huang, Otmar Hilliges, and Michael J Black. PARE: Part attention regressor for 3D human body estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11127-11137, 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 522, + 296, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 522, + 296, + 578 + ], + "spans": [ + { + "bbox": [ + 56, + 522, + 296, + 578 + ], + "type": "text", + "content": "[30] Muhammed Kocabas, Chun-Hao P. Huang, Joachim Tesch, Lea Müller, Otmar Hilliges, and Michael J. Black. 
SPEC: Seeing people in the wild with an estimated camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11035-11045, 2021. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 579, + 296, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 579, + 296, + 633 + ], + "spans": [ + { + "bbox": [ + 56, + 579, + 296, + 633 + ], + "type": "text", + "content": "[31] Muhammed Kocabas, Ye Yuan, Pavlo Molchanov, Yunrong Guo, Michael J Black, Otmar Hilliges, Jan Kautz, and Umar Iqbal. PACE: Human and camera motion estimation from inthe-wild videos. In International Conference on 3D Vision, pages 397-408, 2024. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 635, + 296, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 635, + 296, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 635, + 296, + 689 + ], + "type": "text", + "content": "[32] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2252-2261, 2019. 2, 3, 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 691, + 296, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 296, + 714 + ], + "type": "text", + "content": "[33] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. CLIFF: Carrying location information" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 333, + 72, + 555, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 72, + 555, + 106 + ], + "spans": [ + { + "bbox": [ + 333, + 72, + 555, + 106 + ], + "type": "text", + "content": "in full frames into human pose and shape estimation. In European Conference on Computer Vision, pages 590-606. Springer, 2022. 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 107, + 555, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 107, + 555, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 107, + 555, + 150 + ], + "type": "text", + "content": "[34] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12939-12948, 2021. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 152, + 554, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 554, + 207 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 554, + 207 + ], + "type": "text", + "content": "[35] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision, pages 740-755. Springer, 2014. 6, 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 209, + 553, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 209, + 553, + 230 + ], + "spans": [ + { + "bbox": [ + 316, + 209, + 553, + 230 + ], + "type": "text", + "content": "[36] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning, 2023. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 232, + 554, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 232, + 554, + 275 + ], + "spans": [ + { + "bbox": [ + 316, + 232, + 554, + 275 + ], + "type": "text", + "content": "[37] Thomas Lucas, Fabien Baradel, Philippe Weinzaepfel, and Grégory Rogez. Posegpt: Quantization-based 3d human motion generation and forecasting. In European Conference on Computer Vision, pages 417-435, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 277, + 554, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 277, + 554, + 320 + ], + "spans": [ + { + "bbox": [ + 316, + 277, + 554, + 320 + ], + "type": "text", + "content": "[38] Zhengyi Luo, S. Alireza Golestaneh, and Kris M. Kitani. 3d human motion estimation via motion compression and refinement. In Proceedings of the Asian Conference on Computer Vision, 2020. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 322, + 554, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 322, + 554, + 376 + ], + "spans": [ + { + "bbox": [ + 316, + 322, + 554, + 376 + ], + "type": "text", + "content": "[39] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved cnn supervision. In International Conference on 3D Vision, pages 506-516. IEEE, 2017. 6, 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 378, + 554, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 378, + 554, + 433 + ], + "spans": [ + { + "bbox": [ + 316, + 378, + 554, + 433 + ], + "type": "text", + "content": "[40] Gyeongsik Moon and Kyoung Mu Lee. I2L-MeshNet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single RGB image. In European Conference on Computer Vision, pages 752-768. Springer, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 434, + 554, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 434, + 554, + 477 + ], + "spans": [ + { + "bbox": [ + 316, + 434, + 554, + 477 + ], + "type": "text", + "content": "[41] Raul Mur-Artal and Juan D Tardós. ORB-SLAM: An opensource slam system for monocular, stereo, and RGB-D cameras. IEEE Transactions on Robotics, 33(5):1255-1262, 2017. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 479, + 554, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 479, + 554, + 522 + ], + "spans": [ + { + "bbox": [ + 316, + 479, + 554, + 522 + ], + "type": "text", + "content": "[42] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. ORB-SLAM: A versatile and accurate monocular SLAM system. IEEE Transactions on Robotics, 31(5):1147-1163, 2015. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 524, + 555, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 524, + 555, + 579 + ], + "spans": [ + { + "bbox": [ + 316, + 524, + 555, + 579 + ], + "type": "text", + "content": "[43] Lea Müller, Vickie Ye, Georgios Pavlakos, Michael J. Black, and Angjoo Kanazawa. Generative proxemics: A prior for 3D social interaction from images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 
3, 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 581, + 555, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 581, + 555, + 679 + ], + "spans": [ + { + "bbox": [ + 316, + 581, + 555, + 679 + ], + "type": "text", + "content": "[44] Maxime Oquab, Timothée Darcet, Theo Moutakanni, Huy V. Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Russell Howes, Po-Yao Huang, Hu Xu, Vasu Sharma, Shang-Wen Li, Wojciech Galuba, Mike Rabbat, Mido Assran, Nicolas Ballas, Gabriel Synnaeve, Ishan Misra, Herve Jegou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. DINoV2: Learning robust visual features without supervision, 2023. 4, 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 681, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 681, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 681, + 554, + 713 + ], + "type": "text", + "content": "[45] Priyanka Patel and Michael J. Black. Camerahrm: Aligning people with perspective. International Conference on 3D Vision (3DV), 2025. 6, 7" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 138 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 138 + ], + "type": "text", + "content": "[46] Priyanka Patel, Chun-Hao P Huang, Joachim Tesch, David T Hoffmann, Shashank Tripathi, and Michael J Black. AGORA: Avatars in geography optimized for regression analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13468-13478, 2021. 6, 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 140, + 295, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 295, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 295, + 206 + ], + "type": "text", + "content": "[47] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3D hands, face, and body from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10975-10985, 2019. 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 208, + 294, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 208, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 56, + 208, + 294, + 251 + ], + "type": "text", + "content": "[48] Mathis Petrovich, Michael J Black, and Gül Varol. Action-conditioned 3D human motion synthesis with transformer VAE. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10985-10995, 2021. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 252, + 294, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 252, + 294, + 285 + ], + "spans": [ + { + "bbox": [ + 56, + 252, + 294, + 285 + ], + "type": "text", + "content": "[49] Baldomero R. Árbol and Dan Casas. BodyShapeGPT: SMPL body shape manipulation with LLMs. In European Conference on Computer Vision Workshops, 2024. 
3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 286, + 294, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 286, + 294, + 341 + ], + "spans": [ + { + "bbox": [ + 56, + 286, + 294, + 341 + ], + "type": "text", + "content": "[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 342, + 294, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 342, + 294, + 397 + ], + "spans": [ + { + "bbox": [ + 56, + 342, + 294, + 397 + ], + "type": "text", + "content": "[51] Rene Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(3):1623-1637, 2022. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 399, + 294, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 399, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 56, + 399, + 294, + 453 + ], + "type": "text", + "content": "[52] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J Guibas. HUMOR: 3D human motion model for robust pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11488-11499, 2021. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 455, + 294, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 455, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 56, + 455, + 294, + 498 + ], + "type": "text", + "content": "[53] Zehong Shen, Huajin Pi, Yan Xia, Zhi Cen, Sida Peng, Zechen Hu, Hujun Bao, Ruizhen Hu, and Xiaowei Zhou. World-grounded human motion recovery via gravity-view coordinates. In SIGGRAPH Asia, 2024. 3, 6, 7, 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 500, + 294, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 500, + 294, + 543 + ], + "spans": [ + { + "bbox": [ + 56, + 500, + 294, + 543 + ], + "type": "text", + "content": "[54] Soyong Shin, Juyong Kim, Eni Halilaj, and Michael J Black. WHAM: Reconstructing world-grounded humans with accurate 3D motion. arXiv preprint arXiv:2312.07531, 2023. 3, 5, 6, 7, 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 544, + 294, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 544, + 294, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 544, + 294, + 588 + ], + "type": "text", + "content": "[55] Stephan Streuber, M Alejandra Quiros-Ramirez, Matthew Q Hill, Carina A Hahn, Silvia Zuffi, Alice O'Toole, and Michael J Black. Body talk: Crowdshaping realistic 3D avatars with words. ACM TOG, 35(4):1-14, 2016. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "type": "text", + "content": "[56] Sanjay Subramanian, Evonne Ng, Lea Müller, Dan Klein, Shiry Ginosar, and Trevor Darrell. Pose priors from language models. arXiv preprint arXiv:2405.03689, 2024. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 624, + 294, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 294, + 689 + ], + "type": "text", + "content": "[57] Qingping Sun, Yanjun Wang, Ailing Zeng, Wanqi Yin, Chen Wei, Wenjia Wang, Haiyi Mei, Chi-Sing Leung, Ziwei Liu, Lei Yang, and Zhongang Cai. AiOS: All-in-one-stage expressive human pose and shape estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 1834-1843, 2024. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "text", + "content": "[58] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 105 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 105 + ], + "type": "text", + "content": "skeleton-disentangled representation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 106, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 106, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 106, + 553, + 150 + ], + "type": "text", + "content": "[59] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J Black, and Tao Mei. Monocular, one-stage, regression of multiple 3D people. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11179-11188, 2021. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 152, + 553, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 553, + 204 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 553, + 204 + ], + "type": "text", + "content": "[60] Yu Sun, Wu Liu, Qian Bao, Yili Fu, Tao Mei, and Michael J Black. Putting people in their place: Monocular regression of 3D people in depth. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13243-13252, 2022. 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 206, + 555, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 206, + 555, + 260 + ], + "spans": [ + { + "bbox": [ + 316, + 206, + 555, + 260 + ], + "type": "text", + "content": "[61] Yu Sun, Qian Bao, Wu Liu, Tao Mei, and Michael J Black. TRACE: 5D temporal regression of avatars with dynamic cameras in 3D environments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8856-8866, 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 261, + 553, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 261, + 553, + 303 + ], + "spans": [ + { + "bbox": [ + 316, + 261, + 553, + 303 + ], + "type": "text", + "content": "[62] Zachary Teed and Jia Deng. DRPOID-SLAM: Deep visual slam for monocular, stereo, and RGB-D cameras. Advances in Neural Information Processing Systems, 34:16558-16569, 2021. 
3, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 305, + 553, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 305, + 553, + 338 + ], + "spans": [ + { + "bbox": [ + 316, + 305, + 553, + 338 + ], + "type": "text", + "content": "[63] Zachary Teed, Lahav Lipson, and Jia Deng. Deep patch visual odometry. Advances in Neural Information Processing Systems, 36, 2024. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 338, + 553, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 338, + 553, + 380 + ], + "spans": [ + { + "bbox": [ + 316, + 338, + 553, + 380 + ], + "type": "text", + "content": "[64] Guy Tevet, Sigal Raab, Brian Gordon, Yoni Shafir, Daniel Cohen-or, and Amit Haim Bermano. Human motion diffusion model. In International Conference on Learning Representations, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 383, + 553, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 383, + 553, + 436 + ], + "spans": [ + { + "bbox": [ + 316, + 383, + 553, + 436 + ], + "type": "text", + "content": "[65] Timo Von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In European Conference on Computer Vision, pages 601-617, 2018. 2, 6, 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 438, + 553, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 438, + 553, + 480 + ], + "spans": [ + { + "bbox": [ + 316, + 438, + 553, + 480 + ], + "type": "text", + "content": "[66] Yufu Wang and Kostas Daniilidis. ReFit: Recurrent fitting network for 3D human recovery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14644-14654, 2023. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 482, + 553, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 482, + 553, + 525 + ], + "spans": [ + { + "bbox": [ + 316, + 482, + 553, + 525 + ], + "type": "text", + "content": "[67] Yufu Wang, Ziyun Wang, Lingjie Liu, and Kostas Daniilidis. TRAM: Global trajectory and motion of 3d humans from inthe-wild videos. In European Conference on Computer Vision, 2024. 2, 3, 5, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 526, + 553, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 526, + 553, + 580 + ], + "spans": [ + { + "bbox": [ + 316, + 526, + 553, + 580 + ], + "type": "text", + "content": "[68] Jiahong Wu, He Zheng, Bo Zhao, Yixin Li, Baoming Yan, Rui Liang, Wenjia Wang, Shipei Zhou, Guosen Lin, Yanwei Fu, et al. AI challenger: A large-scale dataset for going deeper in image understanding. arXiv preprint arXiv:1711.06475, 2017. 6, 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 582, + 553, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 582, + 553, + 624 + ], + "spans": [ + { + "bbox": [ + 316, + 582, + 553, + 624 + ], + "type": "text", + "content": "[69] Hu Xu, Saining Xie, Xiaqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. arXiv preprint arXiv:2309.16671, 2023. 
1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 625, + 553, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 625, + 553, + 679 + ], + "spans": [ + { + "bbox": [ + 316, + 625, + 553, + 679 + ], + "type": "text", + "content": "[70] Vickie Ye, Georgios Pavlakos, Jitendra Malik, and Angjoo Kanazawa. Decoupling human and camera motion from videos in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21222-21232, 2023. 3, 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 681, + 555, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 681, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 681, + 555, + 713 + ], + "type": "text", + "content": "[71] Yifei Yin, Chen Guo, Manuel Kaufmann, Juan Jose Zarate, Jie Song, and Otmar Hilliges. Hi4D: 4D instance segmentation of close human interaction. In Proceedings of" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 274 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 76, + 72, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 294, + 95 + ], + "type": "text", + "content": "the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17016-17027, 2023. 2, 3, 6, 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "type": "text", + "content": "[72] Ye Yuan, Umar Iqbal, Pavlo Molchanov, Kris Kitani, and Jan Kautz. GLAMR: Global occlusion-aware human mesh recovery with dynamic cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11038-11049, 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 152, + 295, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 295, + 217 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 295, + 217 + ], + "type": "text", + "content": "[73] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. PyMAF: 3D human pose and shape regression with pyramidal mesh alignment feedback loop. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11446-11456, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 219, + 294, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 219, + 294, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 219, + 294, + 274 + ], + "type": "text", + "content": "[74] Yizhou Zhao, Tuanfeng Yang Wang, Bhiksha Raj, Min Xu, Jimei Yang, and Chun-Hao Paul Huang. Synergistic global-space camera and human reconstruction from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1216-1226, 2024. 
3" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 149, + 68, + 462, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 68, + 462, + 110 + ], + "spans": [ + { + "bbox": [ + 149, + 68, + 462, + 110 + ], + "type": "text", + "content": "PromptHMR: Promptable Human Mesh Recovery Supplementary Material" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 123, + 167, + 136 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 123, + 167, + 136 + ], + "spans": [ + { + "bbox": [ + 55, + 123, + 167, + 136 + ], + "type": "text", + "content": "7. Additional Results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 144, + 295, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 295, + 192 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 295, + 192 + ], + "type": "text", + "content": "In this section, we demonstrate more qualitative results to show the effects of interaction prompting and the video module. Please refer to the supplementary video to see the results from PromptHMR-Vid." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 200, + 185, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 200, + 185, + 213 + ], + "spans": [ + { + "bbox": [ + 55, + 200, + 185, + 213 + ], + "type": "text", + "content": "7.1. Interaction Prompting" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 219, + 296, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 219, + 296, + 410 + ], + "spans": [ + { + "bbox": [ + 55, + 219, + 296, + 410 + ], + "type": "text", + "content": "We perform qualitative and quantitative ablation studies of interaction prompting on the HI4D dataset. In Tab. 5 of the main paper, we demonstrate that introducing interaction prompting improves the quantitative results on HI4D. In Fig. 9, we present more qualitative results to show the effect of the interaction module. As shown in the first column of Fig. 9, without the interaction module, the model does not learn to reconstruct close interaction effectively, even when trained with CHI3D interaction data. By adding the proposed interaction module, in the second column, the relative position and orientation of the interacting people are improved, and the penetration is reduced. Note that if we turn off the interaction module via the proposed flow control, the results will become similar to the first column. Finally, training with both CHI3D and HI4D leads to better results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 422, + 170, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 422, + 170, + 436 + ], + "spans": [ + { + "bbox": [ + 55, + 422, + 170, + 436 + ], + "type": "text", + "content": "8. Experiment Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 443, + 118, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 443, + 118, + 454 + ], + "spans": [ + { + "bbox": [ + 55, + 443, + 118, + 454 + ], + "type": "text", + "content": "8.1. 
Datasets" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 461, + 295, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 461, + 295, + 568 + ], + "spans": [ + { + "bbox": [ + 55, + 461, + 295, + 568 + ], + "type": "text", + "content": "The training set of the image model includes BEDLAM [4], AIC [68], InstaVariety [24], HI4D [71], CHI3D [17], AGORA [46], 3DPW [65], COCO [35], and MPII [39], with the sampling rate of " + }, + { + "bbox": [ + 55, + 461, + 295, + 568 + ], + "type": "inline_equation", + "content": "\\{0.2, 0.2, 0.3, 0.08, 0.08, 0.06, 0.06, 0.01, 0.01\\}" + }, + { + "bbox": [ + 55, + 461, + 295, + 568 + ], + "type": "text", + "content": ". All input images are padded and resized to " + }, + { + "bbox": [ + 55, + 461, + 295, + 568 + ], + "type": "inline_equation", + "content": "896 \\times 896" + }, + { + "bbox": [ + 55, + 461, + 295, + 568 + ], + "type": "text", + "content": ". During training, we employ rotation and color jitter augmentation. For PromptHMR-Vid, we use BEDLAM and 3DPW datasets following [53, 54]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 570, + 295, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 295, + 653 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 295, + 653 + ], + "type": "text", + "content": "To use datasets with different annotations for training, we adopt different losses described in Sec.3.5 of the main paper. For the ones (e.g. BEDLAM, AGORA, CHI3D, HI4D) with ground truth SMPL/SMPL-X annotations, we employ all loss items. While on AIC, InstaVariety, and 3DPW, we drop the translation loss. On COCO and MPII, we only compute 2D keypoint reprojection loss." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 654, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 296, + 714 + ], + "type": "text", + "content": "We generate the whole-body bounding boxes by projecting the ground-truth SMPL-X meshes onto the image plane. To generate the face bounding boxes, we project the head vertices. To generate truncated boxes, we take groups of keypoints (e.g. upper body keypoints) and compute their" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 124, + 553, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 124, + 553, + 147 + ], + "spans": [ + { + "bbox": [ + 313, + 124, + 553, + 147 + ], + "type": "text", + "content": "bounding boxes. Gaussian noise is then added to both corners." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 148, + 555, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 148, + 555, + 244 + ], + "spans": [ + { + "bbox": [ + 313, + 148, + 555, + 244 + ], + "type": "text", + "content": "On BEDLAM, AGORA, and AIC, we follow SHAPY [8] to compute the shape attribute scores. During training, we compose a shape description for each instance, such as \"a tall and broad-shoulder female\" with a few augmentation rules. Each sentence will randomly sample 1-3 top attributes. The gender information is augmented with synonyms, such as \"female\", \"woman\", \"girl\", etc." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 253, + 397, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 253, + 397, + 264 + ], + "spans": [ + { + "bbox": [ + 313, + 253, + 397, + 264 + ], + "type": "text", + "content": "8.2. 
Architecture" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "spans": [ + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "text", + "content": "We adopt the ViT-L [11], pretrained by DINOv2 [44], as our image encoder. We use an input image size of 896 and a patch size of 14, leading to the same spatial resolution as the recent Sapiens models [26]. The text encoder is from MetaCLIP [69]. The SMPL-X decoder consists of 3 attention blocks with an embedding dimension of 1024. From the output tokens " + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "inline_equation", + "content": "(T_{smpl}^{\\prime}" + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "inline_equation", + "content": "T_{depth}^{\\prime}" + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "text", + "content": "), we use separate 2-layer MLPs to regress " + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "inline_equation", + "content": "p_{xy}" + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "inline_equation", + "content": "p_z" + }, + { + "bbox": [ + 313, + 271, + 555, + 378 + ], + "type": "text", + "content": " as introduced in Sec.3.2." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 387, + 378, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 387, + 378, + 399 + ], + "spans": [ + { + "bbox": [ + 313, + 387, + 378, + 399 + ], + "type": "text", + "content": "8.3. Training" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 406, + 554, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 406, + 554, + 477 + ], + "spans": [ + { + "bbox": [ + 313, + 406, + 554, + 477 + ], + "type": "text", + "content": "We train the PromptHMR image model using 8 H100 GPUs, with a batch size of 96 (12 images on each GPU). We use AdamW with a learning rate of 1e-5 for the image encoder, a learning rate of 3e-5 for the prompt encoder, and the SMPL-X decoder, " + }, + { + "bbox": [ + 313, + 406, + 554, + 477 + ], + "type": "inline_equation", + "content": "\\beta_{1}" + }, + { + "bbox": [ + 313, + 406, + 554, + 477 + ], + "type": "text", + "content": " of 0.9, " + }, + { + "bbox": [ + 313, + 406, + 554, + 477 + ], + "type": "inline_equation", + "content": "\\beta_{2}" + }, + { + "bbox": [ + 313, + 406, + 554, + 477 + ], + "type": "text", + "content": " of 0.999, and a weight decay of 5e-5." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "spans": [ + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "type": "text", + "content": "The losses presented in Sec.3.5 are weighted differently. 
For " + }, + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{2D},\\mathcal{L}_{3D},\\mathcal{L}_{\\mathrm{SMPL}},\\mathcal{L}_V" + }, + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{trans}" + }, + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "type": "text", + "content": ", the weights are set to " + }, + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "type": "inline_equation", + "content": "\\{50.0,5.0,1.0,1.0,10.0\\}" + }, + { + "bbox": [ + 313, + 478, + 554, + 514 + ], + "type": "text", + "content": " respectively." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 530, + 555, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 530, + 555, + 602 + ], + "spans": [ + { + "bbox": [ + 313, + 530, + 555, + 602 + ], + "type": "text", + "content": "PromptHMR-Vid We train the PromptHMR video model on 2 H100 GPUs with a batch size of 512 samples consisting of 120 frames each. We use AdamW with a learning rate of 2e-4 and a weight decay of 5e-5. We use the same losses as the image-based version in addition to binary cross-entropy loss for joint contact predictions." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 610, + 369, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 610, + 369, + 622 + ], + "spans": [ + { + "bbox": [ + 313, + 610, + 369, + 622 + ], + "type": "text", + "content": "8.4. Metric" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 629, + 553, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 553, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 553, + 653 + ], + "type": "text", + "content": "In this section, we provide more details on the evaluation metric used in Sec.4 of the main paper." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 554, + 713 + ], + "type": "text", + "content": "Mean Per Joint Position Error (MPJPE) is calculated by aligning the 3D joints obtained from SMPL-X with the ground truth at the pelvis before computing the mean square error. For historical reasons, different datasets use a different set of joints. Additionally, the pelvis definition could" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 64, + 537, + 559 + ], + "blocks": [ + { + "bbox": [ + 59, + 64, + 537, + 559 + ], + "lines": [ + { + "bbox": [ + 59, + 64, + 537, + 559 + ], + "spans": [ + { + "bbox": [ + 59, + 64, + 537, + 559 + ], + "type": "image", + "image_path": "0ce1f2959db11ef87e142908954c88a9bbc66d4fd12da3a4358a967120790538.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 563, + 555, + 597 + ], + "lines": [ + { + "bbox": [ + 55, + 563, + 555, + 597 + ], + "spans": [ + { + "bbox": [ + 55, + 563, + 555, + 597 + ], + "type": "text", + "content": "Figure 9. Ablation of interaction module. 
When fine-tuning the image model on CHI3D, adding the interaction module improves two-person interaction reconstruction on HI4D, which demonstrates the out-of-domain generalization ability of interaction prompting. Fine-tuning on both CHI3D and HI4D further improves results." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 611, + 298, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 611, + 298, + 708 + ], + "spans": [ + { + "bbox": [ + 55, + 611, + 298, + 708 + ], + "type": "text", + "content": "be different. To evaluate methods that predict SMPL-X on the datasets with SMPL labels, it's customary to convert the SMPL-X vertices to SMPL vertices and use a joint regressor on the converted vertices to obtain the 3D joints comparable to the labels. Note that all the above choices could alter the results and sometimes produce large \"artificial\" improvements. So we strictly follow the most recent methods in the evaluation procedure. It's reported in the unit of mm." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 313, + 611, + 556, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 611, + 556, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 611, + 556, + 658 + ], + "type": "text", + "content": "Per Vertex error (PVE) computes mean square error on the vertices after pelvis alignment. Compared to MPJPE, it measures the combined pose and shape error. It's reported in the unit of mm." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 556, + 715 + ], + "type": "text", + "content": "Procrustes-aligned MPJPE (PA-MPJPE) performs general Procrustes alignment on the 3D joints before computing MPJPE. It measures purely the local articulated pose error. It's reported in the unit of mm." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 143 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 143 + ], + "type": "text", + "content": "Paired PA-MPJPE (Pair-PA-MPJPE) aligns two people as a whole with the ground truth before computing MPJPE. In addition to per-person error, it also measures the error in the relative position and orientation of the two people. It's used in HI4D and CHI3D to evaluate interaction reconstruction. It's reported in the unit of mm." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 144, + 294, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 144, + 294, + 191 + ], + "spans": [ + { + "bbox": [ + 55, + 144, + 294, + 191 + ], + "type": "text", + "content": "World-aligned " + }, + { + "bbox": [ + 55, + 144, + 294, + 191 + ], + "type": "inline_equation", + "content": "\\mathbf{MPJPE}_{100}" + }, + { + "bbox": [ + 55, + 144, + 294, + 191 + ], + "type": "text", + "content": " (WA-MPJPE" + }, + { + "bbox": [ + 55, + 144, + 294, + 191 + ], + "type": "inline_equation", + "content": "_{100}" + }, + { + "bbox": [ + 55, + 144, + 294, + 191 + ], + "type": "text", + "content": ") measures the world-grounded motion accuracy. It aligns a segment of 100 frames of predictions with the ground truth before computing MPJPE. 
It's reported in the unit of mm." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 192, + 294, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 192, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 294, + 251 + ], + "type": "text", + "content": "World " + }, + { + "bbox": [ + 55, + 192, + 294, + 251 + ], + "type": "inline_equation", + "content": "\\mathrm{MPJPE}_{100}" + }, + { + "bbox": [ + 55, + 192, + 294, + 251 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 55, + 192, + 294, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{W} - \\mathbf{MPJPE}_{100}" + }, + { + "bbox": [ + 55, + 192, + 294, + 251 + ], + "type": "text", + "content": ") is similar to WA-MPJPE but only aligns the first two frames of the 100-frame segment. Therefore, it provides a better measurement of the drifting in the direction and scale of the trajectories. It's reported in the unit of mm." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 251, + 294, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 251, + 294, + 299 + ], + "spans": [ + { + "bbox": [ + 55, + 251, + 294, + 299 + ], + "type": "text", + "content": "Root Trajectory Error (RTE) measures the accuracy of the whole trajectory including the scale. It performs rigid alignment on the trajectory of the root and computes the mean square error. It's reported in the unit of " + }, + { + "bbox": [ + 55, + 251, + 294, + 299 + ], + "type": "inline_equation", + "content": "\\%" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 300, + 294, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 300, + 294, + 335 + ], + "spans": [ + { + "bbox": [ + 55, + 300, + 294, + 335 + ], + "type": "text", + "content": "Motion Jitter (Jitter) uses finite difference to compute the jerk " + }, + { + "bbox": [ + 55, + 300, + 294, + 335 + ], + "type": "inline_equation", + "content": "(3^{rd}" + }, + { + "bbox": [ + 55, + 300, + 294, + 335 + ], + "type": "text", + "content": " derivative) on the 3D joints. It measures rapid abrupt changes. It's reported in the unit of " + }, + { + "bbox": [ + 55, + 300, + 294, + 335 + ], + "type": "inline_equation", + "content": "10m / s^3" + }, + { + "bbox": [ + 55, + 300, + 294, + 335 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 335, + 294, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 335, + 294, + 394 + ], + "spans": [ + { + "bbox": [ + 55, + 335, + 294, + 394 + ], + "type": "text", + "content": "Foot Skating measures erroneous foot sliding. It thresholds the velocity of the ground truth foot vertices to compute contact frames, and calculates the displacement on the predicted foot vertices during contact. It's reported in the unit of " + }, + { + "bbox": [ + 55, + 335, + 294, + 394 + ], + "type": "inline_equation", + "content": "\\text{mm}" + }, + { + "bbox": [ + 55, + 335, + 294, + 394 + ], + "type": "text", + "content": "." 
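To make the main error metrics concrete, here is a small NumPy sketch of pelvis-aligned MPJPE, Procrustes-aligned MPJPE, and the finite-difference jitter measure described above. It is an illustrative reimplementation under simplifying assumptions (joints in metres, pelvis at index 0, which as noted varies across datasets), not the evaluation code used in the paper.

```python
# Illustrative reimplementation of three of the metrics described above.
import numpy as np

def mpjpe(pred, gt, pelvis_idx=0):
    """Pelvis-aligned Mean Per Joint Position Error, in mm."""
    pred = pred - pred[pelvis_idx]
    gt = gt - gt[pelvis_idx]
    return float(np.linalg.norm(pred - gt, axis=-1).mean() * 1000.0)

def pa_mpjpe(pred, gt):
    """Procrustes-aligned MPJPE (similarity transform fitted via SVD), in mm."""
    mu_p, mu_g = pred.mean(axis=0), gt.mean(axis=0)
    p, g = pred - mu_p, gt - mu_g
    u, s, vt = np.linalg.svd(p.T @ g)
    if np.linalg.det(vt.T @ u.T) < 0:   # avoid an improper rotation (reflection)
        vt[-1] *= -1
        s[-1] *= -1
    r = vt.T @ u.T
    scale = s.sum() / (p ** 2).sum()
    aligned = scale * p @ r.T + mu_g
    return float(np.linalg.norm(aligned - gt, axis=-1).mean() * 1000.0)

def jitter(joints_seq, fps=30.0):
    """Mean jerk magnitude (3rd finite difference) over a (T, J, 3) joint sequence."""
    jerk = np.diff(joints_seq, n=3, axis=0) * fps ** 3
    return float(np.linalg.norm(jerk, axis=-1).mean())

# Dummy usage on random joints:
pred = np.random.randn(24, 3)
gt = pred + 0.01 * np.random.randn(24, 3)
print(mpjpe(pred, gt), pa_mpjpe(pred, gt))
```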
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_content_list.json b/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..98f5df5d51f253f09065ac5ec962f655511125bd --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_content_list.json @@ -0,0 +1,2986 @@ +[ + { + "type": "text", + "text": "PosterMaker: Towards High-Quality Product Poster Generation with Accurate Text Rendering", + "text_level": 1, + "bbox": [ + 174, + 130, + 823, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yifan $\\mathrm{Gao}^{1,2*^{\\dagger}}$ , Zihang Lin $^{2*}$ , Chuanbin Liu $^{1\\ddagger}$ , Min Zhou $^{2}$", + "bbox": [ + 261, + 202, + 733, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tiezheng Ge², Bo Zheng², Hongtao Xie¹", + "bbox": [ + 328, + 220, + 666, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1University of Science and Technology of China 2Taubao & Tmall Group of Alibaba", + "bbox": [ + 163, + 239, + 834, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "eafn@mail.ustc.edu.cn {liucb92, htxie}@ustc.edu.cn", + "bbox": [ + 274, + 258, + 722, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{linzihang.lzh, yunqi.zm, tiezheng.gtz, bozheng}@alibaba-inc.com", + "bbox": [ + 217, + 277, + 781, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project page: https://poster-maker.github.io", + "bbox": [ + 295, + 295, + 696, + 309 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Prompt", + "text_level": 1, + "bbox": [ + 163, + 311, + 218, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The box of fish oil supplements is placed on a wooden table, with a background of a serene ocean and clear sky, symbolizing purity and the natural source of the product", + "bbox": [ + 116, + 335, + 256, + 393 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The subject rests on a smooth, dark wooden table, surrounded by a few scattered leaves and delicate flowers, with a serene garden scene complete with blooming flowers and lush greenery in the background.", + "bbox": [ + 116, + 428, + 256, + 498 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Subject", + "text_level": 1, + "bbox": [ + 259, + 311, + 331, + 325 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1e079d3f112cc12b9f993bc37690dfe7939ba2bbd0baf1a45063ce19c15ac543.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 319, + 331, + 397 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a58e688e8dedf31932fdfcb7201e6f920b0c0e8366cfc7da9d05f237165b5659.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 445, + 348, + 489 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Text", + "text_level": 1, + "bbox": [ + 383, + 311, + 421, + 324 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/17f2983698b127a00d4e62597e010fa56f7291a6f66ec063e7f2f40531d73f5f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 324, + 460, + 411 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": 
"images/f4521cddeceba886319b3866a3f1acd34aea14164e3e0df48fbea79cc2442101.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 349, + 412, + 460, + 526 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Poster", + "text_level": 1, + "bbox": [ + 488, + 311, + 535, + 324 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6ba6efda80ae24712f5628d9c9a49d3b0c5a7601c44fea4221ad6127c7f0c711.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 462, + 324, + 575, + 411 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/664f809e620ed5313bd043a4f137e29ea847a04403fad44ef5ab96a28af07f67.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 462, + 412, + 575, + 526 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Previous: two stage", + "text_level": 1, + "bbox": [ + 650, + 311, + 795, + 325 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3d43dd864821885fae7c2db8123d1c1ed34fd14792cb07a03aaaf5258e5c537b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 325, + 772, + 409 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0acb3c0d5fed3a3f8193e9127851af5894725354a094709aea933134f3e3b7b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 776, + 325, + 867, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ours: end to end", + "text_level": 1, + "bbox": [ + 658, + 419, + 779, + 433 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5aafc7be6f249efe6e065066c893a2da912a408f5a9da7a7962c20d68194f624.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 455, + 754, + 518 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0881d676bb6c6f70842f5ed1ac298edb0c67e1c6d95af2b034679feb230f99be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 774, + 436, + 867, + 518 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/84cb058d0e8dc1bbdb90f7e73f2e5a9985e9375393a90b68108134dc28db47f5.jpg", + "image_caption": [ + "Figure 1. (a) Definition of the advertising product poster generation task. The input includes the prompt, subject image, and the texts to be rendered with their layouts. The output is the poster image. (b) The comparison of our method with the previous method. PosterMaker generates posters end-to-end, while previous methods first generate poster backgrounds and then render texts. (c) Visualization results demonstrate that PosterMaker can generate harmonious and aesthetically pleasing posters with accurate texts and maintain subject fidelity." 
+ ], + "image_footnote": [], + "bbox": [ + 114, + 527, + 292, + 734 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e6705e40e73fa87f1df270a6d2380df0e9a7efa0f5d05b1e5a0f2a835d460f14.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 295, + 529, + 428, + 632 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dfbca06eace34b9928322d5af9014b4a18479b600dfa3a56f5147f507be663cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 295, + 632, + 426, + 734 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8860ac0793d8245add147225b1af9376c5b05f22070e3ad2315d5b0d55f1fb48.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 429, + 529, + 560, + 632 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d57e8b3cd2f71d6889bca5f2899b15a010f1cd79ba9d11b597c7b10a57738729.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 429, + 635, + 560, + 734 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/76fb2ebcebf1e1c35721cea4542b4bfcb7234470893d55ce6c3bf73c0f43ce28.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 529, + 696, + 632 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b86755819730d39a871b328f8d3b51e66352c042590e7172bf79246b96c76c85.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 635, + 696, + 734 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a37a51ba7e3c0388312024f2864b9a532c649ad3b79c31574d200575e0d7adf1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 697, + 529, + 875, + 734 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 806, + 326, + 821 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Product posters, which integrate subject, scene, and text, are crucial promotional tools for attracting customers. Cre", + "bbox": [ + 89, + 833, + 482, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ating such posters using modern image generation methods is valuable, while the main challenge lies in accurately rendering text, especially for complex writing systems like Chinese, which contains over 10,000 individual characters. In this work, we identify the key to precise text rendering", + "bbox": [ + 511, + 811, + 906, + 887 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06632v1 [cs.CV] 9 Apr 2025", + "bbox": [ + 22, + 268, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution. ‡ Corresponding author.", + "bbox": [ + 112, + 875, + 362, + 888 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Work done during the internship at Alibaba Group.", + "bbox": [ + 112, + 888, + 393, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "as constructing a character-discriminative visual feature as a control signal. Based on this insight, we propose a robust character-wise representation as control and we develop TextRenderNet, which achieves a high text rendering accuracy of over $90\\%$ . Another challenge in poster generation is maintaining the fidelity of user-specific products. We address this by introducing SceneGenNet, an inpainting-based model, and propose subject fidelity feedback learning to further enhance fidelity. 
Based on TextRenderNet and SceneGenNet, we present PosterMaker, an end-to-end generation framework. To optimize PosterMaker efficiently, we implement a two-stage training strategy that decouples text rendering and background generation learning. Experimental results show that PosterMaker outperforms existing baselines by a remarkable margin, which demonstrates its effectiveness.", + "bbox": [ + 89, + 90, + 480, + 330 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 345, + 222, + 359 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Product posters, which showcase items for sale within well-chosen background scenes and include descriptive text, play a vital role in e-commerce advertising by capturing customers' attention and boosting sales. Creating such posters necessitates photographing the product in carefully selected environments that highlight its features, as well as thoughtfully choosing text colors and fonts to ensure that the text is appealing, legible, and harmonious with the background. This process can be quite expensive. With the significant advancements in large-scale text-to-image (T2I) models [13, 35, 39], synthesizing such product posters with image generation models attracts increasing attention. In this paper, we focus on the product poster generation task. Specifically, given a prompt describing the background scene, the foreground image of the user-specified subject and some texts together with their layouts, we aim to develop a model to generate the subject into the desired scene background and accurately render the text in an end-to-end manner (as shown in Fig. 1 (a)).", + "bbox": [ + 89, + 369, + 482, + 657 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A straightforward solution for this task is to first generate the subject into the desired scene [2, 11, 40], and then predict the text attributes (such as color and font) [14, 23] and render them on the image. However, such two-stage approach suffers from disharmony between the text and the poster background(as shown in Fig. 2 (b)). And collecting training data is also challenging since the text attributes, especially the text font, are difficult to extract from the poster. Another solution is learning to generate the poster using a per-pixel synthesis approach, which can benefit from directly learning the distribution of professionally designed posters. We focus on such one-stage solution. The main challenge is how to ensure the text rendering accuracy.", + "bbox": [ + 89, + 659, + 482, + 854 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Many recent works [13, 25, 42, 49] have been proposed to improve the text rendering accuracy for large diffusion models. Great progress has been made and some", + "bbox": [ + 89, + 854, + 482, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "recent work can achieve high rendering accuracy for English. However, for non-Latin languages like Chinese, one of the most widely spoken languages, achieving high rendering accuracy remains challenging. This difficulty stems from the existence of over 10,000 characters, with Chinese characters characterized by complex and diverse stroke patterns, making it extremely difficult to train a model to memorize the rendering of each individual character. Recent studies [4, 28, 42] have focused on extracting visual features of text as control signals. 
Typically, these approaches render text lines into glyph images and extract line-level text visual features to guide generation.", + "bbox": [ + 511, + 90, + 903, + 272 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Nevertheless, line-level visual features often lack the discriminative power to capture character-level visual nuances. To address this limitation, GlyphByT5 [25, 26] introduced a box-level contrastive loss with sophisticated glyph augmentation strategies to enhance character-level discriminativeness, achieving promising results. In this paper, we point out that the key to high-accuracy text rendering lies in constructing character-discriminative visual features as control signals. Specifically, we render each character as a glyph image and extract visual features via a visual encoder. These features are then concatenated with positional embeddings to form a character-level representation. Then we propose TextRenderNet, an SD3 [13] controlnet-like [53] architecture that takes the character-level representation as the control signal to render visual text. Our experiments demonstrate that the proposed character-level representation is effectively capable of achieving accurate text rendering.", + "bbox": [ + 511, + 275, + 903, + 547 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the task of poster generation, another important thing is to generate the user-specific subject into a desired scene while keeping high subject fidelity. Recent subject-driven controllable generation methods [40, 44, 51] can synthesize such images, but they still cannot ensure that the user-specified subject is completely consistent in the generated details (e.g., the logo on the product may be inaccurately generated), which could potentially mislead customers. Therefore, we follow poster generation methods [5, 11, 22] to address this task via introducing an inpainting-based module named SceneGenNet. However, we found that even using inpainting methods, subject consistency is not always achieved as the inpainting model sometimes extends the subject shape (as shown in Fig. 2 (a)). Similar phenomenon is also observed in [11, 12]. To address this issue, we elaboratively develop a detector to detect the foreground extension cases. Then we employ the detector as a reward model to train the SceneGenNet via feedback learning for further improving subject fidelity.", + "bbox": [ + 511, + 550, + 903, + 837 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Combining the proposed TextRenderNet and SceneGenNet, we develop a framework named PosterMaker that can synthesize the product poster in an end-to-end manner. To efficiently optimize PosterMaker, we introduce a two-stage", + "bbox": [ + 511, + 839, + 903, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/cc4fa3e524b81f1d124f3a83bd3a2d6b24e7b0990c9c1eab0e4379791445aebf.jpg", + "image_caption": [ + "User-Specified Texts", + "Figure 2. The illustration of the three challenges faced by poster generation, which seriously hinder the practical application." 
+ ], + "image_footnote": [], + "bbox": [ + 133, + 89, + 251, + 181 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/cc26ae10209f45c45f172ef4c0e2705be1dbb550c1fbbfc2c45f08dbbb262431.jpg", + "image_caption": [ + "User-Specified Subject" + ], + "image_footnote": [], + "bbox": [ + 254, + 90, + 372, + 181 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e14fa0fb52af6019d2f7fd749129efe78e4f649029e674de79c8f0fa553d9312.jpg", + "image_caption": [ + "(a) Foreground Extension" + ], + "image_footnote": [], + "bbox": [ + 375, + 90, + 496, + 181 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/25b6e838f24b2f49dde5dbd24af262e442a4c225a7be8405132847f005181a21.jpg", + "image_caption": [ + "(b)Text-Scene Disharmony" + ], + "image_footnote": [], + "bbox": [ + 496, + 90, + 617, + 181 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/26a7703ae2cdb9597f122eb7e7605316ed457d62788d2b67698dee17ecbe5f74.jpg", + "image_caption": [ + "(c) Poor Text Rendering" + ], + "image_footnote": [], + "bbox": [ + 619, + 90, + 738, + 181 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/88324587a54ed90f04e0beebb66bcd3330c40f445ac07add6ffd62a1ee4555da.jpg", + "image_caption": [ + "Our Result" + ], + "image_footnote": [], + "bbox": [ + 741, + 90, + 862, + 181 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "training strategy to separately train TextRenderNet and SceneGenNet. This training strategy decouples the learning of text rendering and background image generation, thus TextRenderNet and SceneGenNet can focus on their specific tasks. Qualitative results (as shown in Fig. 1 (c)) demonstrate our training strategy is effective for training PosterMaker and it achieves promising poster generation results.", + "bbox": [ + 89, + 215, + 483, + 321 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To summarize, our contributions are as follows:", + "bbox": [ + 109, + 323, + 426, + 335 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We proposed a novel framework named PosterMaker, which mainly consists of a TextRenderNet and a SceneGenNet. With a two-stage training strategy, PosterMaker can synthesis aesthetically product posters with texts accurately and harmoniously rendered on it.", + "- We reveal the core of achieving accurate Chinese text rendering is to construct a robust character-level text representation as the control condition. These findings can inspire future research on improving the text rendering abilities of T2I models.", + "- We improve the subject fidelity via subject fidelity feedback learning, which is shown effective in addressing the subject inconsistency issue." + ], + "bbox": [ + 89, + 339, + 483, + 536 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 89, + 547, + 232, + 561 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. Poster Generation", + "text_level": 1, + "bbox": [ + 89, + 574, + 267, + 588 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Generating posters involves combining various elements like a subject image, a background scene image, and text to ensure the subject and text are prominently and accurately displayed while maintaining an appealing look. Automating this process is quite complex and challenging. 
Methods like AutoPoster [23], Prompt2Poster [45], and COLE [16] break it down into stages: creating images and layout, predicting the visual properties of text, and rendering the poster. These approaches have several steps and often struggle to precisely obtain all the necessary visual attributes like font and color gradients. With the emergence of more advanced generative models [35], methods like JoyType [19], Glyphbyt5 [25], and GlyphDraw2 [28] can directly generate the image and text together at the pixel level based on the poster prompt, text content, and layout. This more streamlined approach can leverage more readily available poster pixel data for training, but there is still room for improvement in terms of the overall poster cohesion and text accuracy. Our method is also a one-stage, direct pixel-level generation approach that simultaneously creates the image and", + "bbox": [ + 89, + 598, + 483, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "text. However, our focus is on generating posters for a given product subject, where the input includes the subject image, prompt, text content, and layout. In addition to considering text rendering accuracy and overall poster harmony, we also need to maintain the fidelity of the product.", + "bbox": [ + 511, + 215, + 906, + 292 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Visual Text Rendering", + "text_level": 1, + "bbox": [ + 511, + 300, + 720, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, text-to-image (T2I) models [1, 13, 41] have made significant strides in enhancing English text rendering by introducing stronger text encoders, such as T5 [38]. However, multilingual text image generation still faces significant challenges due to the large number of non-Latin characters and complex stroke structures. Early work [49] has explored the ControlNet-based method [53], using low-level visual images such as glyph images as the control signal for text image generation. However, glyph images are easily affected by text size and shape, especially complex stroke details. Besides, some recent works [4, 27, 28, 42, 52, 55] utilize more robust visual features, such as line-level OCR features as control conditions to further improve the text accuracy. But the line-level visual features still perform poorly in representing stroke details for each character. To address this issue, GlyphByT5 [25, 26] proposes a method with box-level contrastive learning to align the text features extracted from the language model with the features extracted from the visual encoder. To effectively learn such alignment, GlyphByT5 relies on collecting massive amounts of data and developing complex data augmentation strategies for the alignment pre-training, which lacks flexibility. In contrast, in this paper, we reveal that the key to high-accuracy text rendering lies in constructing discriminative character-level visual features. Thus we propose a plug-and-play and robust character-level text representation derived from off-the-shelf OCR encoders, which can accurately represent the visual structure of the text without additional training and enable precise text rendering.", + "bbox": [ + 511, + 323, + 906, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. 
Subject-Preserved Scene Generation", + "text_level": 1, + "bbox": [ + 511, + 771, + 828, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To create a scene image with a product subject while ensuring subject fidelity, two main methods are commonly used. One is the subject-driven method [3, 6, 20, 36, 40], which adjusts the position, angle and lighting of the subject based on the prompt to create a harmonious image. However, it often struggles to preserve the significant features of the subject. The other utilizes inpainting-based background com", + "bbox": [ + 511, + 795, + 905, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/8b46bbeabdcde0195c3c0ba74ad57a536fc5d0d9fa001be5a351e582462f2f21.jpg", + "image_caption": [ + "Figure 3. The framework of the PosterMaker, which is based on the SD3. To precisely generate multilingual texts and create aesthetically pleasing poster scenes, TextRenderNet and SenceGenNet are introduced, whose outputs are used as control conditions added to the SD3." + ], + "image_footnote": [], + "bbox": [ + 99, + 92, + 898, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "pletion techniques [2, 11, 43]. It only generates the non-subject areas of an image and naturally keeps consistency in the original subject area. But it sometimes extends the foreground subject [11, 12], such as adding an extra handle to a cup, which also reduces subject fidelity. To maximize subject fidelity, our method uses background completion and a reward model to determine whether the foreground extension occurred, thereby enhancing subject fidelity.", + "bbox": [ + 89, + 301, + 483, + 421 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 89, + 426, + 181, + 441 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Problem Formulation", + "text_level": 1, + "bbox": [ + 89, + 450, + 294, + 465 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This paper focuses on the creation of product posters, which typically consist of multiple elements such as text, subjects, and scenes, as illustrated in Fig. 1 (a). The central challenge of this task is to generate these elements accurately and harmoniously, offering both research and practical applications. The task is defined as:", + "bbox": [ + 89, + 474, + 483, + 565 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nI _ {g} = f \\left(I _ {s}, M _ {s}, T, P\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 573, + 480, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $I_{g}$ denotes the generated poster image, $I_{s}$ represents the subject image, and $M_{s}$ is the subject mask. The variable $T$ signifies the content and the position of text and $P$ is the prompt describing the background scene. Subsequent sections will detail the design of PosterMaker, and our proposed solution to this task.", + "bbox": [ + 89, + 597, + 483, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Framework", + "text_level": 1, + "bbox": [ + 89, + 696, + 218, + 709 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Fig. 3, PosterMaker is developed based on Stable Diffusion 3 (SD3) [13], which contains a strong VAE for reconstructing the image details like text stroke. 
And we propose two modules, i.e., TextRenderNet and SceneGenNet, to address the poster generation task. TextRenderNet is specifically designed to learn visual text rendering, taking character-level visual text representations as input to achieve precise and controllable text rendering. SceneGenNet, on the other hand, accepts a masked image (indicating which content should remain unchanged) and a prompt, learning to generate the foreground subject within the desired scene described by the prompt. Both TextRenderNet", + "bbox": [ + 89, + 719, + 483, + 900 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f8b790dd38d0e52b2ec90ca9820adac388e4331906f3563714af637d4a26fe5d.jpg", + "image_caption": [ + "Figure 4. The details of TextRenderNet and SceneGenNet, showcasing their model architectures and their interactions with SD3." + ], + "image_footnote": [], + "bbox": [ + 531, + 297, + 893, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and SceneGenNet are grounded in a ControlNet-like [52] architecture derived from SD3 and their architectures are detailed in Fig. 4. They share the same internal structure, comprising several cascaded MM-DiT blocks [13], with weights copied from the base model for initialization. The output of each MM-DiT block is added to the corresponding block of the base model after passing through a zero convolution layer [53]. The key distinction between the two modules lies in their input configurations. SceneGenNet takes the prompt as input to the text condition branch, and for the visual branch, the input is derived by the latent feature at timestep $t$ , the subject mask, and the masked latent to preserve the foreground area. In contrast, TextRenderNet receives text representations (detailed in the next section) in the text condition branch for text rendering. An adapter, consisting of a linear layer and layer normalization, adjusts the feature dimensions of these text representations before they are input to TextRenderNet. The outputs of each block in TextRenderNet and SceneGenNet are directly added to the corresponding block outputs of the SD3 base model.", + "bbox": [ + 511, + 494, + 906, + 797 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Character-level Visual Representation for Precise Text Rendering", + "text_level": 1, + "bbox": [ + 511, + 804, + 903, + 837 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Recently, some works have explored multilingual visual text generation. Among them, a promising approach is based on ControlNet-like methods [42], which utilize both glyph images and line-level OCR features as conditions.", + "bbox": [ + 511, + 839, + 905, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4d77d17c9b67ac938b6d26b556a4f1a609b222eff4de3596b373fda50aac6eab.jpg", + "image_caption": [ + "Figure 5. The distinction between the multilingual character-level text representation we proposed and the line-level methods of previous works like AnyText [42] and GlyphDraw2 [28]." + ], + "image_footnote": [], + "bbox": [ + 94, + 89, + 483, + 152 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "However, this control information cannot accurately represent characters: 1) glyph images are easily affected by text size and shape, making them less robust. 
2) line-level visual features lack fine-grained stroke features and are limited by the OCR model's poor capability to recognize long texts. To address these challenges, this paper proposes a plug-and-play and robust character-level text representation, where each character is precisely represented by one token.", + "bbox": [ + 89, + 203, + 483, + 324 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, the text $C$ has $n$ characters. For each character $c_{i}$ , its feature is separately extracted by a pre-trained OCR encoder $f_{v}$ and then averaged and pooled to obtain a compact character representation vector $r_{c_i} \\in \\mathbb{R}^c$ . Thus, the character-level text representation is defined as follows:", + "bbox": [ + 89, + 324, + 483, + 400 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr _ {c i} = \\operatorname {a v g p o o l} \\left(f _ {v} \\left(I _ {c i}\\right)\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 405, + 480, + 422 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nR _ {c} = \\left[ r _ {c _ {1}}, r _ {c _ {2}}, \\dots , r _ {c _ {n}} \\right], \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 422, + 480, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $I_{c_i}$ is the $i$ -th character image rendered in a fixed font, and $R_{c} \\in \\mathbb{R}^{n \\times c}$ is the char-level text representation.", + "bbox": [ + 89, + 452, + 482, + 482 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Fig. 5, compared to previous methods, our key difference is extracting representations from character glyph images. This enables the model to perceive character stroke structures and achieve high text accuracy. Additionally, since the number of characters is fixed, we can pre-extract the representations of each character and store them in a dictionary, eliminating the need for online rendering and feature extraction. This significantly simplifies the training and inference pipeline.", + "bbox": [ + 89, + 483, + 483, + 618 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, this text representation lacks order and positional information. Thus, the character order encoding $P_{rank}$ is introduced to represent the order of characters in the text, which is implemented through a sinusoidal position encoding of the char order. Besides, inspired by GLIGEN [21], the text position coordinates are mapped to sinusoidal position encoding $P_{bbox}$ to control the position of the text. Then we concatenate $P_{rank}$ , $P_{bbox}$ and $R_c$ along the feature dimension to construct the final text representation.", + "bbox": [ + 89, + 619, + 483, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Improving Subject Fidelity", + "text_level": 1, + "bbox": [ + 89, + 760, + 334, + 776 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the task of generating product posters, it is crucial to maintain subject fidelity, i.e., ensuring that the subject in the generated poster remains consistent with the user-specified subject. To achieve this goal, we employ SceneGenNet to perform background inpainting, which is trained to precisely preserve the foreground subject and only inpaint the background according to the prompt. 
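As a rough illustration of the character-level representation in Eqs. (2)-(3) together with the order and position encodings described above, the following sketch builds per-character control tokens. `ocr_encoder` and `render_glyph` are hypothetical stand-ins for a pretrained OCR visual encoder and a fixed-font glyph rasterizer, the feature dimensions are arbitrary, and the concatenation order is one possible choice; this is not the authors' implementation.

```python
# Sketch: per-character tokens = [pooled OCR feature | rank PE | bbox PE].
import math
import torch

GLYPH_FEATURE_CACHE = {}   # char -> pooled OCR feature; can be built offline

def sinusoidal_encoding(value, dim):
    """Standard sinusoidal encoding of a single scalar into `dim` features."""
    freqs = torch.exp(torch.arange(0, dim, 2).float() * (-math.log(10000.0) / dim))
    angles = value * freqs
    return torch.cat([torch.sin(angles), torch.cos(angles)])

def char_representation(text, bbox, ocr_encoder, render_glyph, pos_dim=64):
    """Build character-level control tokens for one text line."""
    bbox_pe = torch.cat([sinusoidal_encoding(c, pos_dim // 4) for c in bbox])
    tokens = []
    for rank, ch in enumerate(text):
        if ch not in GLYPH_FEATURE_CACHE:
            glyph = render_glyph(ch)                     # fixed-font glyph image
            feats = ocr_encoder(glyph)                   # (spatial, C) visual features
            GLYPH_FEATURE_CACHE[ch] = feats.mean(dim=0)  # average pooling, Eq. (2)
        rank_pe = sinusoidal_encoding(float(rank), pos_dim)
        tokens.append(torch.cat([GLYPH_FEATURE_CACHE[ch], rank_pe, bbox_pe]))
    return torch.stack(tokens)                           # ordered R_c tokens, Eq. (3)

# Usage with dummy stand-ins for the OCR encoder and glyph rasterizer:
dummy_encoder = lambda glyph: torch.randn(10, 256)
dummy_render = lambda ch: None
tokens = char_representation("SALE", (0.1, 0.1, 0.4, 0.2), dummy_encoder, dummy_render)
print(tokens.shape)   # torch.Size([4, 384]) = 256 OCR + 64 rank + 64 bbox
```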
However, inpainting-based models sometimes extend the foreground subject into", + "bbox": [ + 89, + 779, + 483, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7b1ab2581a2637e361192dadf19b149350be83c2f5e81ad624f504a04833ae50.jpg", + "image_caption": [ + "Figure 6. The model details of the foreground extension detector." + ], + "image_footnote": [], + "bbox": [ + 524, + 87, + 885, + 210 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/cba135ab6536ff93c30644eef668d3f30af7317c5bf63b7a1b66f5d7419fe1cf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 234, + 686, + 329 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/ac592ba23bddb135c0915fc0128194478bb41293ec4e39dc475919d75340602d.jpg", + "image_caption": [ + "Figure 7. The illustration of our two-stage training strategy for efficiently optimizing PosterMaker." + ], + "image_footnote": [], + "bbox": [ + 699, + 234, + 898, + 332 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "another subject (as shown in Fig. 2 (b)) thereby compromising subject fidelity. We refer to this as \"foreground extension\". To mitigate this issue, we develop a model to detect foreground extension and employ it as a reward model to fine-tune PosterMaker to improve subject fidelity.", + "bbox": [ + 511, + 372, + 905, + 448 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Foreground Extension Detector. We develop the foreground extension detector $S_{\\theta}$ based on HQ-SAM [17]. As shown in Fig. 6, we input the generated image $I_{g}$ to SAM [18] image encoder. The subject mask $M_{s}$ and box $B_{s}$ are provided as mask prompt and box prompt, respectively, to the HQ-SAM decoder to obtain an intermediate mask $M_{i}$ . Next, we concatenate the image features extracted from SAM encoder with $M_{s}$ , $M_{i}$ and $M_{s} - M_{i}$ at the channel dimension. The concatenated features are processed through convolutional layers and MLP layers to predict whether the foreground has been extended in the generated image. We collected 20k manually annotated images to train the foreground extension detector $S_{\\theta}$ .", + "bbox": [ + 511, + 449, + 905, + 645 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Subject Fidelity Feedback Learning. The foreground extension detector $S_{\\theta}$ , after the offline training, is used as a reward model to supervise PosterMaker to improve subject fidelity. Specifically, assuming the reverse process has a total of $T'$ steps, we follow ReFL [47] to first sample $z_{T'} \\sim \\mathcal{N}(0,1)$ and after $T' - t'$ steps of inference $(z_{T'} \\rightarrow z_{T'-1} \\rightarrow \\dots \\rightarrow z_{t'})$ , we obtain $z_{t'}$ , where $t' \\sim [1, t_1]$ . Then, we directly perform a one-step inference $z_{t'} \\rightarrow z_0$ to accelerate the reverse process. Furthermore, $z_0$ is decoded to the generated image $x_0$ . The detector $S_{\\theta}$ predicts the foreground extension score for $x_0$ , and this score is used as the reward loss to optimize the generator $G_{\\phi}$ (i.e., PostMaker). 
The reward loss is defined as follows:", + "bbox": [ + 511, + 647, + 906, + 843 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\text {r e w a r d}} (\\phi) = - \\mathbb {E} _ {(x, c, m) \\sim \\mathcal {D} _ {\\text {t r a i n}}, t ^ {\\prime} \\sim [ 1, t _ {1} ], z _ {T ^ {\\prime}} \\sim \\mathcal {N} (0, 1)} \\\\ \\log \\sigma \\left(1 - S _ {\\theta} \\left(G _ {\\phi} \\left(z _ {T ^ {\\prime}}, x, c, m, t ^ {\\prime}\\right), m\\right)\\right), \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 517, + 849, + 903, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/4aaf4cadc0bd2e76d849fa02a6386076ba7a57a8b840bee0a63daad304225b64.jpg", + "image_caption": [ + "Figure 8. Qualitative comparison with different methods. Best viewed on Screen. To aid comprehension, Chinese text lines in the image are translated into English and annotated using corresponding colors." + ], + "image_footnote": [], + "bbox": [ + 96, + 89, + 903, + 375 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $x, c, m$ sampled from the train data $\\mathcal{D}_{\\mathrm{train}}$ , represent the subject image, control conditions, and subject mask respectively. To avoid overfitting, we don't calculate reward loss for the cases where the foreground extension score is below 0.3. Our total training loss is defined as:", + "bbox": [ + 89, + 407, + 483, + 484 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {d e n o i s e}} + \\lambda \\mathcal {L} _ {\\text {r e w a r d}}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 501, + 482, + 517 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\lambda$ is the hyperparameter to adjust the weight of reward loss and the denoise loss.", + "bbox": [ + 89, + 535, + 482, + 565 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.5. Training Strategy", + "text_level": 1, + "bbox": [ + 89, + 590, + 264, + 608 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To efficiently train PosterMaker, this paper introduces a two-stage training strategy, as shown in Fig. 7, aimed at decoupling the learning for text rendering and background image generation. Specifically, in the first stage, the training task is local text editing. We freeze SceneGenNet and only the TextRenderNet and adapter are optimized. Since we initialize SceneGenNet with pre-trained weights of inpainting-controlnet [7], it can fill the local background well thus TextRenderNet can focus on learning text generation. In the second stage, the training task is subject-based text-to-image generation. Here we froze TextRenderNet and only train the SceneGenNet. In this stage, SceneGenNet focuses on learning poster scenes and creative design from the train data. Notably, Stage 1 learns local text editing/inpainting and Stage 2 learns background inpainting, thus the input images indicating the area to inpaint are different (See Fig. 7). With such a two-stage training strategy, TextRenderNet and SceneGenNet can be efficiently optimized since they can focus on their specific tasks.", + "bbox": [ + 89, + 617, + 483, + 905 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. 
Experiments", + "text_level": 1, + "bbox": [ + 511, + 405, + 645, + 422 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 511, + 426, + 702, + 443 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dataset. We crawl product posters from online e-commerce platforms to construct our training set. Our training data mainly consists of Chinese posters, we first employ PPOCRv4 model [34] to extract the text content and their bounding boxes from the images as a coarse annotation. And we ask some annotators to further refine the bounding boxes and correct the text content to improve the annotation quality. Resulting in a dataset containing 160k images. We generate image captions with GPT4-o [32] and extract foreground subject masks with $\\mathrm{U}^2$ -Net [37] and VitMatte [50]. We randomly select 302 images for evaluation and leave the rest for training. To better evaluate the performance of our method, we use LLM [10] to generate some background prompts and text layouts as evaluation samples, after manually checking and removing those irrational ones, we obtain another 198 evaluation samples to form a final evaluation set named PosterBenchmark containing 500 samples.", + "bbox": [ + 511, + 446, + 906, + 703 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Metrics. We follow Anytext [42] to evaluate text rendering accuracy using two metrics: sentence accuracy (Sen. Acc) and normalized edit distance (NED). Specifically, we crop the text line from the generated image according to the provided bounding box and utilize the OCR model [31] to predict the content $s_{\\mathrm{pred}}$ of the generated text line. We denote the ground truth text content as $s_{\\mathrm{gt}}$ . A text line is considered to be correctly generated if $s_{\\mathrm{pred}} = s_{\\mathrm{gt}}$ ; this condition is used to calculate Sen. Acc. Additionally, we compute the normalized edit distance (NED) between $s_{\\mathrm{pred}}$ and $s_{\\mathrm{gt}}$ to measure their similarity. We further calculate FID [15] to measure the visual quality and CLIP-T [40] metric for evaluating text-image alignment.", + "bbox": [ + 511, + 704, + 908, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/feef9c7b9f80eb27c54a517336ee0da7210d0f42e9a0a3a0f5135fc86b1da784.jpg", + "image_caption": [ + "Figure 9. Qualitative comparison using various text features. It is obvious that the character-level OCR features we used (PPOCR Char) are the most effective at maintaining character accuracy." + ], + "image_footnote": [], + "bbox": [ + 96, + 89, + 903, + 371 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Implementation Details. Our SceneGenNet is initialized from pre-trained SD3 Inpainting-Controlnet [7] and TextRenderNet is initialized from SD3 [13] weight with the same configuration as in [8]. For Subject Fidelity Feedback Learning, we follow existing work [47] to uniformly sample $t'$ between [1, 10]. Within this range, the one-step inference result of image $x_0$ from $t'$ is close to the full inference result. The weight coefficient of $\\lambda$ is set to 0.0005. The learning rate is set to 1e-4 and the batch size is set to 192. We train our framework for 26k and 29.5k steps for training stage1 and stage2, respectively. Finally, PosterMaker was trained on 32 A100 GPUs for 3 days. 
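For clarity, a short Python sketch of the two text-accuracy metrics described above (Sen. Acc and NED), computed over (s_pred, s_gt) string pairs. The OCR recognition step is abstracted away, and the edit-distance normalization follows the common similarity convention; minor details may differ from the AnyText evaluation script.

```python
# Illustrative sketch of sentence accuracy and normalized edit distance.
def edit_distance(a, b):
    """Plain Levenshtein distance via single-row dynamic programming."""
    dp = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev, dp[0] = dp[0], i
        for j, cb in enumerate(b, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1,
                                     prev + (ca != cb))
    return dp[len(b)]

def text_metrics(pairs):
    """pairs: list of (s_pred, s_gt); returns (Sen. Acc, NED) as fractions."""
    sen_acc = sum(p == g for p, g in pairs) / len(pairs)
    ned = sum(1 - edit_distance(p, g) / max(len(p), len(g), 1)
              for p, g in pairs) / len(pairs)
    return sen_acc, ned

print(text_metrics([("SALE 50% OFF", "SALE 50% OFF"), ("SAIE", "SALE")]))  # (0.5, 0.875)
```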
During the sampling process, based on the statistical information, a maximum of 7 lines of text and 16 characters per line of text are selected from each image to render onto the image, as this setting can cover most situations in the dataset.", + "bbox": [ + 88, + 407, + 485, + 650 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Comparison with Prior Works", + "text_level": 1, + "bbox": [ + 89, + 657, + 362, + 672 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Baseline methods. We carefully designed the following baseline approaches based on existing open-sourced techniques for comparative analysis. SD3_inpaint_byt5: We encode the text content into prompt embeddings using ByT5 [48] and employ an adapter to map these embeddings to the original prompt embedding space of SD3 before feeding them into the controlnet, which enables the controlnet to render multilingual text. SD3_canny&inpaint: First render the text into a white-background image and extract the canny edge from it as control. Then finetune a pre-trained SD3 canny controlnet together with an inpainting controlnet to achieve multilingual text rendering. Anytext: It is the SOTA open-sourced T2I method that supports multilin", + "bbox": [ + 89, + 676, + 483, + 875 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/447d088256e1c87d6ae91a0c725ac663c352ccd8596de25616f41d878902395b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑ | FG Ext. Ratio ↓
SD3 inpaint_AnyText | 52.78% | 75.27% | 100.87 | 26.90 | 14.82%
SD3 inpaint_byt5 | 52.28% | 86.57% | 65.45 | 26.71 | 14.60%
AnyText | 63.90% | 82.81% | 71.27 | 26.69 | 19.25%
Glyph-ByT5-v2 | 69.54% | 87.65% | 79.23 | 26.60 | 18.91%
SD3_canny&inpaint | 80.75% | 92.75% | 67.19 | 27.03 | 14.38%
GlyphDraw2 | 86.14% | 96.78% | 72.49 | 26.72 | 16.52%
GT (w/ SD1.5 Rec.) | 76.95% | 89.91% | - | - | -
GT (w/ SD3 Rec.) | 98.09% | 99.36% | - | - | -
GT | 98.53% | 99.59% | - | - | -
Ours (SD1.5) | 72.12% | 88.01% | 68.17 | 26.93 | -
Ours | 93.36% | 98.39% | 65.35 | 27.04 | 11.57%
", + "bbox": [ + 517, + 404, + 898, + 534 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1. Comparison with baseline methods.", + "bbox": [ + 575, + 535, + 841, + 547 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "gual text rendering and its text editing mode supports text inpainting [42]. So we directly finetune it on our data using its text editing training pipeline. SD3_inpaint_Anytext: First generate the background with SD3 inpainting control-net, then render the text on the corresponding region using Anytext. Glyph-ByT5-v2 and GlyphDraw2: They are both the SOTA T2I methods that support multilingual text rendering [26, 28]. However, they don't have open-sourced pre-trained weights, so we reproduced them on our dataset. And we added an inpainting controlnet for them to support subject-preserved generation.", + "bbox": [ + 511, + 551, + 906, + 717 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Quantitative Comparison. We trained all baseline models on the same dataset, and then quantitatively compared all methods on the PosterBenchmark, as shown in Tab. 1. It is worth noting that SD3 is used as the base model by default, but since we observed that the SD1.5 VAE leads to significant error in reconstruction, to enable a more equitable comparison between our method and AnyText (SD1.5 architecture), we also implemented an SD1.5 version of PosterMaker with the same experimental setup as AnyText. As the VAEs, especially SD1.5, introduce some reconstruction error and the OCR model may incorrectly recognize some characters, we also report the metrics on ground truth", + "bbox": [ + 511, + 719, + 908, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "Details can be found in the Appendix.", + "bbox": [ + 107, + 886, + 316, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c202934d10fe0a130d64ae0ba56d2d6b00f5173e31d456711996a78dfc2d1cb4.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Text Feature | Type | Sen. ACC | NED
ByT5 | textual feat. | 33.48% | 54.50%
Canny | img | 81.50% | 92.72%
TrOCR Line | visual feat. | 26.58% | 49.46%
TrOCR Char | visual feat. | 94.27% | 98.54%
PPOCR Line | visual feat. | 38.91% | 53.86%
PPOCR Char (Ours) | visual feat. | 95.15% | 98.75%
GT (w/o Rec.) | - | 98.53% | 99.59%
GT (w/ SD3 Rec.) | - | 98.09% | 99.36%
", + "bbox": [ + 130, + 88, + 441, + 200 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e20e8244690f5e01dcd53e455d92f3a60769ab100ea63c696af31a4b42c9de1c.jpg", + "table_caption": [ + "Table 2. Quantitative comparison using various text features." + ], + "table_footnote": [], + "table_body": "
Method | FG Ext. Ratio ↓ | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑
Ours | 11.57% | 93.36% | 98.39% | 65.35 | 27.04
Ours w/o L_reward | 15.05% | 93.11% | 98.21% | 65.10 | 27.04
", + "bbox": [ + 96, + 218, + 475, + 253 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. Evaluation on the subject fidelity feedback learning.", + "bbox": [ + 104, + 253, + 465, + 268 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "images as an upper bound. As shown in Tab. 1, our method achieves the best performance on all metrics. Notably, on text rendering metrics Sen. ACC and NED, our model outperforms the baselines by an impressive margin and is already close to the upper bound. The promising results demonstrate the effectiveness of the proposed PosterMaker. Qualitative Comparison. The results are shown in Fig. 8. Compared to the baselines, our PosterMaker generates more readable and accurate poster images with texts, particularly for smaller texts. Notably, as an end-to-end generation method, PosterMaker automatically creates underlays to enhance the contrast between text and background, effectively highlighting the text. This feature is crucial in product poster design for capturing viewers' attention. These findings demonstrate that our PosterMaker successfully learns the distribution of posters created by human designers.", + "bbox": [ + 89, + 271, + 482, + 513 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Ablation Study and Analysis", + "text_level": 1, + "bbox": [ + 89, + 518, + 346, + 534 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "How to achieve high text rendering accuracy? We conduct experiments to explore the effectiveness of different control conditions for visual text rendering. Due to the fact that text rendering accuracy is primarily determined by the first training stage, we discard the second training stage in this experiment to save computational resources. The results are summarized in Tab. 2. We observed several valuable experimental results: 1) The use of char-level features significantly outperforms previous line-level features, benefiting from finer-grained representation. This explains why previous methods [4, 28, 42], achieve inferior performance (PPOCR Line is used in [28, 42], TrOCR Line is used in [4]). Recent concurrent works [29, 46] have also found similar experimental findings as ours. 2) Char-level feature representation is superior to low-level image features such as Canny. 3) PPOCR outperforms TrOCR, which is attributed to PPOCR being a multi-language OCR model, while TrOCR is an English version model. 4) Even though TrOCR has not been trained on multi-language text data, it still achieves decent results, likely because it extracts universal visual structural features. 5) ByT5 extracts char-level features but the performance is inferior to OCR features, because it extracts semantic features rather than character structural features, while T2I models' text rendering", + "bbox": [ + 89, + 537, + 483, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c241b8495f20c789e14637a4732870b94dd037c3ecd9e058762ced2a4aab2709.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 88, + 630, + 151 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/213ffce2dd7c950b4a730fb1079c8eecdbe8ab5edf2fd70d17dc6203496226e0.jpg", + "image_caption": [ + "Figure 10. Visual examples showing the effect of $\\mathcal{L}_{reward}$ ." + ], + "image_footnote": [], + "bbox": [ + 633, + 88, + 901, + 236 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "capability relies more on character structural features. We present visualization results in Fig. 9. 
We observe that when using line-level features as a control, the generated text occasionally becomes completely unrecognizable. This suggests that line-level features are insufficient for achieving precise text rendering. Additionally, it is evident that using canny control always introduces stroke artifacts, particularly in smaller texts (as seen in row 3 of Fig. 9). This further demonstrates that canny control is also not an ideal condition for text rendering. In summary, the char-level feature extracted by PPOCR performs optimally and the accuracy is already close to the upper bound, indicating the discriminative char-level visual feature is the key to achieve high text rendering accuracy.", + "bbox": [ + 511, + 263, + 906, + 474 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effectiveness of subject fidelity feedback learning. We calculate the foreground extension ratio (termed as FG Ext. Ratio) by asking human annotators to manually check each generated image whether the foreground subject is incorrectly extended. As demonstrated in Tab. 3, training our model with $\\mathcal{L}_{reward}$ effectively reduces FG Ext. Ratio by $3.4\\%$ , while maintaining subtle variations in other performance metrics. Representative visual examples are presented in Fig. 10. Besides, our model outperforms baseline methods in FG Ext. Ratio (see Tab. 1). These results show the efficacy of our proposed subject fidelity feedback learning approach in mitigating foreground extension artifacts.", + "bbox": [ + 511, + 474, + 908, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 666, + 633, + 681 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The application of image generation in poster creation is often impeded by subpar text rendering and inconsistent subjects. To address these challenges, this paper introduces a novel framework, PosterMaker, which synthesizes aesthetically pleasing product posters with accurate and harmonious texts and contents. Moreover, we reveal that the key underlying successful multilingual text rendering is the construction of robust character-level visual text representations. Additionally, we propose subject fidelity feedback learning to mitigate inconsistencies in subjects. Through extensive experiments, our method demonstrates a significant improvement in both high-precision text generation and subject fidelity. These findings not only advance poster generation but also inspire future research on T2I models.", + "bbox": [ + 511, + 688, + 906, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 91, + 90, + 250, + 107 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This work was supported by the National Nature Science Foundation of China (62425114, 62121002, U23B2028, 62232006, 62272436) and Alibaba Group (Alibaba Research Intern Program).", + "bbox": [ + 89, + 114, + 485, + 176 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 188, + 187, + 204 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, et al. 
ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 3", + "[2] Tingfeng Cao, Junsheng Kong, Xue Zhao, Wenqing Yao, Junwei Ding, Jinhui Zhu, and Jiandong Zhang. Product2img: Prompt-free e-commerce product background generation with diffusion model and self-improved LMM. In Proceedings of the 32nd ACM International Conference on Multimedia, MM 2024, Melbourne, VIC, Australia, 28 October 2024 - 1 November 2024, pages 10774-10783. ACM, 2024. 2, 4", + "[3] Kelvin C. K. Chan, Yang Zhao, Xuhui Jia, Ming-Hsuan Yang, and Huisheng Wang. Improving subject-driven image synthesis with subject-agnostic guidance. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2024, Seattle, WA, USA, June 16-22, 2024, pages 6733-6742. IEEE, 2024. 3", + "[4] Haoxing Chen, Zhuoer Xu, Zhangxuan Gu, Jun Lan, Xing Zheng, Yaohui Li, Changhua Meng, Huijia Zhu, and Weiqiang Wang. Diffuse: Universal text editing diffusion model. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 2, 3, 8", + "[5] Ruidong Chen, Lanjun Wang, Weizhi Nie, Yongdong Zhang, and An-An Liu. Anyscene: Customized image synthesis with composited foreground. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8724-8733, 2024. 2", + "[6] Wenhu Chen, Hexiang Hu, Yandong Li, Nataniel Ruiz, Xuhui Jia, Ming-Wei Chang, and William W. Cohen. Subject-driven text-to-image generation via apprenticeship learning. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 3", + "[7] Alimama Creative. Sd3-controlnet-inpainting. https://huggingface.co/alamama-creative/SD3-Controlnet-Inpainting, 2024.6,7,2,4", + "[8] Alimama Creative. Sd3-controlnet-softedge. https://huggingface.co/alamama-creative/SD3-Controlnet-Softedge, 2024.7, 2", + "[9] Alimama Creative. Ecomxl-controlnet-inpaint. https://huggingface.co/alimama-creative/EcomXL_controlnet_inpaint, 2024.2", + "[10] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Xilin Wei, Songyang Zhang, Haodong" + ], + "bbox": [ + 93, + 213, + 483, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Duan, Maosong Cao, Wenwei Zhang, Yining Li, Hang Yan, Yang Gao, Xinyue Zhang, Wei Li, Jingwen Li, Kai Chen, Conghui He, Xingcheng Zhang, Yu Qiao, Dahua Lin, and Jiaqi Wang. Internlm-xcomposer2: Mastering free-form text-image composition and comprehension in vision-language large model. arXiv preprint arXiv:2401.16420, 2024. 6, 1", + "[11] Zhenbang Du, Wei Feng, Haohan Wang, Yaoyu Li, Jingsen Wang, Jian Li, Zheng Zhang, Jingjing Lv, Xin Zhu, Junsheng Jin, et al. Towards reliable advertising image generation using human feedback. In European Conference on Computer Vision, pages 399-415. Springer, 2024. 2, 4, 3", + "[12] Amir Erfan Eshratifar, Joao V.B. Soares, Kapil Thadani, Shaunak Mishra, Mikhail Kuznetsov, Yueh-Ning Ku, and Paloma De Juan. Salient object-aware background generation using text-guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 7489-7499, 2024. 
2, 4", + "[13] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 2, 3, 4, 7, 1", + "[14] Yifan Gao, Jinpeng Lin, Min Zhou, Chuanbin Liu, Hongtao Xie, Tiezheng Ge, and Yuning Jiang. Textpainter: Multimodal text image generation with visual-harmony and text-comprehension for poster design. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7236-7246, 2023. 2", + "[15] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 6626-6637, 2017. 6", + "[16] Peidong Jia, Chenxuan Li, Yuhui Yuan, Zeyu Liu, Yichao Shen, Bohan Chen, Xingru Chen, Yinglin Zheng, Dong Chen, Ji Li, Xiaodong Xie, Shanghang Zhang, and Baining Guo. Cole: A hierarchical generation framework for multilayered and editable graphic design, 2024. 3", + "[17] Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. Segment anything in high quality. In Advances in Neural Information Processing Systems, pages 29914–29934. Curran Associates, Inc., 2023. 5", + "[18] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollar, and Ross Girshick. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4015-4026, 2023. 5, 3", + "[19] Chao Li, Chen Jiang, Xiaolong Liu, Jun Zhao, and Guoxin Wang. Joytype: A robust design for multilingual visual text creation. arXiv preprint arXiv:2409.17524, 2024. 3", + "[20] Dongxu Li, Junnan Li, and Steven C. H. Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to" + ], + "bbox": [ + 516, + 92, + 908, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "image generation and editing. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 3", + "[21] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 5", + "[22] Zhaochen Li, Fengheng Li, Wei Feng, Honghe Zhu, An Liu, Yaoyu Li, Zheng Zhang, Jingjing Lv, Xin Zhu, Junjie Shen, et al. Planning and rendering: Towards end-to-end product poster generation. arXiv preprint arXiv:2312.08822, 2023. 2", + "[23] Jinpeng Lin, Min Zhou, Ye Ma, Yifan Gao, Chenxi Fei, Yangjian Chen, Zhang Yu, and Tiezheng Ge. Autoposter: A highly automatic and content-aware design system for advertising poster generation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 1250–1260, 2023. 2, 3", + "[24] Xingchao Liu, Chengyue Gong, and Qiang Liu. 
Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 1", + "[25] Zeyu Liu, Weicong Liang, Zhanhao Liang, Chong Luo, Ji Li, Gao Huang, and Yuhui Yuan. Glyph-byt5: A customized text encoder for accurate visual text rendering. In European Conference on Computer Vision, pages 361-377. Springer, 2024. 2, 3", + "[26] Zeyu Liu, Weicong Liang, Yiming Zhao, Bohan Chen, Ji Li, and Yuhui Yuan. Glyph-byt5-v2: A strong aesthetic baseline for accurate multilingual visual text rendering. arXiv preprint arXiv:2406.10208, 2024. 2, 3, 7", + "[27] Zhiying Lu, Chuanbin Liu, Xiaojun Chang, Yongdong Zhang, and Hongtao Xie. Dhvt: Dynamic hybrid vision transformer for small dataset recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 3", + "[28] Jian Ma, Yonglin Deng, Chen Chen, Haonan Lu, and Zhenyu Yang. Glyphdraw2: Automatic generation of complex glyph posters with diffusion models and large language models. arXiv preprint arXiv:2407.02252, 2024. 2, 3, 5, 7, 8", + "[29] Lichen Ma, Tiezhu Yue, Pei Fu, Yujie Zhong, Kai Zhou, Xiaoming Wei, and Jie Hu. Chargen: High accurate character-level visual text generation model with multimodal encoder. arXiv preprint arXiv:2412.17225, 2024. 8", + "[30] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. SDEdit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2022. 4", + "[31] ModelScope. https://modelscope.cn/models/damo/cv_convnextTinyOCR-recognition-general_damo/summary,2023.6", + "[32] OpenAI. https://openai.com/index/hello-gpt-4o/, 2024.6", + "[33] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al." + ], + "bbox": [ + 91, + 90, + 483, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 3", + "[34] PaddlePaddle. https://github.com/PaddlePaddle/PaddleOCR, 2023.6,2,3", + "[35] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 2, 3", + "[36] Tianhao Qi, Shancheng Fang, Yanze Wu, Hongtao Xie, Jiawei Liu, Lang Chen, Qian He, and Yongdong Zhang. Deadiff: An efficient stylization diffusion model with disentangled representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8693-8702, 2024. 3", + "[37] Xuebin Qin, Zichen Zhang, Chenyang Huang, Masood Dehghan, Osmar Zaiane, and Martin Jagersand. U2-net: Going deeper with nested u-structure for salient object detection. page 107404, 2020. 6", + "[38] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and PeterJ. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv: Learning, arXiv: Learning, 2019. 3", + "[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 10674-10685. IEEE, 2022. 2", + "[40] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 2, 3, 6", + "[41] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022. 3", + "[42] Yuxiang Tuo, Wangmeng Xiang, Jun-Yan He, Yifeng Geng, and Xuansong Xie. Anytext: Multilingual visual text generation and editing. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 2, 3, 4, 5, 6, 7, 8", + "[43] Haohan Wang, Wei Feng, Yaoyu Li, Zheng Zhang, Jingjing Lv, Junjie Shen, Zhangang Lin, and Jingping Shao. Generate e-commerce product background by integrating category commonality and personalized style. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2025. 4", + "[44] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, and Anthony Chen. Instantid: Zero-shot identity-preserving gener" + ], + "bbox": [ + 516, + 90, + 903, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ation in seconds. arXiv preprint arXiv:2401.07519, 2024. 2", + "[45] Shaodong Wang, Yunyang Ge, Liuhan Chen, Haiyang Zhou, Qian Wang, Xinhua Cheng, and Li Yuan. Prompt2poster: Automatically artistic chinese poster creation from prompt only. In Proceedings of the 32nd ACM International Conference on Multimedia, MM 2024, Melbourne, VIC, Australia, 28 October 2024 - 1 November 2024, pages 10716-10724. ACM, 2024. 3", + "[46] Tong Wang, Xiaochao Qu, and Ting Liu. Textmastero: Mastering high-quality scene text editing in diverse languages and styles. arXiv preprint arXiv:2408.10623, 2024. 8", + "[47] Jiazheng Xu, Xiao Liu, Yuchen Wu, Yuxuan Tong, Qinkai Li, Ming Ding, Jie Tang, and Yuxiao Dong. Imagereward: Learning and evaluating human preferences for text-to-image generation. In Advances in Neural Information Processing Systems, pages 15903-15935. Curran Associates, Inc., 2023. 5, 7, 1", + "[48] Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, and Colin Raffel. ByT5: Towards a token-free future with pre-trained byte-to-byte models. Transactions of the Association for Computational Linguistics, 10:291-306, 2022. 7, 3", + "[49] Yukang Yang, Dongnan Gui, Yuhui Yuan, Weicong Liang, Haisong Ding, Han Hu, and Kai Chen. Glyphcontrol: Glyph conditional control for visual text generation. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 2, 3", + "[50] Jingfeng Yao, Xinggang Wang, Shusheng Yang, and Baoyuan Wang. Vitmatte: Boosting image matting with pretrained plain vision transformers. Information Fusion, 103: 102091, 2024. 
6", + "[51] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 2", + "[52] Boqiang Zhang, Zuan Gao, Yadong Qu, and Hongtao Xie. How control information influences multilingual text image generation and editing? arXiv preprint arXiv:2407.11502, 2024. 3, 4", + "[53] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 2, 3, 4", + "[54] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In Computer Vision and Pattern Recognition, 2016. 4", + "[55] Yuanzhi Zhu, Jiawei Liu, Feiyu Gao, Wenyu Liu, Xinggang Wang, Peng Wang, Fei Huang, Cong Yao, and Zhibo Yang. Visual text generation in the wild. In European Conference on Computer Vision, pages 89-106. Springer, 2024. 3" + ], + "bbox": [ + 91, + 90, + 482, + 853 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "PosterMaker: Towards High-Quality Product Poster Generation with Accurate Text Rendering", + "text_level": 1, + "bbox": [ + 174, + 85, + 823, + 130 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 380, + 141, + 614, + 162 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Due to space limitations, we were unable to present all experimental results in the main text. In this supplementary material, we will give more details about our experiments and present additional results.", + "bbox": [ + 89, + 179, + 483, + 241 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6. Implementation Details", + "text_level": 1, + "bbox": [ + 89, + 256, + 312, + 273 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Training and Inference. We fully follow the settings of SD3 [13]. During training, the denoise loss $\\mathcal{L}_{\\mathrm{denoise}}$ uses simplified flow matching, also known as 0-rectified flow matching loss [24]. In inference, we also use the inference method of flow matching, with 28 inference steps.", + "bbox": [ + 89, + 281, + 483, + 356 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "TextRenderNet and SceneGenNet. TextRenderNet and SceneGenNet have an architecture similar to SD3 [13], composed of multiple MM-DiT Blocks. In our implementation, TextRenderNet consists of 12 layers of MM-DiT Blocks, while SceneGenNet consists of 23 layers of MM-DiT Blocks. The output of the $N_{i}$ -th block of SceneGenNet is first added with the output of the $\\left\\lceil \\frac{N_i}{2} \\right\\rceil$ -th block of TextRenderNet, and then add to the $N_{i}$ -th SD3 block.", + "bbox": [ + 89, + 357, + 483, + 478 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Classifier-Free Guidance. We use CFG during inference, with a CFG scale of 5. Additionally, since the \"prompt\" inputted to TextRenderNet is not a caption but a text representation, the negative one for CFG is set to a zero vector. During training, we randomly drop the text representation to a zero vector with $10\\%$ probability.", + "bbox": [ + 89, + 479, + 483, + 569 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The Setting of $t_1$ in Reward Loss. 
We follow [47] to train the reward loss at the last 10 inference steps, i.e., we set $t_1$ to 10. Within the range of $t' \\sim [1, t_1]$ , the result of the image $x_0$ obtained by one-step inference is close to the result of complete inference.", + "bbox": [ + 89, + 570, + 483, + 645 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Details about Metric Calculation. Our evaluation benchmark contains samples generated by LLM [10] thus there is no ground truth for these samples. Therefore, we exclude these LLM-generated samples when calculating metrics that depend on ground truth images, i.e., FID metric for all experiments, text accuracy metrics for GT (with and without VAE reconstruction) and results for ablation on different text features.", + "bbox": [ + 89, + 646, + 483, + 766 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "About ground truth for training Foreground Extension Detector. We treat the task of detecting foreground extension as a binary classification problem and ask annotators to manually label the ground truth.", + "bbox": [ + 89, + 768, + 483, + 829 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "7. Baseline Details", + "text_level": 1, + "bbox": [ + 89, + 844, + 248, + 859 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We carefully designed 6 baseline approaches based on existing techniques for comparative analysis. The de", + "bbox": [ + 89, + 869, + 483, + 900 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d6744c5d9caa6bc83f69bb57ecb1d9daafd4f9a7d94f1863a5c86d9575225be2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 176, + 924, + 261 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d83b09b24b46dfeee4c8c54a89e5dbf00003f3dc7e316e42c12abbfdcf799593.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 262, + 815, + 366 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d7751a41607c78a3558de3d5bbf74fffe5b47dff69d5f6c0fb1aec1026926f20.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 369, + 883, + 481 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/efadff1d49693fc289e772cae52adb6dc2a92ba704dca7a62cfbaedaf7a7cadf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 484, + 813, + 551 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/879af9b19d382fa98252e55347ac57b0349226e8c666c4bc696de805ba1b0522.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 553, + 810, + 643 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/235f3cdda602d017551430d3d9e0d84d734901b0f8a30523be01b46932a2c2dd.jpg", + "image_caption": [ + "Figure 11. Detailed illustration of the implementation of the different baseline methods." + ], + "image_footnote": [], + "bbox": [ + 519, + 645, + 805, + 748 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "tails are shown in Fig. 11. For 1) SD3_inpaint_byt5, 2) SD3_canny&inpaint, and 4) AnyText, we fine-tune them on our 160K dataset for the poster generation task. Meanwhile, 3) SD3_inpaint_Anytext is a two-stage inference method. 
In the first stage, the pre-trained Inpaint ControlNet gener", + "bbox": [ + 511, + 824, + 906, + 900 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "ates the background, and in the second stage, AnyText performs the text editing task, with AnyText also fine-tuned on the 160K dataset specifically for the text editing task. The Inpainting ControlNet is initialized from pre-trained SD3 Inpainting-ControlNet [7] and Canny ControlNet is initialized from [8]. For 5) GlyphDraw2 [28] and 6) Glyph-ByT5-v2 [26] are both the SOTA T2I methods that support multilingual text rendering. However, they neither have open-source pre-trained weights nor support subject input, so we reproduced them on our dataset by adding the pre-trained inpainting controlnet [9] to support the subject input.", + "bbox": [ + 89, + 90, + 483, + 257 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "8. Scalable Training for Text Rendering", + "text_level": 1, + "bbox": [ + 89, + 271, + 426, + 289 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Our proposed two-stage training strategy allows the model to learn two different capabilities (i.e., text rendering and scene generation) separately, enabling more flexibility with distinct datasets for each phase. Recent text rendering methods [4, 25, 26, 42] typically train their models on datasets containing millions of samples. To verify the potential of further improving our performance with more training data, we build a large dataset with 1 million samples and we directly obtain the text annotations with PPOCRv4 [34] without manually annotating. And we use this dataset for the first stage of text rendering training and use the same 160k data for the second stage of scene generation learning. Compared to using 160k data in both of the previous stages, the text sentence accuracy significantly improved by $4.48\\%$ (as shown in Tab. 4), demonstrating that the multistage training strategy is flexible and scalable. However, in the main experiments, we select to report the performance of our model training only on 160k data for fair comparison with the baselines.", + "bbox": [ + 91, + 297, + 483, + 583 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/164a4b0b551f6078712682cff0d2fad4136ca43265769cddadb2be6d386ef572.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Data Size (St.1 & St.2) | Sen. ACC | NED
160k & 160k | 93.11% | 98.21%
1M & 160k | 97.59% | 99.38%
", + "bbox": [ + 135, + 594, + 437, + 638 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "9. Discussion on advantages of end-to-end over two-stage methods.", + "text_level": 1, + "bbox": [ + 89, + 686, + 482, + 722 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The main weakness of two-stage methods (first inpaint background, then render text) is their inability to consistently provide a clean background for texts (see Fig. 12, reducing text readability, especially with complex backgrounds. In contrast, one-stage methods generate texts and backgrounds simultaneously, enabling them to create a clean backdrop or underlays that enhance text visibility.", + "bbox": [ + 89, + 731, + 482, + 838 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "10. Text Position Control", + "text_level": 1, + "bbox": [ + 91, + 844, + 303, + 859 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The position control of PosterMaker uses a very straightforward approach (as shown in Fig. 13), mapping the text", + "bbox": [ + 89, + 869, + 483, + 901 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1d93b0891749cf4e584bd0d8c2a0366ce2a33c0bed4b72a2ba222d7d57d42ce0.jpg", + "image_caption": [ + "Figure 12. Showcases for end-to-end and two-stage methods." + ], + "image_footnote": [], + "bbox": [ + 516, + 83, + 640, + 237 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/7255cb2de3ecc45d951a2703e4df897c410c3a500d2cbea9e40d0630fb329c24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 82, + 774, + 238 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/70b4fc4b9ea93367b5f8c88d331426b2da2ceffe70c8e631ce18d9d27c1b4f4c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 777, + 82, + 905, + 238 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/7a184ebc870702926c0d50fabb66d44c3e79daadc427cc87722d2cdb992e8db9.jpg", + "table_caption": [ + "Table 4. Quantitative comparison with different data sizes for text rendering training." + ], + "table_footnote": [], + "table_body": "
Method | mIoU | IoU@0.5 | IoU@0.7
Ours | 84.65% | 97.18% | 93.94%
", + "bbox": [ + 568, + 276, + 849, + 306 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 5. Evaluation on text location accuracy.", + "bbox": [ + 573, + 316, + 844, + 330 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "bounding box to cosine position encoding, which is then concatenated with text features and used as the input to TextRenderNet. To demonstrate our method's effectiveness, we evaluate the bounding box IoU (Intersection of Union) metric as follows: 1) we employ OCR model to extract texts from the generated image. 2) For each ground truth text, we identify the best-matched OCR-detected text based on edit distance and then calculate the IoU between their corresponding bounding boxes. We average the IoU score over all the samples to obtain mean IoU (termed mIoU). And we also report IoU@R which indicates the proportion of samples with IoU higher than $R$ . As shown in Tab. 5, our method achieves a high mIoU of $84.65\\%$ and $93.94\\%$ samples have an IoU score higher than 0.7. These promising results prove that our text position control method is simple yet effective.", + "bbox": [ + 511, + 354, + 906, + 598 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/b8c56939fa446816f17a9f9e6141f67f07ea783277973f441764a0c8d18d3221.jpg", + "image_caption": [ + "Figure 13. Detailed illustration of how we construct the position embedding for controlling the text position." + ], + "image_footnote": [], + "bbox": [ + 521, + 613, + 916, + 712 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "11. Comparison Between GlyphByT5 and PosterMaker", + "text_level": 1, + "bbox": [ + 514, + 781, + 906, + 815 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "GlyphByT5 [25, 26] are recently proposed visual text rendering methods that achieve high text rendering accuracy. And we will discuss some differences and internal connections between our PosterMaker and GlyphByT5 on how to control text rendering.", + "bbox": [ + 511, + 824, + 905, + 900 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "- Text position control: GlyphByT5 achieve text position control by modifying the original cross-attention module with their proposed region-wise multi-head cross-attention. In contrast, our PosterMaker encodes the text location directly into the character-level text representation to accomplish text position control. As discussed in Sec. 10, our approach is both simple and effective for precise text location control.", + "bbox": [ + 89, + 90, + 480, + 210 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "- Text content control: both GlyphByT5 and our PosterMaker control the generation of text content by constructing suitable text representation. Specifically, in this work, we claim that the key to achieve accurate text rendering is to extract character-level visual features as the control condition and carefully construct a robust text representation based on off-the-shelf OCR model [34]. In GlyphByT5, the authors also extract character-level text features, but with a textual encoder named ByT5 [48]. Then they propose glyph-alignment pre-training to align these textual features with pre-trained visual encoders DINOv2 [33]. Additionally, they employ box-level contrastive learning with complex augmentations and a hard-mining strategy to enhance character-level discriminativeness. 
We hypothesize that the primary reason both our method and GlyphByT5 achieve high text rendering accuracy is our shared goal of constructing a robust character-level visual representation. In fact, the ability of GlyphByT5's character-level visual representation is distilled from the pre-trained visual encoder DINOv2, rather than inherited from the pre-trained textual encoder ByT5 itself. In order to verify our hypothesis and insights, we adopt a more direct approach to directly replace the PPOCR encoder in PosterMaker with DINOv2. As shown in Tab. 6, simply extracting character-wise visual features with DINOv2 can also achieve precise text rendering. This result further verifies our claim: the key to precise text rendering is to extract character-level visual features as the control condition.", + "bbox": [ + 89, + 212, + 485, + 650 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/6d387ff5c8a05c8017ac5a154369d50fe2bc44b6362e2366fba384dd7ded98aa.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Text Feature | Type | Sen. ACC | NED
PPOCR Line | visual feat. | 38.91% | 53.86%
PPOCR Char | visual feat. | 95.15% | 98.75%
DINOv2 Line | visual feat. | 4.25% | 20.59%
DINOv2 Char | visual feat. | 94.92% | 98.66%
GT (w/o Rec.) | - | 98.53% | 99.59%
GT (w/ SD3 Rec.) | - | 98.09% | 99.36%
", + "bbox": [ + 109, + 659, + 464, + 760 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "12. Visualization of Training Samples", + "text_level": 1, + "bbox": [ + 91, + 829, + 408, + 847 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We present example training images from our dataset in Fig. 17. The dataset predominantly consists of Chinese text, with a small portion of English text. Additionally, it in", + "bbox": [ + 89, + 854, + 482, + 902 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "cludes challenging cases with small-sized text elements.", + "bbox": [ + 511, + 90, + 883, + 107 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "13. The Generalization of Text Representation.", + "text_level": 1, + "bbox": [ + 513, + 119, + 903, + 138 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "PosterMaker is trained primarily on common Chinese data, with only a minimal amount of English data. Despite this, it demonstrates a notable level of generalization, enabling it to generate English, Japanese, and uncommon Chinese characters that were not included in the training set (as shown in Fig. 16). In order to quantitatively evaluate the generalization capability of PosterMaker, we compared the accuracy of different text representations on uncommon characters using a randomly sampled uncommon character benchmark. The results show that our method can also generalize well to some characters that are unseen in the training set. Our performance is inferior to the canny baseline, likely because the canny baseline has been pre-trained on large-scale image data.", + "bbox": [ + 511, + 146, + 906, + 357 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/94f522890ac89db5cbced1996353dcdd9ce9d6d4ee56253dc26bff58a3b35ab3.jpg", + "table_caption": [ + "Table 6. Quantitative comparison using various text features." + ], + "table_footnote": [], + "table_body": "
Text Feature | Type | Sen. ACC | NED
ByT5 | textual feat. | 2.01% | 10.27%
Canny | img | 65.12% | 74.56%
PPOCR Line | visual feat. | 8.34% | 15.84%
PPOCR Char | visual feat. | 61.54% | 70.38%
", + "bbox": [ + 544, + 369, + 877, + 443 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "14. Ablation about Foreground Extension Detector", + "text_level": 1, + "bbox": [ + 513, + 523, + 903, + 558 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We collected $20\\mathrm{k}$ manually annotated images to train the foreground extension detector. We randomly selected $10\\%$ samples as a validation set, while using the remaining $90\\%$ for model training. We conduct ablation experiments on different architecture designs of the detector to verify the effectiveness of the proposed architecture. We implement 2 baselines: 1) RFNet [11]: we reimplemented RFNet based on the description in their paper [11]. Since we could not access their depth and saliency detection models, we modified our implementation to only use the product image and generated image as input, excluding the depth and saliency maps. 2) RFNet(SAM): in this baseline, we replace the image encoder used in RFNet with the same SAM[18] im", + "bbox": [ + 511, + 568, + 908, + 765 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/35f87e04b1510ca15f297ae621021c0892cb6465a983b42340d3dcacb4511d73.jpg", + "table_caption": [ + "Table 7. Quantitative comparison of the rendering results of different text features on uncommon characters." + ], + "table_footnote": [], + "table_body": "
Method | Precision | Recall | F1 Score
RFNet (our impl.) | 76.52% | 75.52% | 76.02%
RFNet (SAM) | 81.35% | 80.99% | 81.17%
Ours | 83.52% | 84.81% | 84.16%
", + "bbox": [ + 550, + 787, + 870, + 847 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 8. Evaluation on different architectures of foreground extension detector.", + "bbox": [ + 511, + 854, + 903, + 883 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/acced411c7664b213472fb0ffbfb1a259736f4feed475108678b21a9429b879f.jpg", + "image_caption": [ + "Subject" + ], + "image_footnote": [], + "bbox": [ + 117, + 136, + 214, + 189 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/bd46d644d9015e5b410203395e465ed73688ec000cef69633c7ab447a9200c8f.jpg", + "image_caption": [ + "Generated Image" + ], + "image_footnote": [], + "bbox": [ + 228, + 104, + 352, + 196 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/ffedaf24c35862b032dc99b4a85857797a2e5b2a367e7bd1415005d7a390fe76.jpg", + "image_caption": [ + "Activation Map" + ], + "image_footnote": [], + "bbox": [ + 354, + 104, + 478, + 294 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/19a2fb38e515248ee4b45a3ab38d79f63e2f74ea44d6f7b97387045908418125.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 99, + 238, + 222, + 287 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/af6d3635434938a34b71ba93e460bad374edc1de60303ed9f7c30405bf9390ec.jpg", + "image_caption": [ + "Figure 14. Class activation map of the foreground extension detector." + ], + "image_footnote": [], + "bbox": [ + 228, + 292, + 333, + 388 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/8c9d4514fd6a4a9d1ec9f5ff58b54853662ebc113a4a253a827240e56b0b06da.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 375, + 292, + 460, + 388 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "age encoder used in our method. As summarized in Tab. 8, our proposed foreground extension detector outperforms the baselines by a considerable margin, which demonstrates its effectiveness.", + "bbox": [ + 89, + 452, + 482, + 511 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In Fig. 14, we visualize the class activation map [54] of our proposed foreground extension detector. As shown, we can observe a notably higher activation score in the extended foreground regions compared to other areas. This compelling evidence demonstrates that our detector has effectively learned to discern foreground extension cases, thereby it can serve as a robust reward model for fine-tuning PosterMaker to mitigate the foreground extension problem.", + "bbox": [ + 89, + 513, + 483, + 633 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "15. Ablation about SceneGenNet", + "text_level": 1, + "bbox": [ + 91, + 646, + 369, + 662 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "SceneGenNet enables our model to perform background inpainting while preserve the subject so we cannot directly remove it. We replace it by SDEdit [30] to achieve inpainting. As the results shown in Sec. 15, replacing it results in a significant drop of performance.", + "bbox": [ + 89, + 672, + 482, + 748 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/6057f34d7b5bcdecb9710dc1ea7043989105c23d15c139875fc1f6224f4cc489.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑
Ours w/o SceneGenNet | 90.53% | 97.95% | 79.44 | 26.67
Ours | 93.36% | 98.39% | 65.35 | 27.04
", + "bbox": [ + 94, + 758, + 480, + 797 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "16. Discussion on the impact of the test set size.", + "text_level": 1, + "bbox": [ + 91, + 844, + 482, + 862 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To ensure a fairer comparison between PosterMaker and the baseline methods, we expanded the test set to 5,000 sam", + "bbox": [ + 89, + 869, + 483, + 900 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "plies(10x the previous PosterBenchmark). The results are shown in Tab. 10, and the experimental conclusions remain consistent with the previous test set. Due to the calculation principle of the FID metric, increasing the test size leads to a significant decrease in the FID scores for all methods, but still maintains the same conclusion.", + "bbox": [ + 511, + 90, + 905, + 181 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/c0827822b7a1d65a2caaebda643136502f5b4bdb61348e9306f863b50c1f13e3.jpg", + "table_caption": [ + "Table 9. Comparison between SceneGenNet and SDEdit" + ], + "table_footnote": [], + "table_body": "
Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑
Glyph-ByT5-v2 | 67.87% | 86.23% | 20.37 | 21.08
SD3_canny&inpaint | 74.49% | 88.78% | 17.91 | 20.79
GlyphDraw2 | 83.81% | 96.49% | 15.24 | 20.67
Ours | 90.20% | 97.58% | 13.36 | 21.36
", + "bbox": [ + 517, + 191, + 903, + 256 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 10. Comparison with baseline methods on 5,000 test samples.", + "bbox": [ + 511, + 262, + 903, + 290 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "17. Discussion on the meaningless texts generated outside target position.", + "text_level": 1, + "bbox": [ + 513, + 315, + 903, + 349 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In our early experimental attempts about text rendering in poster generation, we found that the trained model sometimes generates meaningless texts outside the target area of the text, which will seriously affect the aesthetics. We conjecture that the main reason is that the ground truth images sometimes contain text outside the specified position. To solve this problem, we masked out the extra text during training and it solved most cases.", + "bbox": [ + 511, + 358, + 905, + 478 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Specifically, SceneGenNet is initialized from pre-trained SD3 Inpainting-Controlnet [7]. In the second stage of training, we simultaneously mask out the regions of the untrained texts (usually those that are too small or just logos) both in the subject mask input to SceneGenNet and in the ground truth image used for loss calculation(as shown in Fig. 15). It is worth noting that although these small texts and logos are not included in the training, we have also annotated them to address the aforementioned issues. Finally, this technique makes the loss corresponding to the masked-out regions very close to zero so that the model will not learn these meaningless texts.", + "bbox": [ + 511, + 479, + 906, + 660 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/ac56ce7a14fe06acfd1f9d373213ba03b55a7a52cc249287d4914e6f752251c1.jpg", + "image_caption": [ + "Figure 15. Example of our solution technique for meaningless texts and logos that generated outside target position." + ], + "image_footnote": [], + "bbox": [ + 535, + 676, + 650, + 800 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/773f2b5721a7dfd43745f510e4aa2a780733a1f155cb049f71c66cfa0f62e74a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 676, + 766, + 800 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/62ab0255279e0013a7a9e0ffe8d85575ce34f5e5f9e399dfd33d8dbdb10447d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 769, + 676, + 885, + 800 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/5e80a7606f80a52667546bcd4d7872be923bd45b4c53b620e658b4b3e64d671f.jpg", + "image_caption": [ + "Figure 16. Visualization results on texts in English, Japanese, and uncommon Chinese characters." 
+ ], + "image_footnote": [], + "bbox": [ + 91, + 87, + 251, + 212 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/74039fd0e173fe7d59038a7f229de2e88e5cbcf1d43d3440544e155caba17099.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 251, + 88, + 413, + 212 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/4a2e2c8c0a9d1d6a8fb03c52c4c0a557c6b5d165113dc13020b29cdf24ce825f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 88, + 573, + 212 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/6260cbe319bf3452d748f869daf3b8586b2edf5abe2fe1e22740950281fb35db.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 573, + 88, + 684, + 212 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5377a14a7ca31602338199f71cf74f38cce1d22b831db83a4f7e315ec63825c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 684, + 88, + 795, + 212 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/285d0c0eb47d0b7c3ec34802cd6ff44913974f5624cce623b3c1b4d5858a5444.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 795, + 88, + 903, + 212 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5ec602cf40fe1938ef2768c493fab20a46c6a41971b9cf7f91249bab5e7ac082.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 114, + 257, + 305, + 406 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/484ea3a158871455f056f770a790db31f20761ccaec686d36278db567da63f36.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 308, + 262, + 496, + 406 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5a04c3f6873570243f276bd503122b3e1e26e195d7e7a21ae5c52320d5f8336e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 262, + 687, + 406 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/ca0895ab89b57898376a5b8d1f4963a9a9aea4e850914a42fd1b3fa9e73a9d7b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 261, + 880, + 406 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/d19d64c86c9f00cf6a484d8797879b816195a46db8a3fb1ba3ee4edbd33c9c0c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 417, + 302, + 604 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/f65aecae76910308dbf4fdfd3db0dfa49557ea379c1fb2f97b2a0d19123d85b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 308, + 417, + 493, + 619 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/c9aac91754c4899933bcf0e740e1bfc3817b1d9f38f73587ff56903fd9479333.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 417, + 687, + 625 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/d13c740981bc7e2e64e6ebf40a3f42a41a3a67a6706cdbdd1ae3d96c325cc0f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 417, + 879, + 627 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/f1edcbe6ebae9de62998064f33c802d0608f55c5748245719098ae0035f7178c.jpg", + "image_caption": [ + "Figure 17. Visualization of ground truth for some samples in the dataset." 
+ ], + "image_footnote": [], + "bbox": [ + 114, + 635, + 305, + 823 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/1f80e0402f89e1240e016e8362ce19565d42bf67373ccd3cffd4465e9d757ab9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 310, + 637, + 491, + 839 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/95f067b8f9adc2c7f64f770fe86a8a0c33167d2b1220013b4f0653e183b12bdd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 636, + 684, + 843 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/30723d6c96c7f0e3a399f301dfdc4277f4fd49d43e6db8e55aa926072df85f2c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 692, + 636, + 877, + 843 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_model.json b/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_model.json new file mode 100644 index 0000000000000000000000000000000000000000..28411a36c8f76ad6a770a5aeb25bf09220001809 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_model.json @@ -0,0 +1,3719 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.269, + 0.061, + 0.702 + ], + "angle": 270, + "content": "arXiv:2504.06632v1 [cs.CV] 9 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.131, + 0.825, + 0.177 + ], + "angle": 0, + "content": "PosterMaker: Towards High-Quality Product Poster Generation with Accurate Text Rendering" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.203, + 0.735, + 0.221 + ], + "angle": 0, + "content": "Yifan \\(\\mathrm{Gao}^{1,2*^{\\dagger}}\\), Zihang Lin\\(^{2*}\\), Chuanbin Liu\\(^{1\\ddagger}\\), Min Zhou\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.329, + 0.222, + 0.667, + 0.24 + ], + "angle": 0, + "content": "Tiezheng Ge², Bo Zheng², Hongtao Xie¹" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.24, + 0.835, + 0.258 + ], + "angle": 0, + "content": "1University of Science and Technology of China 2Taubao & Tmall Group of Alibaba" + }, + { + "type": "text", + "bbox": [ + 0.275, + 0.26, + 0.723, + 0.275 + ], + "angle": 0, + "content": "eafn@mail.ustc.edu.cn {liucb92, htxie}@ustc.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.278, + 0.782, + 0.293 + ], + "angle": 0, + "content": "{linzihang.lzh, yunqi.zm, tiezheng.gtz, bozheng}@alibaba-inc.com" + }, + { + "type": "text", + "bbox": [ + 0.297, + 0.296, + 0.697, + 0.31 + ], + "angle": 0, + "content": "Project page: https://poster-maker.github.io" + }, + { + "type": "title", + "bbox": [ + 0.165, + 0.312, + 0.219, + 0.327 + ], + "angle": 0, + "content": "Prompt" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.336, + 0.258, + 0.395 + ], + "angle": 0, + "content": "The box of fish oil supplements is placed on a wooden table, with a background of a serene ocean and clear sky, symbolizing purity and the natural source of the product" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.429, + 0.258, + 0.499 + ], + "angle": 0, + "content": "The subject rests on a smooth, dark wooden table, surrounded by a few scattered leaves and delicate flowers, with a serene garden scene complete with blooming flowers and lush greenery in the background." 
+ }, + { + "type": "title", + "bbox": [ + 0.261, + 0.312, + 0.333, + 0.327 + ], + "angle": 0, + "content": "Subject" + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.32, + 0.333, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.446, + 0.349, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.385, + 0.313, + 0.423, + 0.325 + ], + "angle": 0, + "content": "Text" + }, + { + "type": "image", + "bbox": [ + 0.35, + 0.325, + 0.462, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.35, + 0.414, + 0.462, + 0.527 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.489, + 0.313, + 0.537, + 0.325 + ], + "angle": 0, + "content": "Poster" + }, + { + "type": "image", + "bbox": [ + 0.464, + 0.325, + 0.576, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.464, + 0.413, + 0.576, + 0.527 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.651, + 0.312, + 0.796, + 0.326 + ], + "angle": 0, + "content": "Previous: two stage" + }, + { + "type": "image", + "bbox": [ + 0.583, + 0.326, + 0.774, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.777, + 0.326, + 0.868, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.659, + 0.42, + 0.78, + 0.434 + ], + "angle": 0, + "content": "Ours: end to end" + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.456, + 0.755, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.776, + 0.438, + 0.868, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.323, + 0.515, + 0.339, + 0.528 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.528, + 0.294, + 0.736 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.297, + 0.53, + 0.429, + 0.633 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.633, + 0.428, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.43, + 0.53, + 0.562, + 0.633 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.43, + 0.636, + 0.561, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.53, + 0.697, + 0.633 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.636, + 0.697, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.53, + 0.877, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.489, + 0.735, + 0.504, + 0.745 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.743, + 0.907, + 0.798 + ], + "angle": 0, + "content": "Figure 1. (a) Definition of the advertising product poster generation task. The input includes the prompt, subject image, and the texts to be rendered with their layouts. The output is the poster image. (b) The comparison of our method with the previous method. PosterMaker generates posters end-to-end, while previous methods first generate poster backgrounds and then render texts. (c) Visualization results demonstrate that PosterMaker can generate harmonious and aesthetically pleasing posters with accurate texts and maintain subject fidelity." 
+ }, + { + "type": "title", + "bbox": [ + 0.249, + 0.807, + 0.327, + 0.823 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.834, + 0.483, + 0.864 + ], + "angle": 0, + "content": "Product posters, which integrate subject, scene, and text, are crucial promotional tools for attracting customers. Cre" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.812, + 0.907, + 0.888 + ], + "angle": 0, + "content": "ating such posters using modern image generation methods is valuable, while the main challenge lies in accurately rendering text, especially for complex writing systems like Chinese, which contains over 10,000 individual characters. In this work, we identify the key to precise text rendering" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.876, + 0.363, + 0.889 + ], + "angle": 0, + "content": "* Equal contribution. ‡ Corresponding author." + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.889, + 0.395, + 0.901 + ], + "angle": 0, + "content": "† Work done during the internship at Alibaba Group." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.876, + 0.395, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.332 + ], + "angle": 0, + "content": "as constructing a character-discriminative visual feature as a control signal. Based on this insight, we propose a robust character-wise representation as control and we develop TextRenderNet, which achieves a high text rendering accuracy of over \\(90\\%\\). Another challenge in poster generation is maintaining the fidelity of user-specific products. We address this by introducing SceneGenNet, an inpainting-based model, and propose subject fidelity feedback learning to further enhance fidelity. Based on TextRenderNet and SceneGenNet, we present PosterMaker, an end-to-end generation framework. To optimize PosterMaker efficiently, we implement a two-stage training strategy that decouples text rendering and background generation learning. Experimental results show that PosterMaker outperforms existing baselines by a remarkable margin, which demonstrates its effectiveness." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.346, + 0.223, + 0.361 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.371, + 0.483, + 0.658 + ], + "angle": 0, + "content": "Product posters, which showcase items for sale within well-chosen background scenes and include descriptive text, play a vital role in e-commerce advertising by capturing customers' attention and boosting sales. Creating such posters necessitates photographing the product in carefully selected environments that highlight its features, as well as thoughtfully choosing text colors and fonts to ensure that the text is appealing, legible, and harmonious with the background. This process can be quite expensive. With the significant advancements in large-scale text-to-image (T2I) models [13, 35, 39], synthesizing such product posters with image generation models attracts increasing attention. In this paper, we focus on the product poster generation task. 
Specifically, given a prompt describing the background scene, the foreground image of the user-specified subject and some texts together with their layouts, we aim to develop a model to generate the subject into the desired scene background and accurately render the text in an end-to-end manner (as shown in Fig. 1 (a))." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.66, + 0.483, + 0.856 + ], + "angle": 0, + "content": "A straightforward solution for this task is to first generate the subject into the desired scene [2, 11, 40], and then predict the text attributes (such as color and font) [14, 23] and render them on the image. However, such two-stage approach suffers from disharmony between the text and the poster background(as shown in Fig. 2 (b)). And collecting training data is also challenging since the text attributes, especially the text font, are difficult to extract from the poster. Another solution is learning to generate the poster using a per-pixel synthesis approach, which can benefit from directly learning the distribution of professionally designed posters. We focus on such one-stage solution. The main challenge is how to ensure the text rendering accuracy." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Many recent works [13, 25, 42, 49] have been proposed to improve the text rendering accuracy for large diffusion models. Great progress has been made and some" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.273 + ], + "angle": 0, + "content": "recent work can achieve high rendering accuracy for English. However, for non-Latin languages like Chinese, one of the most widely spoken languages, achieving high rendering accuracy remains challenging. This difficulty stems from the existence of over 10,000 characters, with Chinese characters characterized by complex and diverse stroke patterns, making it extremely difficult to train a model to memorize the rendering of each individual character. Recent studies [4, 28, 42] have focused on extracting visual features of text as control signals. Typically, these approaches render text lines into glyph images and extract line-level text visual features to guide generation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.276, + 0.905, + 0.548 + ], + "angle": 0, + "content": "Nevertheless, line-level visual features often lack the discriminative power to capture character-level visual nuances. To address this limitation, GlyphByT5 [25, 26] introduced a box-level contrastive loss with sophisticated glyph augmentation strategies to enhance character-level discriminativeness, achieving promising results. In this paper, we point out that the key to high-accuracy text rendering lies in constructing character-discriminative visual features as control signals. Specifically, we render each character as a glyph image and extract visual features via a visual encoder. These features are then concatenated with positional embeddings to form a character-level representation. Then we propose TextRenderNet, an SD3 [13] controlnet-like [53] architecture that takes the character-level representation as the control signal to render visual text. Our experiments demonstrate that the proposed character-level representation is effectively capable of achieving accurate text rendering." 
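As an illustration of the "controlnet-like" conditioning mentioned above (the zero-convolution detail is described later in Sec. 3.2), the following is a minimal sketch, not the released implementation: a copy of a block processes the control tokens and its output is added to the base block's output through a zero-initialized projection, so training starts from the unmodified base model. The toy transformer blocks and dimensions below are placeholders for the actual SD3 MM-DiT blocks.

```python
import torch
import torch.nn as nn

class ZeroLinear(nn.Linear):
    """Linear layer initialized to zero, so the control branch starts as a no-op."""
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__(dim_in, dim_out)
        nn.init.zeros_(self.weight)
        nn.init.zeros_(self.bias)

class ControlledBlock(nn.Module):
    """Toy stand-in for one base block plus its trainable control copy (not the real MM-DiT block)."""
    def __init__(self, dim: int):
        super().__init__()
        self.base = nn.TransformerEncoderLayer(dim, nhead=4, batch_first=True)
        self.ctrl = nn.TransformerEncoderLayer(dim, nhead=4, batch_first=True)
        self.zero_proj = ZeroLinear(dim, dim)

    def forward(self, x: torch.Tensor, control: torch.Tensor) -> torch.Tensor:
        # The control-branch output is passed through a zero-initialized projection and
        # added to the base-branch output, mirroring the "zero convolution" idea.
        return self.base(x) + self.zero_proj(self.ctrl(control))

# Usage: latent tokens conditioned on character-level control tokens of the same width.
x = torch.randn(2, 77, 64)   # base latent/token sequence (batch, tokens, dim)
c = torch.randn(2, 77, 64)   # control tokens, e.g. adapted character representations
print(ControlledBlock(64)(x, c).shape)  # torch.Size([2, 77, 64])
```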
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.551, + 0.905, + 0.838 + ], + "angle": 0, + "content": "In the task of poster generation, another important thing is to generate the user-specific subject into a desired scene while keeping high subject fidelity. Recent subject-driven controllable generation methods [40, 44, 51] can synthesize such images, but they still cannot ensure that the user-specified subject is completely consistent in the generated details (e.g., the logo on the product may be inaccurately generated), which could potentially mislead customers. Therefore, we follow poster generation methods [5, 11, 22] to address this task via introducing an inpainting-based module named SceneGenNet. However, we found that even using inpainting methods, subject consistency is not always achieved as the inpainting model sometimes extends the subject shape (as shown in Fig. 2 (a)). Similar phenomenon is also observed in [11, 12]. To address this issue, we elaboratively develop a detector to detect the foreground extension cases. Then we employ the detector as a reward model to train the SceneGenNet via feedback learning for further improving subject fidelity." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Combining the proposed TextRenderNet and SceneGenNet, we develop a framework named PosterMaker that can synthesize the product poster in an end-to-end manner. To efficiently optimize PosterMaker, we introduce a two-stage" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.134, + 0.09, + 0.252, + 0.183 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.151, + 0.183, + 0.232, + 0.19 + ], + "angle": 0, + "content": "User-Specified Texts" + }, + { + "type": "image", + "bbox": [ + 0.255, + 0.091, + 0.374, + 0.183 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.266, + 0.183, + 0.351, + 0.191 + ], + "angle": 0, + "content": "User-Specified Subject" + }, + { + "type": "image", + "bbox": [ + 0.376, + 0.091, + 0.497, + 0.183 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.391, + 0.183, + 0.488, + 0.191 + ], + "angle": 0, + "content": "(a) Foreground Extension" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.091, + 0.618, + 0.183 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.183, + 0.61, + 0.191 + ], + "angle": 0, + "content": "(b)Text-Scene Disharmony" + }, + { + "type": "image", + "bbox": [ + 0.62, + 0.091, + 0.74, + 0.183 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.633, + 0.183, + 0.724, + 0.191 + ], + "angle": 0, + "content": "(c) Poor Text Rendering" + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.091, + 0.864, + 0.183 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.781, + 0.183, + 0.822, + 0.19 + ], + "angle": 0, + "content": "Our Result" + }, + { + "type": "image_caption", + "bbox": [ + 0.13, + 0.193, + 0.865, + 0.207 + ], + "angle": 0, + "content": "Figure 2. The illustration of the three challenges faced by poster generation, which seriously hinder the practical application." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.216, + 0.484, + 0.322 + ], + "angle": 0, + "content": "training strategy to separately train TextRenderNet and SceneGenNet. 
This training strategy decouples the learning of text rendering and background image generation, thus TextRenderNet and SceneGenNet can focus on their specific tasks. Qualitative results (as shown in Fig. 1 (c)) demonstrate our training strategy is effective for training PosterMaker and it achieves promising poster generation results." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.324, + 0.427, + 0.337 + ], + "angle": 0, + "content": "To summarize, our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.34, + 0.483, + 0.415 + ], + "angle": 0, + "content": "- We proposed a novel framework named PosterMaker, which mainly consists of a TextRenderNet and a SceneGenNet. With a two-stage training strategy, PosterMaker can synthesis aesthetically product posters with texts accurately and harmoniously rendered on it." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.416, + 0.483, + 0.49 + ], + "angle": 0, + "content": "- We reveal the core of achieving accurate Chinese text rendering is to construct a robust character-level text representation as the control condition. These findings can inspire future research on improving the text rendering abilities of T2I models." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.491, + 0.484, + 0.537 + ], + "angle": 0, + "content": "- We improve the subject fidelity via subject fidelity feedback learning, which is shown effective in addressing the subject inconsistency issue." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.34, + 0.484, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.548, + 0.233, + 0.563 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.575, + 0.268, + 0.589 + ], + "angle": 0, + "content": "2.1. Poster Generation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.599, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Generating posters involves combining various elements like a subject image, a background scene image, and text to ensure the subject and text are prominently and accurately displayed while maintaining an appealing look. Automating this process is quite complex and challenging. Methods like AutoPoster [23], Prompt2Poster [45], and COLE [16] break it down into stages: creating images and layout, predicting the visual properties of text, and rendering the poster. These approaches have several steps and often struggle to precisely obtain all the necessary visual attributes like font and color gradients. With the emergence of more advanced generative models [35], methods like JoyType [19], Glyphbyt5 [25], and GlyphDraw2 [28] can directly generate the image and text together at the pixel level based on the poster prompt, text content, and layout. This more streamlined approach can leverage more readily available poster pixel data for training, but there is still room for improvement in terms of the overall poster cohesion and text accuracy. Our method is also a one-stage, direct pixel-level generation approach that simultaneously creates the image and" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.216, + 0.907, + 0.293 + ], + "angle": 0, + "content": "text. However, our focus is on generating posters for a given product subject, where the input includes the subject image, prompt, text content, and layout. In addition to considering text rendering accuracy and overall poster harmony, we also need to maintain the fidelity of the product." 
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.301, + 0.722, + 0.318 + ], + "angle": 0, + "content": "2.2. Visual Text Rendering" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.324, + 0.908, + 0.763 + ], + "angle": 0, + "content": "Recently, text-to-image (T2I) models [1, 13, 41] have made significant strides in enhancing English text rendering by introducing stronger text encoders, such as T5 [38]. However, multilingual text image generation still faces significant challenges due to the large number of non-Latin characters and complex stroke structures. Early work [49] has explored the ControlNet-based method [53], using low-level visual images such as glyph images as the control signal for text image generation. However, glyph images are easily affected by text size and shape, especially complex stroke details. Besides, some recent works [4, 27, 28, 42, 52, 55] utilize more robust visual features, such as line-level OCR features as control conditions to further improve the text accuracy. But the line-level visual features still perform poorly in representing stroke details for each character. To address this issue, GlyphByT5 [25, 26] proposes a method with box-level contrastive learning to align the text features extracted from the language model with the features extracted from the visual encoder. To effectively learn such alignment, GlyphByT5 relies on collecting massive amounts of data and developing complex data augmentation strategies for the alignment pre-training, which lacks flexibility. In contrast, in this paper, we reveal that the key to high-accuracy text rendering lies in constructing discriminative character-level visual features. Thus we propose a plug-and-play and robust character-level text representation derived from off-the-shelf OCR encoders, which can accurately represent the visual structure of the text without additional training and enable precise text rendering." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.772, + 0.83, + 0.789 + ], + "angle": 0, + "content": "2.3. Subject-Preserved Scene Generation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.906, + 0.901 + ], + "angle": 0, + "content": "To create a scene image with a product subject while ensuring subject fidelity, two main methods are commonly used. One is the subject-driven method [3, 6, 20, 36, 40], which adjusts the position, angle and lighting of the subject based on the prompt to create a harmonious image. However, it often struggles to preserve the significant features of the subject. The other utilizes inpainting-based background com" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.1, + 0.093, + 0.899, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.265, + 0.905, + 0.294 + ], + "angle": 0, + "content": "Figure 3. The framework of the PosterMaker, which is based on the SD3. To precisely generate multilingual texts and create aesthetically pleasing poster scenes, TextRenderNet and SenceGenNet are introduced, whose outputs are used as control conditions added to the SD3." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.302, + 0.484, + 0.422 + ], + "angle": 0, + "content": "pletion techniques [2, 11, 43]. It only generates the non-subject areas of an image and naturally keeps consistency in the original subject area. 
But it sometimes extends the foreground subject [11, 12], such as adding an extra handle to a cup, which also reduces subject fidelity. To maximize subject fidelity, our method uses background completion and a reward model to determine whether the foreground extension occurred, thereby enhancing subject fidelity." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.428, + 0.182, + 0.443 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.452, + 0.295, + 0.467 + ], + "angle": 0, + "content": "3.1. Problem Formulation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.476, + 0.484, + 0.566 + ], + "angle": 0, + "content": "This paper focuses on the creation of product posters, which typically consist of multiple elements such as text, subjects, and scenes, as illustrated in Fig. 1 (a). The central challenge of this task is to generate these elements accurately and harmoniously, offering both research and practical applications. The task is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.574, + 0.482, + 0.59 + ], + "angle": 0, + "content": "\\[\nI _ {g} = f \\left(I _ {s}, M _ {s}, T, P\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.598, + 0.484, + 0.688 + ], + "angle": 0, + "content": "where \\( I_{g} \\) denotes the generated poster image, \\( I_{s} \\) represents the subject image, and \\( M_{s} \\) is the subject mask. The variable \\( T \\) signifies the content and the position of text and \\( P \\) is the prompt describing the background scene. Subsequent sections will detail the design of PosterMaker, and our proposed solution to this task." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.697, + 0.219, + 0.71 + ], + "angle": 0, + "content": "3.2. Framework" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.484, + 0.901 + ], + "angle": 0, + "content": "As shown in Fig. 3, PosterMaker is developed based on Stable Diffusion 3 (SD3) [13], which contains a strong VAE for reconstructing the image details like text stroke. And we propose two modules, i.e., TextRenderNet and SceneGenNet, to address the poster generation task. TextRenderNet is specifically designed to learn visual text rendering, taking character-level visual text representations as input to achieve precise and controllable text rendering. SceneGenNet, on the other hand, accepts a masked image (indicating which content should remain unchanged) and a prompt, learning to generate the foreground subject within the desired scene described by the prompt. Both TextRenderNet" + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.299, + 0.895, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.459, + 0.905, + 0.486 + ], + "angle": 0, + "content": "Figure 4. The details of TextRenderNet and SceneGenNet, showcasing their model architectures and their interactions with SD3." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.496, + 0.907, + 0.799 + ], + "angle": 0, + "content": "and SceneGenNet are grounded in a ControlNet-like [52] architecture derived from SD3 and their architectures are detailed in Fig. 4. They share the same internal structure, comprising several cascaded MM-DiT blocks [13], with weights copied from the base model for initialization. The output of each MM-DiT block is added to the corresponding block of the base model after passing through a zero convolution layer [53]. The key distinction between the two modules lies in their input configurations. 
SceneGenNet takes the prompt as input to the text condition branch, and for the visual branch, the input is derived by the latent feature at timestep \\( t \\), the subject mask, and the masked latent to preserve the foreground area. In contrast, TextRenderNet receives text representations (detailed in the next section) in the text condition branch for text rendering. An adapter, consisting of a linear layer and layer normalization, adjusts the feature dimensions of these text representations before they are input to TextRenderNet. The outputs of each block in TextRenderNet and SceneGenNet are directly added to the corresponding block outputs of the SD3 base model." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.805, + 0.905, + 0.838 + ], + "angle": 0, + "content": "3.3. Character-level Visual Representation for Precise Text Rendering" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.906, + 0.902 + ], + "angle": 0, + "content": "Recently, some works have explored multilingual visual text generation. Among them, a promising approach is based on ControlNet-like methods [42], which utilize both glyph images and line-level OCR features as conditions." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.09, + 0.484, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.154, + 0.483, + 0.197 + ], + "angle": 0, + "content": "Figure 5. The distinction between the multilingual character-level text representation we proposed and the line-level methods of previous works like AnyText [42] and GlyphDraw2 [28]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.204, + 0.484, + 0.325 + ], + "angle": 0, + "content": "However, this control information cannot accurately represent characters: 1) glyph images are easily affected by text size and shape, making them less robust. 2) line-level visual features lack fine-grained stroke features and are limited by the OCR model's poor capability to recognize long texts. To address these challenges, this paper proposes a plug-and-play and robust character-level text representation, where each character is precisely represented by one token." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.325, + 0.484, + 0.401 + ], + "angle": 0, + "content": "Specifically, the text \\(C\\) has \\(n\\) characters. For each character \\(c_{i}\\), its feature is separately extracted by a pre-trained OCR encoder \\(f_{v}\\) and then averaged and pooled to obtain a compact character representation vector \\(r_{c_i} \\in \\mathbb{R}^c\\). Thus, the character-level text representation is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.203, + 0.406, + 0.482, + 0.423 + ], + "angle": 0, + "content": "\\[\nr _ {c i} = \\operatorname {a v g p o o l} \\left(f _ {v} \\left(I _ {c i}\\right)\\right), \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.424, + 0.482, + 0.442 + ], + "angle": 0, + "content": "\\[\nR _ {c} = \\left[ r _ {c _ {1}}, r _ {c _ {2}}, \\dots , r _ {c _ {n}} \\right], \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.453, + 0.483, + 0.483 + ], + "angle": 0, + "content": "where \\(I_{c_i}\\) is the \\(i\\)-th character image rendered in a fixed font, and \\(R_{c} \\in \\mathbb{R}^{n \\times c}\\) is the char-level text representation." 
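A minimal sketch of Eq. (2)-(3), under stated assumptions: each character is rendered to a small glyph image in a fixed font, encoded by a frozen visual encoder, and average-pooled into one token; the tokens are stacked into \(R_c\). The tiny CNN below stands in for the pre-trained OCR encoder \(f_v\) (in the paper, a PP-OCR-style recognizer), and the rendering details (default PIL font, 32x32 canvas) are illustrative choices only; in the paper these tokens are further concatenated with sinusoidal order and box encodings before entering TextRenderNet.

```python
import torch
import torch.nn as nn
from PIL import Image, ImageDraw

def render_char(ch: str, size: int = 32) -> torch.Tensor:
    """Render one character on a white canvas (default PIL bitmap font; the fixed font is an assumption)."""
    img = Image.new("L", (size, size), color=255)
    ImageDraw.Draw(img).text((4, 4), ch, fill=0)
    pixels = torch.tensor(list(img.getdata()), dtype=torch.float32) / 255.0
    return pixels.view(1, 1, size, size)  # (batch, channel, H, W)

# Stand-in for the pre-trained OCR encoder f_v (the real model would be frozen OCR weights).
f_v = nn.Sequential(
    nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
    nn.Conv2d(16, 64, 3, padding=1), nn.ReLU(),
)

def char_representation(text: str) -> torch.Tensor:
    """Eq. (2)-(3): r_ci = avgpool(f_v(I_ci)); R_c stacks one pooled feature vector per character."""
    tokens = []
    with torch.no_grad():
        for ch in text:
            feat = f_v(render_char(ch))           # (1, C, H, W) feature map for one glyph
            tokens.append(feat.mean(dim=(2, 3)))  # spatial average pooling -> (1, C)
    return torch.cat(tokens, dim=0)               # R_c with shape (n, C)

print(char_representation("SALE").shape)  # torch.Size([4, 64])
```

Because the character set is fixed, these per-character vectors can be computed once and cached in a lookup table, which is what makes the representation cheap to use at training and inference time.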
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.484, + 0.484, + 0.619 + ], + "angle": 0, + "content": "As shown in Fig. 5, compared to previous methods, our key difference is extracting representations from character glyph images. This enables the model to perceive character stroke structures and achieve high text accuracy. Additionally, since the number of characters is fixed, we can pre-extract the representations of each character and store them in a dictionary, eliminating the need for online rendering and feature extraction. This significantly simplifies the training and inference pipeline." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.62, + 0.484, + 0.755 + ], + "angle": 0, + "content": "Finally, this text representation lacks order and positional information. Thus, the character order encoding \\( P_{rank} \\) is introduced to represent the order of characters in the text, which is implemented through a sinusoidal position encoding of the char order. Besides, inspired by GLIGEN [21], the text position coordinates are mapped to sinusoidal position encoding \\( P_{bbox} \\) to control the position of the text. Then we concatenate \\( P_{rank} \\), \\( P_{bbox} \\) and \\( R_c \\) along the feature dimension to construct the final text representation." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.761, + 0.336, + 0.777 + ], + "angle": 0, + "content": "3.4. Improving Subject Fidelity" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.484, + 0.901 + ], + "angle": 0, + "content": "In the task of generating product posters, it is crucial to maintain subject fidelity, i.e., ensuring that the subject in the generated poster remains consistent with the user-specified subject. To achieve this goal, we employ SceneGenNet to perform background inpainting, which is trained to precisely preserve the foreground subject and only inpaint the background according to the prompt. However, inpainting-based models sometimes extend the foreground subject into" + }, + { + "type": "image", + "bbox": [ + 0.525, + 0.088, + 0.887, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.516, + 0.215, + 0.903, + 0.228 + ], + "angle": 0, + "content": "Figure 6. The model details of the foreground extension detector." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.235, + 0.687, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.236, + 0.9, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.335, + 0.905, + 0.362 + ], + "angle": 0, + "content": "Figure 7. The illustration of our two-stage training strategy for efficiently optimizing PosterMaker." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.373, + 0.906, + 0.449 + ], + "angle": 0, + "content": "another subject (as shown in Fig. 2 (b)) thereby compromising subject fidelity. We refer to this as \"foreground extension\". To mitigate this issue, we develop a model to detect foreground extension and employ it as a reward model to fine-tune PosterMaker to improve subject fidelity." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.45, + 0.906, + 0.646 + ], + "angle": 0, + "content": "Foreground Extension Detector. We develop the foreground extension detector \\( S_{\\theta} \\) based on HQ-SAM [17]. As shown in Fig. 6, we input the generated image \\( I_{g} \\) to SAM [18] image encoder. 
The subject mask \\( M_{s} \\) and box \\( B_{s} \\) are provided as mask prompt and box prompt, respectively, to the HQ-SAM decoder to obtain an intermediate mask \\( M_{i} \\). Next, we concatenate the image features extracted from SAM encoder with \\( M_{s} \\), \\( M_{i} \\) and \\( M_{s} - M_{i} \\) at the channel dimension. The concatenated features are processed through convolutional layers and MLP layers to predict whether the foreground has been extended in the generated image. We collected 20k manually annotated images to train the foreground extension detector \\( S_{\\theta} \\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.648, + 0.907, + 0.844 + ], + "angle": 0, + "content": "Subject Fidelity Feedback Learning. The foreground extension detector \\( S_{\\theta} \\), after the offline training, is used as a reward model to supervise PosterMaker to improve subject fidelity. Specifically, assuming the reverse process has a total of \\( T' \\) steps, we follow ReFL [47] to first sample \\( z_{T'} \\sim \\mathcal{N}(0,1) \\) and after \\( T' - t' \\) steps of inference \\( (z_{T'} \\rightarrow z_{T'-1} \\rightarrow \\dots \\rightarrow z_{t'}) \\), we obtain \\( z_{t'} \\), where \\( t' \\sim [1, t_1] \\). Then, we directly perform a one-step inference \\( z_{t'} \\rightarrow z_0 \\) to accelerate the reverse process. Furthermore, \\( z_0 \\) is decoded to the generated image \\( x_0 \\). The detector \\( S_{\\theta} \\) predicts the foreground extension score for \\( x_0 \\), and this score is used as the reward loss to optimize the generator \\( G_{\\phi} \\) (i.e., PostMaker). The reward loss is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.518, + 0.85, + 0.905, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\text {r e w a r d}} (\\phi) = - \\mathbb {E} _ {(x, c, m) \\sim \\mathcal {D} _ {\\text {t r a i n}}, t ^ {\\prime} \\sim [ 1, t _ {1} ], z _ {T ^ {\\prime}} \\sim \\mathcal {N} (0, 1)} \\\\ \\log \\sigma \\left(1 - S _ {\\theta} \\left(G _ {\\phi} \\left(z _ {T ^ {\\prime}}, x, c, m, t ^ {\\prime}\\right), m\\right)\\right), \\tag {4} \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.098, + 0.09, + 0.905, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.375, + 0.907, + 0.403 + ], + "angle": 0, + "content": "Figure 8. Qualitative comparison with different methods. Best viewed on Screen. To aid comprehension, Chinese text lines in the image are translated into English and annotated using corresponding colors." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.408, + 0.484, + 0.485 + ], + "angle": 0, + "content": "where \\( x, c, m \\) sampled from the train data \\( \\mathcal{D}_{\\mathrm{train}} \\), represent the subject image, control conditions, and subject mask respectively. To avoid overfitting, we don't calculate reward loss for the cases where the foreground extension score is below 0.3. 
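To make the reward term in Eq. (4) concrete, here is a hedged sketch (placeholder functions, not the released implementation) of how a frozen extension detector \(S_\theta\) could be turned into a loss: the one-step-decoded image is scored together with the subject mask, the generator is penalized when the predicted extension probability is high, and samples whose score is already below the threshold are skipped.

```python
import torch
import torch.nn.functional as F

def reward_loss(detector, image_pred, subject_mask, skip_below=0.3):
    """Eq. (4)-style term: -log(sigmoid(1 - S_theta(x0, m))), skipped when the score is already low.

    detector:     frozen model returning a foreground-extension score in [0, 1]
    image_pred:   one-step decoded image x0 (differentiable w.r.t. the generator)
    subject_mask: binary mask m of the user-specified subject
    """
    score = detector(image_pred, subject_mask)   # higher score = more foreground extension
    loss = -F.logsigmoid(1.0 - score)
    # Ignore samples whose extension score is already below the threshold (0.3 in the paper).
    return torch.where(score < skip_below, torch.zeros_like(loss), loss).mean()

# Tiny usage example with a dummy detector so the snippet runs standalone.
dummy_detector = lambda img, m: torch.sigmoid(img.mean(dim=(1, 2, 3)))
imgs = torch.randn(4, 3, 64, 64, requires_grad=True)
masks = torch.ones(4, 1, 64, 64)
print(reward_loss(dummy_detector, imgs, masks))
```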
Our total training loss is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.502, + 0.483, + 0.518 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {d e n o i s e}} + \\lambda \\mathcal {L} _ {\\text {r e w a r d}}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.536, + 0.483, + 0.566 + ], + "angle": 0, + "content": "where \\(\\lambda\\) is the hyperparameter to adjust the weight of reward loss and the denoise loss." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.592, + 0.265, + 0.609 + ], + "angle": 0, + "content": "3.5. Training Strategy" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.618, + 0.484, + 0.906 + ], + "angle": 0, + "content": "To efficiently train PosterMaker, this paper introduces a two-stage training strategy, as shown in Fig. 7, aimed at decoupling the learning for text rendering and background image generation. Specifically, in the first stage, the training task is local text editing. We freeze SceneGenNet and only the TextRenderNet and adapter are optimized. Since we initialize SceneGenNet with pre-trained weights of inpainting-controlnet [7], it can fill the local background well thus TextRenderNet can focus on learning text generation. In the second stage, the training task is subject-based text-to-image generation. Here we froze TextRenderNet and only train the SceneGenNet. In this stage, SceneGenNet focuses on learning poster scenes and creative design from the train data. Notably, Stage 1 learns local text editing/inpainting and Stage 2 learns background inpainting, thus the input images indicating the area to inpaint are different (See Fig. 7). With such a two-stage training strategy, TextRenderNet and SceneGenNet can be efficiently optimized since they can focus on their specific tasks." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.406, + 0.646, + 0.423 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.428, + 0.704, + 0.444 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.448, + 0.907, + 0.704 + ], + "angle": 0, + "content": "Dataset. We crawl product posters from online e-commerce platforms to construct our training set. Our training data mainly consists of Chinese posters, we first employ PPOCRv4 model [34] to extract the text content and their bounding boxes from the images as a coarse annotation. And we ask some annotators to further refine the bounding boxes and correct the text content to improve the annotation quality. Resulting in a dataset containing 160k images. We generate image captions with GPT4-o [32] and extract foreground subject masks with \\(\\mathrm{U}^2\\)-Net [37] and VitMatte [50]. We randomly select 302 images for evaluation and leave the rest for training. To better evaluate the performance of our method, we use LLM [10] to generate some background prompts and text layouts as evaluation samples, after manually checking and removing those irrational ones, we obtain another 198 evaluation samples to form a final evaluation set named PosterBenchmark containing 500 samples." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.705, + 0.909, + 0.902 + ], + "angle": 0, + "content": "Evaluation Metrics. We follow Anytext [42] to evaluate text rendering accuracy using two metrics: sentence accuracy (Sen. Acc) and normalized edit distance (NED). 
Specifically, we crop the text line from the generated image according to the provided bounding box and utilize the OCR model [31] to predict the content \\( s_{\\mathrm{pred}} \\) of the generated text line. We denote the ground truth text content as \\( s_{\\mathrm{gt}} \\). A text line is considered to be correctly generated if \\( s_{\\mathrm{pred}} = s_{\\mathrm{gt}} \\); this condition is used to calculate Sen. Acc. Additionally, we compute the normalized edit distance (NED) between \\( s_{\\mathrm{pred}} \\) and \\( s_{\\mathrm{gt}} \\) to measure their similarity. We further calculate FID [15] to measure the visual quality and CLIP-T [40] metric for evaluating text-image alignment." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.09, + 0.905, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.372, + 0.907, + 0.399 + ], + "angle": 0, + "content": "Figure 9. Qualitative comparison using various text features. It is obvious that the character-level OCR features we used (PPOCR Char) are the most effective at maintaining character accuracy." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.408, + 0.486, + 0.651 + ], + "angle": 0, + "content": "Implementation Details. Our SceneGenNet is initialized from pre-trained SD3 Inpainting-Controlnet [7] and TextRenderNet is initialized from SD3 [13] weight with the same configuration as in [8]. For Subject Fidelity Feedback Learning, we follow existing work [47] to uniformly sample \\( t' \\) between [1, 10]. Within this range, the one-step inference result of image \\( x_0 \\) from \\( t' \\) is close to the full inference result. The weight coefficient of \\( \\lambda \\) is set to 0.0005. The learning rate is set to 1e-4 and the batch size is set to 192. We train our framework for 26k and 29.5k steps for training stage1 and stage2, respectively. Finally, PosterMaker was trained on 32 A100 GPUs for 3 days. During the sampling process, based on the statistical information, a maximum of 7 lines of text and 16 characters per line of text are selected from each image to render onto the image, as this setting can cover most situations in the dataset." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.658, + 0.363, + 0.673 + ], + "angle": 0, + "content": "4.2. Comparison with Prior Works" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.678, + 0.485, + 0.875 + ], + "angle": 0, + "content": "Baseline methods. We carefully designed the following baseline approaches based on existing open-sourced techniques for comparative analysis. SD3_inpaint_byt5: We encode the text content into prompt embeddings using ByT5 [48] and employ an adapter to map these embeddings to the original prompt embedding space of SD3 before feeding them into the controlnet, which enables the controlnet to render multilingual text. SD3_canny&inpaint: First render the text into a white-background image and extract the canny edge from it as control. Then finetune a pre-trained SD3 canny controlnet together with an inpainting controlnet to achieve multilingual text rendering. Anytext: It is the SOTA open-sourced T2I method that supports multilin" + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.405, + 0.9, + 0.535 + ], + "angle": 0, + "content": "
Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑ | FG Ext. Ratio ↓
SD3_inpaint_AnyText | 52.78% | 75.27% | 100.87 | 26.90 | 14.82%
SD3_inpaint_byt5 | 52.28% | 86.57% | 65.45 | 26.71 | 14.60%
AnyText | 63.90% | 82.81% | 71.27 | 26.69 | 19.25%
Glyph-ByT5-v2 | 69.54% | 87.65% | 79.23 | 26.60 | 18.91%
SD3_canny&inpaint | 80.75% | 92.75% | 67.19 | 27.03 | 14.38%
GlyphDraw2 | 86.14% | 96.78% | 72.49 | 26.72 | 16.52%
GT (w/ SD1.5 Rec.) | 76.95% | 89.91% | - | - | -
GT (w/ SD3 Rec.) | 98.09% | 99.36% | - | - | -
GT | 98.53% | 99.59% | - | - | -
Ours (SD1.5) | 72.12% | 88.01% | 68.17 | 26.93 | -
Ours | 93.36% | 98.39% | 65.35 | 27.04 | 11.57%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.576, + 0.536, + 0.843, + 0.549 + ], + "angle": 0, + "content": "Table 1. Comparison with baseline methods." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.553, + 0.907, + 0.718 + ], + "angle": 0, + "content": "gual text rendering and its text editing mode supports text inpainting [42]. So we directly finetune it on our data using its text editing training pipeline. SD3_inpaint_Anytext: First generate the background with SD3 inpainting control-net, then render the text on the corresponding region using Anytext. Glyph-ByT5-v2 and GlyphDraw2: They are both the SOTA T2I methods that support multilingual text rendering [26, 28]. However, they don't have open-sourced pre-trained weights, so we reproduced them on our dataset. And we added an inpainting controlnet for them to support subject-preserved generation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.72, + 0.909, + 0.902 + ], + "angle": 0, + "content": "Quantitative Comparison. We trained all baseline models on the same dataset, and then quantitatively compared all methods on the PosterBenchmark, as shown in Tab. 1. It is worth noting that SD3 is used as the base model by default, but since we observed that the SD1.5 VAE leads to significant error in reconstruction, to enable a more equitable comparison between our method and AnyText (SD1.5 architecture), we also implemented an SD1.5 version of PosterMaker with the same experimental setup as AnyText. As the VAEs, especially SD1.5, introduce some reconstruction error and the OCR model may incorrectly recognize some characters, we also report the metrics on ground truth" + }, + { + "type": "page_footnote", + "bbox": [ + 0.109, + 0.887, + 0.318, + 0.901 + ], + "angle": 0, + "content": "Details can be found in the Appendix." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.131, + 0.089, + 0.442, + 0.201 + ], + "angle": 0, + "content": "
Text Feature | Type | Sen. ACC | NED
ByT5 | textual feat. | 33.48% | 54.50%
Canny | img | 81.50% | 92.72%
TrOCR Line | visual feat. | 26.58% | 49.46%
TrOCR Char | visual feat. | 94.27% | 98.54%
PPOCR Line | visual feat. | 38.91% | 53.86%
PPOCR Char (Ours) | visual feat. | 95.15% | 98.75%
GT (w/o Rec.) | - | 98.53% | 99.59%
GT (w/ SD3 Rec.) | - | 98.09% | 99.36%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.106, + 0.202, + 0.468, + 0.215 + ], + "angle": 0, + "content": "Table 2. Quantitative comparison using various text features." + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.219, + 0.476, + 0.255 + ], + "angle": 0, + "content": "
Method | FG Ext. Ratio ↓ | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑
Ours | 11.57% | 93.36% | 98.39% | 65.35 | 27.04
Ours w/o \(\mathcal{L}_{reward}\) | 15.05% | 93.11% | 98.21% | 65.10 | 27.04
" + }, + { + "type": "table_caption", + "bbox": [ + 0.106, + 0.255, + 0.467, + 0.269 + ], + "angle": 0, + "content": "Table 3. Evaluation on the subject fidelity feedback learning." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.272, + 0.483, + 0.514 + ], + "angle": 0, + "content": "images as an upper bound. As shown in Tab. 1, our method achieves the best performance on all metrics. Notably, on text rendering metrics Sen. ACC and NED, our model outperforms the baselines by an impressive margin and is already close to the upper bound. The promising results demonstrate the effectiveness of the proposed PosterMaker. Qualitative Comparison. The results are shown in Fig. 8. Compared to the baselines, our PosterMaker generates more readable and accurate poster images with texts, particularly for smaller texts. Notably, as an end-to-end generation method, PosterMaker automatically creates underlays to enhance the contrast between text and background, effectively highlighting the text. This feature is crucial in product poster design for capturing viewers' attention. These findings demonstrate that our PosterMaker successfully learns the distribution of posters created by human designers." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.519, + 0.347, + 0.535 + ], + "angle": 0, + "content": "4.3. Ablation Study and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.539, + 0.484, + 0.902 + ], + "angle": 0, + "content": "How to achieve high text rendering accuracy? We conduct experiments to explore the effectiveness of different control conditions for visual text rendering. Due to the fact that text rendering accuracy is primarily determined by the first training stage, we discard the second training stage in this experiment to save computational resources. The results are summarized in Tab. 2. We observed several valuable experimental results: 1) The use of char-level features significantly outperforms previous line-level features, benefiting from finer-grained representation. This explains why previous methods [4, 28, 42], achieve inferior performance (PPOCR Line is used in [28, 42], TrOCR Line is used in [4]). Recent concurrent works [29, 46] have also found similar experimental findings as ours. 2) Char-level feature representation is superior to low-level image features such as Canny. 3) PPOCR outperforms TrOCR, which is attributed to PPOCR being a multi-language OCR model, while TrOCR is an English version model. 4) Even though TrOCR has not been trained on multi-language text data, it still achieves decent results, likely because it extracts universal visual structural features. 5) ByT5 extracts char-level features but the performance is inferior to OCR features, because it extracts semantic features rather than character structural features, while T2I models' text rendering" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.089, + 0.632, + 0.152 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.635, + 0.089, + 0.903, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.534, + 0.238, + 0.885, + 0.253 + ], + "angle": 0, + "content": "Figure 10. Visual examples showing the effect of \\(\\mathcal{L}_{reward}\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.264, + 0.907, + 0.475 + ], + "angle": 0, + "content": "capability relies more on character structural features. We present visualization results in Fig. 9. 
We observe that when using line-level features as a control, the generated text occasionally becomes completely unrecognizable. This suggests that line-level features are insufficient for achieving precise text rendering. Additionally, it is evident that using canny control always introduces stroke artifacts, particularly in smaller texts (as seen in row 3 of Fig. 9). This further demonstrates that canny control is also not an ideal condition for text rendering. In summary, the char-level feature extracted by PPOCR performs optimally and the accuracy is already close to the upper bound, indicating the discriminative char-level visual feature is the key to achieve high text rendering accuracy." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.476, + 0.909, + 0.658 + ], + "angle": 0, + "content": "Effectiveness of subject fidelity feedback learning. We calculate the foreground extension ratio (termed as FG Ext. Ratio) by asking human annotators to manually check each generated image whether the foreground subject is incorrectly extended. As demonstrated in Tab. 3, training our model with \\(\\mathcal{L}_{reward}\\) effectively reduces FG Ext. Ratio by \\(3.4\\%\\), while maintaining subtle variations in other performance metrics. Representative visual examples are presented in Fig. 10. Besides, our model outperforms baseline methods in FG Ext. Ratio (see Tab. 1). These results show the efficacy of our proposed subject fidelity feedback learning approach in mitigating foreground extension artifacts." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.667, + 0.634, + 0.683 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.689, + 0.907, + 0.901 + ], + "angle": 0, + "content": "The application of image generation in poster creation is often impeded by subpar text rendering and inconsistent subjects. To address these challenges, this paper introduces a novel framework, PosterMaker, which synthesizes aesthetically pleasing product posters with accurate and harmonious texts and contents. Moreover, we reveal that the key underlying successful multilingual text rendering is the construction of robust character-level visual text representations. Additionally, we propose subject fidelity feedback learning to mitigate inconsistencies in subjects. Through extensive experiments, our method demonstrates a significant improvement in both high-precision text generation and subject fidelity. These findings not only advance poster generation but also inspire future research on T2I models." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.251, + 0.108 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.115, + 0.486, + 0.177 + ], + "angle": 0, + "content": "This work was supported by the National Nature Science Foundation of China (62425114, 62121002, U23B2028, 62232006, 62272436) and Alibaba Group (Alibaba Research Intern Program)." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.189, + 0.188, + 0.205 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.214, + 0.484, + 0.284 + ], + "angle": 0, + "content": "[1] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, et al. 
ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.285, + 0.484, + 0.394 + ], + "angle": 0, + "content": "[2] Tingfeng Cao, Junsheng Kong, Xue Zhao, Wenqing Yao, Junwei Ding, Jinhui Zhu, and Jiandong Zhang. Product2img: Prompt-free e-commerce product background generation with diffusion model and self-improved LMM. In Proceedings of the 32nd ACM International Conference on Multimedia, MM 2024, Melbourne, VIC, Australia, 28 October 2024 - 1 November 2024, pages 10774-10783. ACM, 2024. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.396, + 0.484, + 0.478 + ], + "angle": 0, + "content": "[3] Kelvin C. K. Chan, Yang Zhao, Xuhui Jia, Ming-Hsuan Yang, and Huisheng Wang. Improving subject-driven image synthesis with subject-agnostic guidance. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2024, Seattle, WA, USA, June 16-22, 2024, pages 6733-6742. IEEE, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.481, + 0.484, + 0.576 + ], + "angle": 0, + "content": "[4] Haoxing Chen, Zhuoer Xu, Zhangxuan Gu, Jun Lan, Xing Zheng, Yaohui Li, Changhua Meng, Huijia Zhu, and Weiqiang Wang. Diffuse: Universal text editing diffusion model. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.578, + 0.484, + 0.647 + ], + "angle": 0, + "content": "[5] Ruidong Chen, Lanjun Wang, Weizhi Nie, Yongdong Zhang, and An-An Liu. Anyscene: Customized image synthesis with composited foreground. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8724-8733, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.649, + 0.484, + 0.744 + ], + "angle": 0, + "content": "[6] Wenhu Chen, Hexiang Hu, Yandong Li, Nataniel Ruiz, Xuhui Jia, Ming-Wei Chang, and William W. Cohen. Subject-driven text-to-image generation via apprenticeship learning. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.746, + 0.482, + 0.787 + ], + "angle": 0, + "content": "[7] Alimama Creative. Sd3-controlnet-inpainting. https://huggingface.co/alamama-creative/SD3-Controlnet-Inpainting, 2024.6,7,2,4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.789, + 0.482, + 0.83 + ], + "angle": 0, + "content": "[8] Alimama Creative. Sd3-controlnet-softedge. https://huggingface.co/alamama-creative/SD3-Controlnet-Softedge, 2024.7, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.831, + 0.484, + 0.872 + ], + "angle": 0, + "content": "[9] Alimama Creative. Ecomxl-controlnet-inpaint. 
https://huggingface.co/alimama-creative/EcomXL_controlnet_inpaint, 2024.2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.873, + 0.484, + 0.902 + ], + "angle": 0, + "content": "[10] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Xilin Wei, Songyang Zhang, Haodong" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.214, + 0.484, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.907, + 0.176 + ], + "angle": 0, + "content": "Duan, Maosong Cao, Wenwei Zhang, Yining Li, Hang Yan, Yang Gao, Xinyue Zhang, Wei Li, Jingwen Li, Kai Chen, Conghui He, Xingcheng Zhang, Yu Qiao, Dahua Lin, and Jiaqi Wang. Internlm-xcomposer2: Mastering free-form text-image composition and comprehension in vision-language large model. arXiv preprint arXiv:2401.16420, 2024. 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.178, + 0.907, + 0.245 + ], + "angle": 0, + "content": "[11] Zhenbang Du, Wei Feng, Haohan Wang, Yaoyu Li, Jingsen Wang, Jian Li, Zheng Zhang, Jingjing Lv, Xin Zhu, Junsheng Jin, et al. Towards reliable advertising image generation using human feedback. In European Conference on Computer Vision, pages 399-415. Springer, 2024. 2, 4, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.247, + 0.909, + 0.342 + ], + "angle": 0, + "content": "[12] Amir Erfan Eshratifar, Joao V.B. Soares, Kapil Thadani, Shaunak Mishra, Mikhail Kuznetsov, Yueh-Ning Ku, and Paloma De Juan. Salient object-aware background generation using text-guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 7489-7499, 2024. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.344, + 0.906, + 0.426 + ], + "angle": 0, + "content": "[13] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 2, 3, 4, 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.428, + 0.907, + 0.509 + ], + "angle": 0, + "content": "[14] Yifan Gao, Jinpeng Lin, Min Zhou, Chuanbin Liu, Hongtao Xie, Tiezheng Ge, and Yuning Jiang. Textpainter: Multimodal text image generation with visual-harmony and text-comprehension for poster design. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7236-7246, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.511, + 0.906, + 0.607 + ], + "angle": 0, + "content": "[15] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 6626-6637, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.609, + 0.906, + 0.677 + ], + "angle": 0, + "content": "[16] Peidong Jia, Chenxuan Li, Yuhui Yuan, Zeyu Liu, Yichao Shen, Bohan Chen, Xingru Chen, Yinglin Zheng, Dong Chen, Ji Li, Xiaodong Xie, Shanghang Zhang, and Baining Guo. Cole: A hierarchical generation framework for multilayered and editable graphic design, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.679, + 0.906, + 0.745 + ], + "angle": 0, + "content": "[17] Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. Segment anything in high quality. In Advances in Neural Information Processing Systems, pages 29914–29934. Curran Associates, Inc., 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.748, + 0.906, + 0.83 + ], + "angle": 0, + "content": "[18] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollar, and Ross Girshick. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4015-4026, 2023. 5, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.832, + 0.906, + 0.872 + ], + "angle": 0, + "content": "[19] Chao Li, Chen Jiang, Xiaolong Liu, Jun Zhao, and Guoxin Wang. Joytype: A robust design for multilingual visual text creation. arXiv preprint arXiv:2409.17524, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.874, + 0.906, + 0.901 + ], + "angle": 0, + "content": "[20] Dongxu Li, Junnan Li, and Steven C. H. Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.909, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.092, + 0.484, + 0.148 + ], + "angle": 0, + "content": "image generation and editing. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.484, + 0.219 + ], + "angle": 0, + "content": "[21] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.22, + 0.483, + 0.288 + ], + "angle": 0, + "content": "[22] Zhaochen Li, Fengheng Li, Wei Feng, Honghe Zhu, An Liu, Yaoyu Li, Zheng Zhang, Jingjing Lv, Xin Zhu, Junjie Shen, et al. Planning and rendering: Towards end-to-end product poster generation. arXiv preprint arXiv:2312.08822, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.291, + 0.483, + 0.373 + ], + "angle": 0, + "content": "[23] Jinpeng Lin, Min Zhou, Ye Ma, Yifan Gao, Chenxi Fei, Yangjian Chen, Zhang Yu, and Tiezheng Ge. Autoposter: A highly automatic and content-aware design system for advertising poster generation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 1250–1260, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.375, + 0.483, + 0.417 + ], + "angle": 0, + "content": "[24] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.418, + 0.483, + 0.487 + ], + "angle": 0, + "content": "[25] Zeyu Liu, Weicong Liang, Zhanhao Liang, Chong Luo, Ji Li, Gao Huang, and Yuhui Yuan. 
Glyph-byt5: A customized text encoder for accurate visual text rendering. In European Conference on Computer Vision, pages 361-377. Springer, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.489, + 0.483, + 0.545 + ], + "angle": 0, + "content": "[26] Zeyu Liu, Weicong Liang, Yiming Zhao, Bohan Chen, Ji Li, and Yuhui Yuan. Glyph-byt5-v2: A strong aesthetic baseline for accurate multilingual visual text rendering. arXiv preprint arXiv:2406.10208, 2024. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.546, + 0.483, + 0.602 + ], + "angle": 0, + "content": "[27] Zhiying Lu, Chuanbin Liu, Xiaojun Chang, Yongdong Zhang, and Hongtao Xie. Dhvt: Dynamic hybrid vision transformer for small dataset recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.603, + 0.483, + 0.659 + ], + "angle": 0, + "content": "[28] Jian Ma, Yonglin Deng, Chen Chen, Haonan Lu, and Zhenyu Yang. Glyphdraw2: Automatic generation of complex glyph posters with diffusion models and large language models. arXiv preprint arXiv:2407.02252, 2024. 2, 3, 5, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.66, + 0.483, + 0.715 + ], + "angle": 0, + "content": "[29] Lichen Ma, Tiezhu Yue, Pei Fu, Yujie Zhong, Kai Zhou, Xiaoming Wei, and Jie Hu. Chargen: High accurate character-level visual text generation model with multimodal encoder. arXiv preprint arXiv:2412.17225, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.716, + 0.483, + 0.785 + ], + "angle": 0, + "content": "[30] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. SDEdit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.787, + 0.483, + 0.829 + ], + "angle": 0, + "content": "[31] ModelScope. https://modelscope.cn/models/damo/cv_convnextTinyOCR-recognition-general_damo/summary,2023.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.83, + 0.483, + 0.858 + ], + "angle": 0, + "content": "[32] OpenAI. https://openai.com/index/hello-gpt-4o/, 2024.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.859, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[33] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al." + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.092, + 0.905, + 0.121 + ], + "angle": 0, + "content": "Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.905, + 0.149 + ], + "angle": 0, + "content": "[34] PaddlePaddle. https://github.com/PaddlePaddle/PaddleOCR, 2023.6,2,3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.151, + 0.905, + 0.247 + ], + "angle": 0, + "content": "[35] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.25, + 0.905, + 0.332 + ], + "angle": 0, + "content": "[36] Tianhao Qi, Shancheng Fang, Yanze Wu, Hongtao Xie, Jiawei Liu, Lang Chen, Qian He, and Yongdong Zhang. Deadiff: An efficient stylization diffusion model with disentangled representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8693-8702, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.334, + 0.905, + 0.39 + ], + "angle": 0, + "content": "[37] Xuebin Qin, Zichen Zhang, Chenyang Huang, Masood Dehghan, Osmar Zaiane, and Martin Jagersand. U2-net: Going deeper with nested u-structure for salient object detection. page 107404, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.392, + 0.905, + 0.462 + ], + "angle": 0, + "content": "[38] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and PeterJ. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv: Learning, arXiv: Learning, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.463, + 0.905, + 0.545 + ], + "angle": 0, + "content": "[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 10674-10685. IEEE, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.548, + 0.905, + 0.63 + ], + "angle": 0, + "content": "[40] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.632, + 0.905, + 0.716 + ], + "angle": 0, + "content": "[41] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.717, + 0.905, + 0.787 + ], + "angle": 0, + "content": "[42] Yuxiang Tuo, Wangmeng Xiang, Jun-Yan He, Yifeng Geng, and Xuansong Xie. Anytext: Multilingual visual text generation and editing. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 2, 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.788, + 0.905, + 0.871 + ], + "angle": 0, + "content": "[43] Haohan Wang, Wei Feng, Yaoyu Li, Zheng Zhang, Jingjing Lv, Junjie Shen, Zhangang Lin, and Jingping Shao. Generate e-commerce product background by integrating category commonality and personalized style. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2025. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.873, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[44] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, and Anthony Chen. 
Instantid: Zero-shot identity-preserving gener" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.092, + 0.482, + 0.119 + ], + "angle": 0, + "content": "ation in seconds. arXiv preprint arXiv:2401.07519, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.483, + 0.218 + ], + "angle": 0, + "content": "[45] Shaodong Wang, Yunyang Ge, Liuhan Chen, Haiyang Zhou, Qian Wang, Xinhua Cheng, and Li Yuan. Prompt2poster: Automatically artistic chinese poster creation from prompt only. In Proceedings of the 32nd ACM International Conference on Multimedia, MM 2024, Melbourne, VIC, Australia, 28 October 2024 - 1 November 2024, pages 10716-10724. ACM, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.22, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[46] Tong Wang, Xiaochao Qu, and Ting Liu. Textmastero: Mastering high-quality scene text editing in diverse languages and styles. arXiv preprint arXiv:2408.10623, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.263, + 0.482, + 0.344 + ], + "angle": 0, + "content": "[47] Jiazheng Xu, Xiao Liu, Yuchen Wu, Yuxuan Tong, Qinkai Li, Ming Ding, Jie Tang, and Yuxiao Dong. Imagereward: Learning and evaluating human preferences for text-to-image generation. In Advances in Neural Information Processing Systems, pages 15903-15935. Curran Associates, Inc., 2023. 5, 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.347, + 0.482, + 0.415 + ], + "angle": 0, + "content": "[48] Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, and Colin Raffel. ByT5: Towards a token-free future with pre-trained byte-to-byte models. Transactions of the Association for Computational Linguistics, 10:291-306, 2022. 7, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.418, + 0.482, + 0.513 + ], + "angle": 0, + "content": "[49] Yukang Yang, Dongnan Gui, Yuhui Yuan, Weicong Liang, Haisong Ding, Han Hu, and Kai Chen. Glyphcontrol: Glyph conditional control for visual text generation. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.516, + 0.482, + 0.569 + ], + "angle": 0, + "content": "[50] Jingfeng Yao, Xinggang Wang, Shusheng Yang, and Baoyuan Wang. Vitmatte: Boosting image matting with pretrained plain vision transformers. Information Fusion, 103: 102091, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.572, + 0.482, + 0.625 + ], + "angle": 0, + "content": "[51] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.629, + 0.482, + 0.682 + ], + "angle": 0, + "content": "[52] Boqiang Zhang, Zuan Gao, Yadong Qu, and Hongtao Xie. How control information influences multilingual text image generation and editing? arXiv preprint arXiv:2407.11502, 2024. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.686, + 0.482, + 0.74 + ], + "angle": 0, + "content": "[53] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. 
Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.743, + 0.482, + 0.796 + ], + "angle": 0, + "content": "[54] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In Computer Vision and Pattern Recognition, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.799, + 0.482, + 0.854 + ], + "angle": 0, + "content": "[55] Yuanzhi Zhu, Jiawei Liu, Feiyu Gao, Wenyu Liu, Xinggang Wang, Peng Wang, Fei Huang, Cong Yao, and Zhibo Yang. Visual text generation in the wild. In European Conference on Computer Vision, pages 89-106. Springer, 2024. 3" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.854 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.175, + 0.086, + 0.825, + 0.131 + ], + "angle": 0, + "content": "PosterMaker: Towards High-Quality Product Poster Generation with Accurate Text Rendering" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.142, + 0.615, + 0.163 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.18, + 0.484, + 0.242 + ], + "angle": 0, + "content": "Due to space limitations, we were unable to present all experimental results in the main text. In this supplementary material, we will give more details about our experiments and present additional results." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.257, + 0.313, + 0.274 + ], + "angle": 0, + "content": "6. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.282, + 0.484, + 0.357 + ], + "angle": 0, + "content": "Training and Inference. We fully follow the settings of SD3 [13]. During training, the denoise loss \\(\\mathcal{L}_{\\mathrm{denoise}}\\) uses simplified flow matching, also known as 0-rectified flow matching loss [24]. In inference, we also use the inference method of flow matching, with 28 inference steps." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.358, + 0.484, + 0.479 + ], + "angle": 0, + "content": "TextRenderNet and SceneGenNet. TextRenderNet and SceneGenNet have an architecture similar to SD3 [13], composed of multiple MM-DiT Blocks. In our implementation, TextRenderNet consists of 12 layers of MM-DiT Blocks, while SceneGenNet consists of 23 layers of MM-DiT Blocks. The output of the \\( N_{i} \\)-th block of SceneGenNet is first added with the output of the \\( \\left\\lceil \\frac{N_i}{2} \\right\\rceil \\)-th block of TextRenderNet, and then add to the \\( N_{i} \\)-th SD3 block." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.48, + 0.484, + 0.57 + ], + "angle": 0, + "content": "Classifier-Free Guidance. We use CFG during inference, with a CFG scale of 5. Additionally, since the \"prompt\" inputted to TextRenderNet is not a caption but a text representation, the negative one for CFG is set to a zero vector. During training, we randomly drop the text representation to a zero vector with \\(10\\%\\) probability." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.571, + 0.484, + 0.646 + ], + "angle": 0, + "content": "The Setting of \\( t_1 \\) in Reward Loss. We follow [47] to train the reward loss at the last 10 inference steps, i.e., we set \\( t_1 \\) to 10. 
Within the range of \\( t' \\sim [1, t_1] \\), the result of the image \\( x_0 \\) obtained by one-step inference is close to the result of complete inference." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.647, + 0.484, + 0.767 + ], + "angle": 0, + "content": "Details about Metric Calculation. Our evaluation benchmark contains samples generated by LLM [10] thus there is no ground truth for these samples. Therefore, we exclude these LLM-generated samples when calculating metrics that depend on ground truth images, i.e., FID metric for all experiments, text accuracy metrics for GT (with and without VAE reconstruction) and results for ablation on different text features." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.769, + 0.484, + 0.83 + ], + "angle": 0, + "content": "About ground truth for training Foreground Extension Detector. We treat the task of detecting foreground extension as a binary classification problem and ask annotators to manually label the ground truth." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.845, + 0.249, + 0.86 + ], + "angle": 0, + "content": "7. Baseline Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.901 + ], + "angle": 0, + "content": "We carefully designed 6 baseline approaches based on existing techniques for comparative analysis. The de" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.178, + 0.925, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.263, + 0.816, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.37, + 0.884, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.485, + 0.814, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.554, + 0.811, + 0.644 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.646, + 0.807, + 0.749 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.769, + 0.907, + 0.796 + ], + "angle": 0, + "content": "Figure 11. Detailed illustration of the implementation of the different baseline methods." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.907, + 0.901 + ], + "angle": 0, + "content": "tails are shown in Fig. 11. For 1) SD3_inpaint_byt5, 2) SD3_canny&inpaint, and 4) AnyText, we fine-tune them on our 160K dataset for the poster generation task. Meanwhile, 3) SD3_inpaint_Anytext is a two-stage inference method. In the first stage, the pre-trained Inpaint ControlNet gener" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.258 + ], + "angle": 0, + "content": "ates the background, and in the second stage, AnyText performs the text editing task, with AnyText also fine-tuned on the 160K dataset specifically for the text editing task. The Inpainting ControlNet is initialized from pre-trained SD3 Inpainting-ControlNet [7] and Canny ControlNet is initialized from [8]. For 5) GlyphDraw2 [28] and 6) Glyph-ByT5-v2 [26] are both the SOTA T2I methods that support multilingual text rendering. However, they neither have open-source pre-trained weights nor support subject input, so we reproduced them on our dataset by adding the pre-trained inpainting controlnet [9] to support the subject input." 
+ }, + { + "type": "title", + "bbox": [ + 0.09, + 0.272, + 0.427, + 0.29 + ], + "angle": 0, + "content": "8. Scalable Training for Text Rendering" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.298, + 0.485, + 0.584 + ], + "angle": 0, + "content": "Our proposed two-stage training strategy allows the model to learn two different capabilities (i.e., text rendering and scene generation) separately, enabling more flexibility with distinct datasets for each phase. Recent text rendering methods [4, 25, 26, 42] typically train their models on datasets containing millions of samples. To verify the potential of further improving our performance with more training data, we build a large dataset with 1 million samples and we directly obtain the text annotations with PPOCRv4 [34] without manually annotating. And we use this dataset for the first stage of text rendering training and use the same 160k data for the second stage of scene generation learning. Compared to using 160k data in both of the previous stages, the text sentence accuracy significantly improved by \\(4.48\\%\\) (as shown in Tab. 4), demonstrating that the multistage training strategy is flexible and scalable. However, in the main experiments, we select to report the performance of our model training only on 160k data for fair comparison with the baselines." + }, + { + "type": "table", + "bbox": [ + 0.137, + 0.595, + 0.439, + 0.64 + ], + "angle": 0, + "content": "
Data Size (St.1 & St.2) | Sen. ACC | NED
160k & 160k | 93.11% | 98.21%
1M & 160k | 97.59% | 99.38%
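As a concrete illustration of how the two capabilities (text rendering and scene generation) can be learned separately, the sketch below shows stage-wise freezing in PyTorch. The module and optimizer names are hypothetical placeholders, not the released training code.

```python
import torch

def configure_stage(text_render_net, scene_gen_net, adapter, stage: int):
    """Stage 1 trains TextRenderNet (+ adapter) with SceneGenNet frozen;
    stage 2 trains SceneGenNet with TextRenderNet frozen."""
    train_stage1 = (stage == 1)
    for p in text_render_net.parameters():
        p.requires_grad_(train_stage1)
    for p in adapter.parameters():
        p.requires_grad_(train_stage1)
    for p in scene_gen_net.parameters():
        p.requires_grad_(not train_stage1)

    trainable = [p for m in (text_render_net, adapter, scene_gen_net)
                 for p in m.parameters() if p.requires_grad]
    return torch.optim.AdamW(trainable, lr=1e-5)
```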
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.65, + 0.483, + 0.679 + ], + "angle": 0, + "content": "Table 4. Quantitative comparison with different data sizes for text rendering training." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.688, + 0.483, + 0.723 + ], + "angle": 0, + "content": "9. Discussion on advantages of end-to-end over two-stage methods." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.732, + 0.483, + 0.839 + ], + "angle": 0, + "content": "The main weakness of two-stage methods (first inpaint background, then render text) is their inability to consistently provide a clean background for texts (see Fig. 12, reducing text readability, especially with complex backgrounds. In contrast, one-stage methods generate texts and backgrounds simultaneously, enabling them to create a clean backdrop or underlays that enhance text visibility." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.845, + 0.305, + 0.861 + ], + "angle": 0, + "content": "10. Text Position Control" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "The position control of PosterMaker uses a very straightforward approach (as shown in Fig. 13), mapping the text" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.084, + 0.642, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.083, + 0.776, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.779, + 0.083, + 0.906, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.25, + 0.892, + 0.264 + ], + "angle": 0, + "content": "Figure 12. Showcases for end-to-end and two-stage methods." + }, + { + "type": "table", + "bbox": [ + 0.57, + 0.277, + 0.851, + 0.307 + ], + "angle": 0, + "content": "
Method | mIoU | IoU@0.5 | IoU@0.7
Ours | 84.65% | 97.18% | 93.94%
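The position control evaluated here maps the text bounding box to a sinusoidal (Fourier) embedding that is concatenated with the character features, together with a character-order encoding. A minimal sketch of such an embedding is given below; the frequency base and dimensions are illustrative assumptions, since the exact values are not specified in the text.

```python
import math
import torch

def fourier_embed(values: torch.Tensor, dim: int = 64) -> torch.Tensor:
    """Map scalars in [0, 1] to sinusoidal embeddings of size 2*dim."""
    freqs = torch.pow(100.0, torch.arange(dim, dtype=torch.float32) / dim)
    angles = values[..., None] * freqs * math.pi
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)

def text_position_embedding(bbox, n_chars: int, dim: int = 64) -> torch.Tensor:
    """Build P_bbox (shared across characters) and P_rank (per character order).

    bbox: (x1, y1, x2, y2) normalized to [0, 1] by the image width/height.
    Returns an (n_chars, 4*2*dim + 2*dim) tensor to concatenate with R_c.
    """
    box = torch.tensor(bbox, dtype=torch.float32)
    p_bbox = fourier_embed(box, dim).reshape(-1)           # 4 coords -> one vector
    ranks = torch.arange(n_chars, dtype=torch.float32) / max(n_chars - 1, 1)
    p_rank = fourier_embed(ranks, dim)                     # (n_chars, 2*dim)
    return torch.cat([p_bbox.expand(n_chars, -1), p_rank], dim=-1)
```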
" + }, + { + "type": "table_caption", + "bbox": [ + 0.574, + 0.318, + 0.846, + 0.332 + ], + "angle": 0, + "content": "Table 5. Evaluation on text location accuracy." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.356, + 0.907, + 0.599 + ], + "angle": 0, + "content": "bounding box to cosine position encoding, which is then concatenated with text features and used as the input to TextRenderNet. To demonstrate our method's effectiveness, we evaluate the bounding box IoU (Intersection of Union) metric as follows: 1) we employ OCR model to extract texts from the generated image. 2) For each ground truth text, we identify the best-matched OCR-detected text based on edit distance and then calculate the IoU between their corresponding bounding boxes. We average the IoU score over all the samples to obtain mean IoU (termed mIoU). And we also report IoU@R which indicates the proportion of samples with IoU higher than \\( R \\). As shown in Tab. 5, our method achieves a high mIoU of \\( 84.65\\% \\) and \\( 93.94\\% \\) samples have an IoU score higher than 0.7. These promising results prove that our text position control method is simple yet effective." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.614, + 0.918, + 0.713 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.725, + 0.907, + 0.754 + ], + "angle": 0, + "content": "Figure 13. Detailed illustration of how we construct the position embedding for controlling the text position." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.782, + 0.907, + 0.816 + ], + "angle": 0, + "content": "11. Comparison Between GlyphByT5 and PosterMaker" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.906, + 0.901 + ], + "angle": 0, + "content": "GlyphByT5 [25, 26] are recently proposed visual text rendering methods that achieve high text rendering accuracy. And we will discuss some differences and internal connections between our PosterMaker and GlyphByT5 on how to control text rendering." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.482, + 0.211 + ], + "angle": 0, + "content": "- Text position control: GlyphByT5 achieve text position control by modifying the original cross-attention module with their proposed region-wise multi-head cross-attention. In contrast, our PosterMaker encodes the text location directly into the character-level text representation to accomplish text position control. As discussed in Sec. 10, our approach is both simple and effective for precise text location control." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.213, + 0.486, + 0.651 + ], + "angle": 0, + "content": "- Text content control: both GlyphByT5 and our PosterMaker control the generation of text content by constructing suitable text representation. Specifically, in this work, we claim that the key to achieve accurate text rendering is to extract character-level visual features as the control condition and carefully construct a robust text representation based on off-the-shelf OCR model [34]. In GlyphByT5, the authors also extract character-level text features, but with a textual encoder named ByT5 [48]. Then they propose glyph-alignment pre-training to align these textual features with pre-trained visual encoders DINOv2 [33]. 
Additionally, they employ box-level contrastive learning with complex augmentations and a hard-mining strategy to enhance character-level discriminativeness. We hypothesize that the primary reason both our method and GlyphByT5 achieve high text rendering accuracy is our shared goal of constructing a robust character-level visual representation. In fact, the ability of GlyphByT5's character-level visual representation is distilled from the pre-trained visual encoder DINOv2, rather than inherited from the pre-trained textual encoder ByT5 itself. In order to verify our hypothesis and insights, we adopt a more direct approach to directly replace the PPOCR encoder in PosterMaker with DINOv2. As shown in Tab. 6, simply extracting character-wise visual features with DINOv2 can also achieve precise text rendering. This result further verifies our claim: the key to precise text rendering is to extract character-level visual features as the control condition." + }, + { + "type": "table", + "bbox": [ + 0.111, + 0.66, + 0.465, + 0.761 + ], + "angle": 0, + "content": "
Text Feature | Type | Sen. ACC | NED
PPOCR Line | visual feat. | 38.91% | 53.86%
PPOCR Char | visual feat. | 95.15% | 98.75%
DINOv2 Line | visual feat. | 4.25% | 20.59%
DINOv2 Char | visual feat. | 94.92% | 98.66%
GT (w/o Rec.) | - | 98.53% | 99.59%
GT (w/ SD3 Rec.) | - | 98.09% | 99.36%
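The character-level features compared above are obtained by rendering each character as a glyph image and pooling a visual feature per character (Eq. 2-3 of the main paper). Below is a simplified sketch of that idea; the font path is a placeholder and `encoder` stands in for the frozen OCR or DINOv2 image encoder, not a specific released checkpoint.

```python
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont

FONT_PATH = "NotoSansCJK-Regular.ttc"  # placeholder: any font covering the target characters

def render_glyph(char: str, size: int = 48) -> Image.Image:
    """Render one character on a white canvas with a fixed font."""
    img = Image.new("L", (size, size), color=255)
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype(FONT_PATH, int(size * 0.8))
    draw.text((size // 2, size // 2), char, fill=0, font=font, anchor="mm")
    return img

@torch.no_grad()
def char_level_representation(text: str, encoder: torch.nn.Module) -> torch.Tensor:
    """Pool one visual feature per character and stack them into R_c (n x c).

    `encoder` is any frozen image encoder mapping a (1, 1, H, W) tensor to a
    (1, c, h, w) feature map; average pooling it gives r_{c_i}.
    """
    feats = []
    for ch in text:
        glyph = torch.from_numpy(np.asarray(render_glyph(ch), dtype=np.float32) / 255.0)
        fmap = encoder(glyph[None, None])                 # (1, c, h, w)
        feats.append(fmap.mean(dim=(2, 3)).squeeze(0))    # average pooling -> r_{c_i}, shape (c,)
    return torch.stack(feats, dim=0)                      # R_c, shape (n, c)
```

Because the character set is fixed, these per-character features can be pre-extracted once and stored in a lookup dictionary rather than rendered online.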
" + }, + { + "type": "table_caption", + "bbox": [ + 0.106, + 0.77, + 0.468, + 0.785 + ], + "angle": 0, + "content": "Table 6. Quantitative comparison using various text features." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.83, + 0.409, + 0.848 + ], + "angle": 0, + "content": "12. Visualization of Training Samples" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.483, + 0.903 + ], + "angle": 0, + "content": "We present example training images from our dataset in Fig. 17. The dataset predominantly consists of Chinese text, with a small portion of English text. Additionally, it in" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.885, + 0.108 + ], + "angle": 0, + "content": "cludes challenging cases with small-sized text elements." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.121, + 0.905, + 0.139 + ], + "angle": 0, + "content": "13. The Generalization of Text Representation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.147, + 0.907, + 0.358 + ], + "angle": 0, + "content": "PosterMaker is trained primarily on common Chinese data, with only a minimal amount of English data. Despite this, it demonstrates a notable level of generalization, enabling it to generate English, Japanese, and uncommon Chinese characters that were not included in the training set (as shown in Fig. 16). In order to quantitatively evaluate the generalization capability of PosterMaker, we compared the accuracy of different text representations on uncommon characters using a randomly sampled uncommon character benchmark. The results show that our method can also generalize well to some characters that are unseen in the training set. Our performance is inferior to the canny baseline, likely because the canny baseline has been pre-trained on large-scale image data." + }, + { + "type": "table", + "bbox": [ + 0.545, + 0.371, + 0.878, + 0.444 + ], + "angle": 0, + "content": "
Text Feature | Type | Sen. ACC | NED
ByT5 | textual feat. | 2.01% | 10.27%
Canny | img | 65.12% | 74.56%
PPOCR Line | visual feat. | 8.34% | 15.84%
PPOCR Char | visual feat. | 61.54% | 70.38%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.453, + 0.906, + 0.482 + ], + "angle": 0, + "content": "Table 7. Quantitative comparison of the rendering results of different text features on uncommon characters." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.525, + 0.905, + 0.559 + ], + "angle": 0, + "content": "14. Ablation about Foreground Extension Detector" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.569, + 0.909, + 0.766 + ], + "angle": 0, + "content": "We collected \\(20\\mathrm{k}\\) manually annotated images to train the foreground extension detector. We randomly selected \\(10\\%\\) samples as a validation set, while using the remaining \\(90\\%\\) for model training. We conduct ablation experiments on different architecture designs of the detector to verify the effectiveness of the proposed architecture. We implement 2 baselines: 1) RFNet [11]: we reimplemented RFNet based on the description in their paper [11]. Since we could not access their depth and saliency detection models, we modified our implementation to only use the product image and generated image as input, excluding the depth and saliency maps. 2) RFNet(SAM): in this baseline, we replace the image encoder used in RFNet with the same SAM[18] im" + }, + { + "type": "table", + "bbox": [ + 0.551, + 0.788, + 0.871, + 0.848 + ], + "angle": 0, + "content": "
Method | Precision | Recall | F1 Score
RFNet (our impl.) | 76.52% | 75.52% | 76.02%
RFNet (SAM) | 81.35% | 80.99% | 81.17%
Ours | 83.52% | 84.81% | 84.16%
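For a concrete picture of the classifier being compared here, the snippet below sketches a toy detector head in the spirit described in the main paper: encoder features are concatenated with M_s, M_i and M_s - M_i along the channel dimension and reduced by conv and MLP layers to a single extension score. Layer sizes are arbitrary; the actual model is built on HQ-SAM and trained on the 20k annotated images.

```python
import torch
import torch.nn as nn

class ExtensionHead(nn.Module):
    """Toy stand-in for the foreground-extension classifier head."""

    def __init__(self, feat_dim: int = 256, hidden: int = 128):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(feat_dim + 3, hidden, 3, stride=2, padding=1), nn.GELU(),
            nn.Conv2d(hidden, hidden, 3, stride=2, padding=1), nn.GELU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.mlp = nn.Sequential(nn.Linear(hidden, hidden), nn.GELU(), nn.Linear(hidden, 1))

    def forward(self, feats, m_s, m_i):
        # feats: (B, C, H, W) image features; m_s, m_i: (B, 1, H, W) subject / intermediate masks
        x = torch.cat([feats, m_s, m_i, m_s - m_i], dim=1)
        x = self.conv(x).flatten(1)
        return torch.sigmoid(self.mlp(x)).squeeze(1)  # probability of foreground extension

# usage with random tensors, only to show the expected shapes
head = ExtensionHead()
score = head(torch.randn(2, 256, 64, 64), torch.rand(2, 1, 64, 64), torch.rand(2, 1, 64, 64))
```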
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.856, + 0.905, + 0.884 + ], + "angle": 0, + "content": "Table 8. Evaluation on different architectures of foreground extension detector." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.124, + 0.092, + 0.164, + 0.104 + ], + "angle": 0, + "content": "Subject" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.137, + 0.215, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.244, + 0.093, + 0.331, + 0.103 + ], + "angle": 0, + "content": "Generated Image" + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.105, + 0.353, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.376, + 0.093, + 0.452, + 0.103 + ], + "angle": 0, + "content": "Activation Map" + }, + { + "type": "image", + "bbox": [ + 0.356, + 0.105, + 0.48, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.239, + 0.223, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.294, + 0.334, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.376, + 0.294, + 0.461, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.4, + 0.483, + 0.428 + ], + "angle": 0, + "content": "Figure 14. Class activation map of the foreground extension detector." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.453, + 0.483, + 0.512 + ], + "angle": 0, + "content": "age encoder used in our method. As summarized in Tab. 8, our proposed foreground extension detector outperforms the baselines by a considerable margin, which demonstrates its effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.514, + 0.484, + 0.635 + ], + "angle": 0, + "content": "In Fig. 14, we visualize the class activation map [54] of our proposed foreground extension detector. As shown, we can observe a notably higher activation score in the extended foreground regions compared to other areas. This compelling evidence demonstrates that our detector has effectively learned to discern foreground extension cases, thereby it can serve as a robust reward model for fine-tuning PosterMaker to mitigate the foreground extension problem." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.647, + 0.37, + 0.663 + ], + "angle": 0, + "content": "15. Ablation about SceneGenNet" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.673, + 0.483, + 0.749 + ], + "angle": 0, + "content": "SceneGenNet enables our model to perform background inpainting while preserve the subject so we cannot directly remove it. We replace it by SDEdit [30] to achieve inpainting. As the results shown in Sec. 15, replacing it results in a significant drop of performance." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.759, + 0.482, + 0.799 + ], + "angle": 0, + "content": "
Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑
Ours w/o SceneGenNet | 90.53% | 97.95% | 79.44 | 26.67
Ours | 93.36% | 98.39% | 65.35 | 27.04
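For reference, one common way to realize such an SDEdit-style inpainting baseline is to start from a partially noised latent and re-impose the known subject region at every denoising step. The sketch below is only a schematic with hypothetical `denoise_step` and `add_noise` callables, not the implementation used in this ablation.

```python
import torch

@torch.no_grad()
def sdedit_inpaint(z_subject, subject_mask, denoise_step, add_noise, num_steps=28, strength=0.8):
    """Schematic SDEdit-style background inpainting baseline.

    z_subject    : clean latent containing the subject to preserve
    subject_mask : 1 inside the subject region, 0 where the scene is generated
    denoise_step(z, t) -> latent at the next (less noisy) step   [hypothetical]
    add_noise(z, t)    -> forward-diffused latent at step t      [hypothetical]
    """
    start = int(num_steps * strength)
    z = add_noise(z_subject, start)        # SDEdit: begin from a partially noised latent
    for t in range(start, 0, -1):
        z = denoise_step(z, t)
        # re-impose the known subject region at the matching noise level
        z = subject_mask * add_noise(z_subject, t - 1) + (1 - subject_mask) * z
    return z
```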
" + }, + { + "type": "table_caption", + "bbox": [ + 0.118, + 0.803, + 0.456, + 0.817 + ], + "angle": 0, + "content": "Table 9. Comparison between SceneGenNet and SDEdit" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.845, + 0.483, + 0.863 + ], + "angle": 0, + "content": "16. Discussion on the impact of the test set size." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.901 + ], + "angle": 0, + "content": "To ensure a fairer comparison between PosterMaker and the baseline methods, we expanded the test set to 5,000 sam" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.906, + 0.182 + ], + "angle": 0, + "content": "plies(10x the previous PosterBenchmark). The results are shown in Tab. 10, and the experimental conclusions remain consistent with the previous test set. Due to the calculation principle of the FID metric, increasing the test size leads to a significant decrease in the FID scores for all methods, but still maintains the same conclusion." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.192, + 0.905, + 0.257 + ], + "angle": 0, + "content": "
Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑
Glyph-ByT5-v2 | 67.87% | 86.23% | 20.37 | 21.08
SD3_canny&inpaint | 74.49% | 88.78% | 17.91 | 20.79
GlyphDraw2 | 83.81% | 96.49% | 15.24 | 20.67
Ours | 90.20% | 97.58% | 13.36 | 21.36
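The CLIP-T column above measures prompt-image alignment as the cosine similarity between CLIP image and text embeddings. A minimal sketch using the Hugging Face transformers CLIP wrappers is shown below; the checkpoint choice and the absence of any rescaling are illustrative assumptions rather than the paper's exact evaluation setup.

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

@torch.no_grad()
def clip_t(image: Image.Image, prompt: str) -> float:
    """Cosine similarity between the CLIP embeddings of a generated poster and its prompt."""
    inputs = processor(text=[prompt], images=image, return_tensors="pt",
                       padding=True, truncation=True)
    img_emb = model.get_image_features(pixel_values=inputs["pixel_values"])
    txt_emb = model.get_text_features(input_ids=inputs["input_ids"],
                                      attention_mask=inputs["attention_mask"])
    img_emb = img_emb / img_emb.norm(dim=-1, keepdim=True)
    txt_emb = txt_emb / txt_emb.norm(dim=-1, keepdim=True)
    return float((img_emb * txt_emb).sum())  # reported CLIP-T scores are often rescaled
```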
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.263, + 0.905, + 0.291 + ], + "angle": 0, + "content": "Table 10. Comparison with baseline methods on 5,000 test samples." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.316, + 0.905, + 0.351 + ], + "angle": 0, + "content": "17. Discussion on the meaningless texts generated outside target position." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.359, + 0.906, + 0.479 + ], + "angle": 0, + "content": "In our early experimental attempts about text rendering in poster generation, we found that the trained model sometimes generates meaningless texts outside the target area of the text, which will seriously affect the aesthetics. We conjecture that the main reason is that the ground truth images sometimes contain text outside the specified position. To solve this problem, we masked out the extra text during training and it solved most cases." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.48, + 0.907, + 0.661 + ], + "angle": 0, + "content": "Specifically, SceneGenNet is initialized from pre-trained SD3 Inpainting-Controlnet [7]. In the second stage of training, we simultaneously mask out the regions of the untrained texts (usually those that are too small or just logos) both in the subject mask input to SceneGenNet and in the ground truth image used for loss calculation(as shown in Fig. 15). It is worth noting that although these small texts and logos are not included in the training, we have also annotated them to address the aforementioned issues. Finally, this technique makes the loss corresponding to the masked-out regions very close to zero so that the model will not learn these meaningless texts." + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.677, + 0.651, + 0.801 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.677, + 0.767, + 0.801 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.771, + 0.677, + 0.887, + 0.801 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.811, + 0.906, + 0.84 + ], + "angle": 0, + "content": "Figure 15. Example of our solution technique for meaningless texts and logos that generated outside target position." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.093, + 0.088, + 0.253, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.253, + 0.089, + 0.414, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.089, + 0.574, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.574, + 0.089, + 0.685, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.685, + 0.089, + 0.797, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.797, + 0.089, + 0.905, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.21, + 0.232, + 0.787, + 0.246 + ], + "angle": 0, + "content": "Figure 16. Visualization results on texts in English, Japanese, and uncommon Chinese characters." 
+ }, + { + "type": "image", + "bbox": [ + 0.116, + 0.258, + 0.307, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.263, + 0.497, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.263, + 0.688, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.262, + 0.882, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.419, + 0.303, + 0.605 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.419, + 0.495, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.419, + 0.688, + 0.625 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.419, + 0.88, + 0.628 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.636, + 0.307, + 0.824 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.638, + 0.492, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.637, + 0.686, + 0.844 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.693, + 0.637, + 0.879, + 0.844 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.864, + 0.714, + 0.878 + ], + "angle": 0, + "content": "Figure 17. Visualization of ground truth for some samples in the dataset." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_origin.pdf b/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1722a2a1ec8ec5168e7e771d8e7ee8848cf9c8e5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cfeb9cde83de14fbd249e2fd1af297e1c59a065788291e734e484274b0a5979 +size 13754295 diff --git a/data/2025/2504_06xxx/2504.06632/full.md b/data/2025/2504_06xxx/2504.06632/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f722427b01001409e245071b9e926d0b6f2e2483 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/full.md @@ -0,0 +1,541 @@ +# PosterMaker: Towards High-Quality Product Poster Generation with Accurate Text Rendering + +Yifan $\mathrm{Gao}^{1,2*^{\dagger}}$ , Zihang Lin $^{2*}$ , Chuanbin Liu $^{1\ddagger}$ , Min Zhou $^{2}$ + +Tiezheng Ge², Bo Zheng², Hongtao Xie¹ + +1University of Science and Technology of China 2Taubao & Tmall Group of Alibaba + +eafn@mail.ustc.edu.cn {liucb92, htxie}@ustc.edu.cn + +{linzihang.lzh, yunqi.zm, tiezheng.gtz, bozheng}@alibaba-inc.com + +Project page: https://poster-maker.github.io + +# Prompt + +The box of fish oil supplements is placed on a wooden table, with a background of a serene ocean and clear sky, symbolizing purity and the natural source of the product + +The subject rests on a smooth, dark wooden table, surrounded by a few scattered leaves and delicate flowers, with a serene garden scene complete with blooming flowers and lush greenery in the background. 
+ +# Subject + +![](images/1e079d3f112cc12b9f993bc37690dfe7939ba2bbd0baf1a45063ce19c15ac543.jpg) + +![](images/a58e688e8dedf31932fdfcb7201e6f920b0c0e8366cfc7da9d05f237165b5659.jpg) + +# Text + +![](images/17f2983698b127a00d4e62597e010fa56f7291a6f66ec063e7f2f40531d73f5f.jpg) + +![](images/f4521cddeceba886319b3866a3f1acd34aea14164e3e0df48fbea79cc2442101.jpg) + +# Poster + +![](images/6ba6efda80ae24712f5628d9c9a49d3b0c5a7601c44fea4221ad6127c7f0c711.jpg) + +![](images/664f809e620ed5313bd043a4f137e29ea847a04403fad44ef5ab96a28af07f67.jpg) + +# Previous: two stage + +![](images/3d43dd864821885fae7c2db8123d1c1ed34fd14792cb07a03aaaf5258e5c537b.jpg) + +![](images/0acb3c0d5fed3a3f8193e9127851af5894725354a094709aea933134f3e3b7b0.jpg) + +# Ours: end to end + +![](images/5aafc7be6f249efe6e065066c893a2da912a408f5a9da7a7962c20d68194f624.jpg) + +![](images/0881d676bb6c6f70842f5ed1ac298edb0c67e1c6d95af2b034679feb230f99be.jpg) + +![](images/84cb058d0e8dc1bbdb90f7e73f2e5a9985e9375393a90b68108134dc28db47f5.jpg) +Figure 1. (a) Definition of the advertising product poster generation task. The input includes the prompt, subject image, and the texts to be rendered with their layouts. The output is the poster image. (b) The comparison of our method with the previous method. PosterMaker generates posters end-to-end, while previous methods first generate poster backgrounds and then render texts. (c) Visualization results demonstrate that PosterMaker can generate harmonious and aesthetically pleasing posters with accurate texts and maintain subject fidelity. + +![](images/e6705e40e73fa87f1df270a6d2380df0e9a7efa0f5d05b1e5a0f2a835d460f14.jpg) +(a) + +![](images/dfbca06eace34b9928322d5af9014b4a18479b600dfa3a56f5147f507be663cf.jpg) + +![](images/8860ac0793d8245add147225b1af9376c5b05f22070e3ad2315d5b0d55f1fb48.jpg) + +![](images/d57e8b3cd2f71d6889bca5f2899b15a010f1cd79ba9d11b597c7b10a57738729.jpg) +(c) + +![](images/76fb2ebcebf1e1c35721cea4542b4bfcb7234470893d55ce6c3bf73c0f43ce28.jpg) + +![](images/b86755819730d39a871b328f8d3b51e66352c042590e7172bf79246b96c76c85.jpg) + +![](images/a37a51ba7e3c0388312024f2864b9a532c649ad3b79c31574d200575e0d7adf1.jpg) + +# Abstract + +Product posters, which integrate subject, scene, and text, are crucial promotional tools for attracting customers. Cre + +ating such posters using modern image generation methods is valuable, while the main challenge lies in accurately rendering text, especially for complex writing systems like Chinese, which contains over 10,000 individual characters. In this work, we identify the key to precise text rendering + +as constructing a character-discriminative visual feature as a control signal. Based on this insight, we propose a robust character-wise representation as control and we develop TextRenderNet, which achieves a high text rendering accuracy of over $90\%$ . Another challenge in poster generation is maintaining the fidelity of user-specific products. We address this by introducing SceneGenNet, an inpainting-based model, and propose subject fidelity feedback learning to further enhance fidelity. Based on TextRenderNet and SceneGenNet, we present PosterMaker, an end-to-end generation framework. To optimize PosterMaker efficiently, we implement a two-stage training strategy that decouples text rendering and background generation learning. Experimental results show that PosterMaker outperforms existing baselines by a remarkable margin, which demonstrates its effectiveness. + +# 1. 
Introduction + +Product posters, which showcase items for sale within well-chosen background scenes and include descriptive text, play a vital role in e-commerce advertising by capturing customers' attention and boosting sales. Creating such posters necessitates photographing the product in carefully selected environments that highlight its features, as well as thoughtfully choosing text colors and fonts to ensure that the text is appealing, legible, and harmonious with the background. This process can be quite expensive. With the significant advancements in large-scale text-to-image (T2I) models [13, 35, 39], synthesizing such product posters with image generation models attracts increasing attention. In this paper, we focus on the product poster generation task. Specifically, given a prompt describing the background scene, the foreground image of the user-specified subject and some texts together with their layouts, we aim to develop a model to generate the subject into the desired scene background and accurately render the text in an end-to-end manner (as shown in Fig. 1 (a)). + +A straightforward solution for this task is to first generate the subject into the desired scene [2, 11, 40], and then predict the text attributes (such as color and font) [14, 23] and render them on the image. However, such two-stage approach suffers from disharmony between the text and the poster background(as shown in Fig. 2 (b)). And collecting training data is also challenging since the text attributes, especially the text font, are difficult to extract from the poster. Another solution is learning to generate the poster using a per-pixel synthesis approach, which can benefit from directly learning the distribution of professionally designed posters. We focus on such one-stage solution. The main challenge is how to ensure the text rendering accuracy. + +Many recent works [13, 25, 42, 49] have been proposed to improve the text rendering accuracy for large diffusion models. Great progress has been made and some + +recent work can achieve high rendering accuracy for English. However, for non-Latin languages like Chinese, one of the most widely spoken languages, achieving high rendering accuracy remains challenging. This difficulty stems from the existence of over 10,000 characters, with Chinese characters characterized by complex and diverse stroke patterns, making it extremely difficult to train a model to memorize the rendering of each individual character. Recent studies [4, 28, 42] have focused on extracting visual features of text as control signals. Typically, these approaches render text lines into glyph images and extract line-level text visual features to guide generation. + +Nevertheless, line-level visual features often lack the discriminative power to capture character-level visual nuances. To address this limitation, GlyphByT5 [25, 26] introduced a box-level contrastive loss with sophisticated glyph augmentation strategies to enhance character-level discriminativeness, achieving promising results. In this paper, we point out that the key to high-accuracy text rendering lies in constructing character-discriminative visual features as control signals. Specifically, we render each character as a glyph image and extract visual features via a visual encoder. These features are then concatenated with positional embeddings to form a character-level representation. 
Then we propose TextRenderNet, an SD3 [13] controlnet-like [53] architecture that takes the character-level representation as the control signal to render visual text. Our experiments demonstrate that the proposed character-level representation is effectively capable of achieving accurate text rendering. + +In the task of poster generation, another important thing is to generate the user-specific subject into a desired scene while keeping high subject fidelity. Recent subject-driven controllable generation methods [40, 44, 51] can synthesize such images, but they still cannot ensure that the user-specified subject is completely consistent in the generated details (e.g., the logo on the product may be inaccurately generated), which could potentially mislead customers. Therefore, we follow poster generation methods [5, 11, 22] to address this task via introducing an inpainting-based module named SceneGenNet. However, we found that even using inpainting methods, subject consistency is not always achieved as the inpainting model sometimes extends the subject shape (as shown in Fig. 2 (a)). Similar phenomenon is also observed in [11, 12]. To address this issue, we elaboratively develop a detector to detect the foreground extension cases. Then we employ the detector as a reward model to train the SceneGenNet via feedback learning for further improving subject fidelity. + +Combining the proposed TextRenderNet and SceneGenNet, we develop a framework named PosterMaker that can synthesize the product poster in an end-to-end manner. To efficiently optimize PosterMaker, we introduce a two-stage + +![](images/cc4fa3e524b81f1d124f3a83bd3a2d6b24e7b0990c9c1eab0e4379791445aebf.jpg) +User-Specified Texts +Figure 2. The illustration of the three challenges faced by poster generation, which seriously hinder the practical application. + +![](images/cc26ae10209f45c45f172ef4c0e2705be1dbb550c1fbbfc2c45f08dbbb262431.jpg) +User-Specified Subject + +![](images/e14fa0fb52af6019d2f7fd749129efe78e4f649029e674de79c8f0fa553d9312.jpg) +(a) Foreground Extension + +![](images/25b6e838f24b2f49dde5dbd24af262e442a4c225a7be8405132847f005181a21.jpg) +(b)Text-Scene Disharmony + +![](images/26a7703ae2cdb9597f122eb7e7605316ed457d62788d2b67698dee17ecbe5f74.jpg) +(c) Poor Text Rendering + +![](images/88324587a54ed90f04e0beebb66bcd3330c40f445ac07add6ffd62a1ee4555da.jpg) +Our Result + +training strategy to separately train TextRenderNet and SceneGenNet. This training strategy decouples the learning of text rendering and background image generation, thus TextRenderNet and SceneGenNet can focus on their specific tasks. Qualitative results (as shown in Fig. 1 (c)) demonstrate our training strategy is effective for training PosterMaker and it achieves promising poster generation results. + +To summarize, our contributions are as follows: + +- We proposed a novel framework named PosterMaker, which mainly consists of a TextRenderNet and a SceneGenNet. With a two-stage training strategy, PosterMaker can synthesis aesthetically product posters with texts accurately and harmoniously rendered on it. +- We reveal the core of achieving accurate Chinese text rendering is to construct a robust character-level text representation as the control condition. These findings can inspire future research on improving the text rendering abilities of T2I models. +- We improve the subject fidelity via subject fidelity feedback learning, which is shown effective in addressing the subject inconsistency issue. + +# 2. Related Work + +# 2.1. 
Poster Generation + +Generating posters involves combining various elements like a subject image, a background scene image, and text to ensure the subject and text are prominently and accurately displayed while maintaining an appealing look. Automating this process is quite complex and challenging. Methods like AutoPoster [23], Prompt2Poster [45], and COLE [16] break it down into stages: creating images and layout, predicting the visual properties of text, and rendering the poster. These approaches have several steps and often struggle to precisely obtain all the necessary visual attributes like font and color gradients. With the emergence of more advanced generative models [35], methods like JoyType [19], Glyphbyt5 [25], and GlyphDraw2 [28] can directly generate the image and text together at the pixel level based on the poster prompt, text content, and layout. This more streamlined approach can leverage more readily available poster pixel data for training, but there is still room for improvement in terms of the overall poster cohesion and text accuracy. Our method is also a one-stage, direct pixel-level generation approach that simultaneously creates the image and + +text. However, our focus is on generating posters for a given product subject, where the input includes the subject image, prompt, text content, and layout. In addition to considering text rendering accuracy and overall poster harmony, we also need to maintain the fidelity of the product. + +# 2.2. Visual Text Rendering + +Recently, text-to-image (T2I) models [1, 13, 41] have made significant strides in enhancing English text rendering by introducing stronger text encoders, such as T5 [38]. However, multilingual text image generation still faces significant challenges due to the large number of non-Latin characters and complex stroke structures. Early work [49] has explored the ControlNet-based method [53], using low-level visual images such as glyph images as the control signal for text image generation. However, glyph images are easily affected by text size and shape, especially complex stroke details. Besides, some recent works [4, 27, 28, 42, 52, 55] utilize more robust visual features, such as line-level OCR features as control conditions to further improve the text accuracy. But the line-level visual features still perform poorly in representing stroke details for each character. To address this issue, GlyphByT5 [25, 26] proposes a method with box-level contrastive learning to align the text features extracted from the language model with the features extracted from the visual encoder. To effectively learn such alignment, GlyphByT5 relies on collecting massive amounts of data and developing complex data augmentation strategies for the alignment pre-training, which lacks flexibility. In contrast, in this paper, we reveal that the key to high-accuracy text rendering lies in constructing discriminative character-level visual features. Thus we propose a plug-and-play and robust character-level text representation derived from off-the-shelf OCR encoders, which can accurately represent the visual structure of the text without additional training and enable precise text rendering. + +# 2.3. Subject-Preserved Scene Generation + +To create a scene image with a product subject while ensuring subject fidelity, two main methods are commonly used. One is the subject-driven method [3, 6, 20, 36, 40], which adjusts the position, angle and lighting of the subject based on the prompt to create a harmonious image. 
However, it often struggles to preserve the significant features of the subject. The other utilizes inpainting-based background com + +![](images/8b46bbeabdcde0195c3c0ba74ad57a536fc5d0d9fa001be5a351e582462f2f21.jpg) +Figure 3. The framework of the PosterMaker, which is based on the SD3. To precisely generate multilingual texts and create aesthetically pleasing poster scenes, TextRenderNet and SenceGenNet are introduced, whose outputs are used as control conditions added to the SD3. + +pletion techniques [2, 11, 43]. It only generates the non-subject areas of an image and naturally keeps consistency in the original subject area. But it sometimes extends the foreground subject [11, 12], such as adding an extra handle to a cup, which also reduces subject fidelity. To maximize subject fidelity, our method uses background completion and a reward model to determine whether the foreground extension occurred, thereby enhancing subject fidelity. + +# 3. Method + +# 3.1. Problem Formulation + +This paper focuses on the creation of product posters, which typically consist of multiple elements such as text, subjects, and scenes, as illustrated in Fig. 1 (a). The central challenge of this task is to generate these elements accurately and harmoniously, offering both research and practical applications. The task is defined as: + +$$ +I _ {g} = f \left(I _ {s}, M _ {s}, T, P\right), \tag {1} +$$ + +where $I_{g}$ denotes the generated poster image, $I_{s}$ represents the subject image, and $M_{s}$ is the subject mask. The variable $T$ signifies the content and the position of text and $P$ is the prompt describing the background scene. Subsequent sections will detail the design of PosterMaker, and our proposed solution to this task. + +# 3.2. Framework + +As shown in Fig. 3, PosterMaker is developed based on Stable Diffusion 3 (SD3) [13], which contains a strong VAE for reconstructing the image details like text stroke. And we propose two modules, i.e., TextRenderNet and SceneGenNet, to address the poster generation task. TextRenderNet is specifically designed to learn visual text rendering, taking character-level visual text representations as input to achieve precise and controllable text rendering. SceneGenNet, on the other hand, accepts a masked image (indicating which content should remain unchanged) and a prompt, learning to generate the foreground subject within the desired scene described by the prompt. Both TextRenderNet + +![](images/f8b790dd38d0e52b2ec90ca9820adac388e4331906f3563714af637d4a26fe5d.jpg) +Figure 4. The details of TextRenderNet and SceneGenNet, showcasing their model architectures and their interactions with SD3. + +and SceneGenNet are grounded in a ControlNet-like [52] architecture derived from SD3 and their architectures are detailed in Fig. 4. They share the same internal structure, comprising several cascaded MM-DiT blocks [13], with weights copied from the base model for initialization. The output of each MM-DiT block is added to the corresponding block of the base model after passing through a zero convolution layer [53]. The key distinction between the two modules lies in their input configurations. SceneGenNet takes the prompt as input to the text condition branch, and for the visual branch, the input is derived by the latent feature at timestep $t$ , the subject mask, and the masked latent to preserve the foreground area. In contrast, TextRenderNet receives text representations (detailed in the next section) in the text condition branch for text rendering. 
An adapter, consisting of a linear layer and layer normalization, adjusts the feature dimensions of these text representations before they are input to TextRenderNet. The outputs of each block in TextRenderNet and SceneGenNet are directly added to the corresponding block outputs of the SD3 base model. + +# 3.3. Character-level Visual Representation for Precise Text Rendering + +Recently, some works have explored multilingual visual text generation. Among them, a promising approach is based on ControlNet-like methods [42], which utilize both glyph images and line-level OCR features as conditions. + +![](images/4d77d17c9b67ac938b6d26b556a4f1a609b222eff4de3596b373fda50aac6eab.jpg) +Figure 5. The distinction between the multilingual character-level text representation we proposed and the line-level methods of previous works like AnyText [42] and GlyphDraw2 [28]. + +However, this control information cannot accurately represent characters: 1) glyph images are easily affected by text size and shape, making them less robust. 2) line-level visual features lack fine-grained stroke features and are limited by the OCR model's poor capability to recognize long texts. To address these challenges, this paper proposes a plug-and-play and robust character-level text representation, where each character is precisely represented by one token. + +Specifically, the text $C$ has $n$ characters. For each character $c_{i}$ , its feature is separately extracted by a pre-trained OCR encoder $f_{v}$ and then averaged and pooled to obtain a compact character representation vector $r_{c_i} \in \mathbb{R}^c$ . Thus, the character-level text representation is defined as follows: + +$$ +r _ {c i} = \operatorname {a v g p o o l} \left(f _ {v} \left(I _ {c i}\right)\right), \tag {2} +$$ + +$$ +R _ {c} = \left[ r _ {c _ {1}}, r _ {c _ {2}}, \dots , r _ {c _ {n}} \right], \tag {3} +$$ + +where $I_{c_i}$ is the $i$ -th character image rendered in a fixed font, and $R_{c} \in \mathbb{R}^{n \times c}$ is the char-level text representation. + +As shown in Fig. 5, compared to previous methods, our key difference is extracting representations from character glyph images. This enables the model to perceive character stroke structures and achieve high text accuracy. Additionally, since the number of characters is fixed, we can pre-extract the representations of each character and store them in a dictionary, eliminating the need for online rendering and feature extraction. This significantly simplifies the training and inference pipeline. + +Finally, this text representation lacks order and positional information. Thus, the character order encoding $P_{rank}$ is introduced to represent the order of characters in the text, which is implemented through a sinusoidal position encoding of the char order. Besides, inspired by GLIGEN [21], the text position coordinates are mapped to sinusoidal position encoding $P_{bbox}$ to control the position of the text. Then we concatenate $P_{rank}$ , $P_{bbox}$ and $R_c$ along the feature dimension to construct the final text representation. + +# 3.4. Improving Subject Fidelity + +In the task of generating product posters, it is crucial to maintain subject fidelity, i.e., ensuring that the subject in the generated poster remains consistent with the user-specified subject. To achieve this goal, we employ SceneGenNet to perform background inpainting, which is trained to precisely preserve the foreground subject and only inpaint the background according to the prompt. 
# 3.4. Improving Subject Fidelity

In the task of generating product posters, it is crucial to maintain subject fidelity, i.e., to ensure that the subject in the generated poster remains consistent with the user-specified subject. To achieve this goal, we employ SceneGenNet to perform background inpainting: it is trained to precisely preserve the foreground subject and to inpaint only the background according to the prompt. However, inpainting-based models sometimes extend the foreground subject into another subject (as shown in Fig. 2 (b)), thereby compromising subject fidelity. We refer to this as "foreground extension". To mitigate this issue, we develop a model to detect foreground extension and employ it as a reward model to fine-tune PosterMaker and improve subject fidelity.

![](images/7b1ab2581a2637e361192dadf19b149350be83c2f5e81ad624f504a04833ae50.jpg)
Figure 6. The model details of the foreground extension detector.

![](images/cba135ab6536ff93c30644eef668d3f30af7317c5bf63b7a1b66f5d7419fe1cf.jpg)

![](images/ac592ba23bddb135c0915fc0128194478bb41293ec4e39dc475919d75340602d.jpg)
Figure 7. The illustration of our two-stage training strategy for efficiently optimizing PosterMaker.

Foreground Extension Detector. We develop the foreground extension detector $S_{\theta}$ based on HQ-SAM [17]. As shown in Fig. 6, we feed the generated image $I_g$ to the SAM [18] image encoder. The subject mask $M_s$ and box $B_s$ are provided as the mask prompt and box prompt, respectively, to the HQ-SAM decoder to obtain an intermediate mask $M_i$. Next, we concatenate the image features extracted from the SAM encoder with $M_s$, $M_i$, and $M_s - M_i$ along the channel dimension. The concatenated features are processed by convolutional layers and MLP layers to predict whether the foreground has been extended in the generated image. We collected 20k manually annotated images to train the foreground extension detector $S_{\theta}$.

Subject Fidelity Feedback Learning. After offline training, the foreground extension detector $S_{\theta}$ is used as a reward model to supervise PosterMaker and improve subject fidelity. Specifically, assuming the reverse process has a total of $T'$ steps, we follow ReFL [47] to first sample $z_{T'} \sim \mathcal{N}(0,1)$; after $T' - t'$ steps of inference $(z_{T'} \rightarrow z_{T'-1} \rightarrow \dots \rightarrow z_{t'})$, we obtain $z_{t'}$, where $t' \sim [1, t_1]$. Then, we directly perform a one-step inference $z_{t'} \rightarrow z_0$ to accelerate the reverse process. Furthermore, $z_0$ is decoded into the generated image $x_0$. The detector $S_{\theta}$ predicts the foreground extension score for $x_0$, and this score is used in the reward loss to optimize the generator $G_{\phi}$ (i.e., PosterMaker). The reward loss is defined as follows:

$$
\mathcal{L}_{\text{reward}}(\phi) = - \mathbb{E}_{(x, c, m) \sim \mathcal{D}_{\text{train}},\, t' \sim [1, t_1],\, z_{T'} \sim \mathcal{N}(0, 1)} \log \sigma\left(1 - S_{\theta}\left(G_{\phi}\left(z_{T'}, x, c, m, t'\right), m\right)\right), \tag{4}
$$

![](images/4aaf4cadc0bd2e76d849fa02a6386076ba7a57a8b840bee0a63daad304225b64.jpg)
Figure 8. Qualitative comparison with different methods. Best viewed on screen. To aid comprehension, Chinese text lines in the image are translated into English and annotated in corresponding colors.

where $x$, $c$, and $m$, sampled from the training data $\mathcal{D}_{\mathrm{train}}$, represent the subject image, control conditions, and subject mask, respectively. To avoid overfitting, we do not compute the reward loss for cases where the foreground extension score is below 0.3.
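Below is a rough sketch, in our own notation, of a single subject fidelity feedback step as described by Eq. (4): partially denoise to a sampled $t'$, make a one-step prediction of $z_0$, decode, score with the detector, and apply $-\log\sigma(1 - S_\theta)$, skipping samples whose extension score is below 0.3. The generator, detector, decoder, and partial sampler are passed in as stand-in callables; this is an illustration, not the training code.

```python
# Rough sketch (ours) of one subject-fidelity feedback step following Eq. (4).
import torch
import torch.nn.functional as F


def reward_step(generator, detector, decode, sample_to_t, batch, z_shape, t1=10, thresh=0.3):
    x, cond, mask = batch                               # subject image, conditions, subject mask
    t_prime = int(torch.randint(1, t1 + 1, ()))         # t' ~ [1, t1]
    z_T = torch.randn(z_shape)                          # z_{T'} ~ N(0, I) (latent space)
    with torch.no_grad():                               # the T' -> t' steps are not backpropagated
        z_t = sample_to_t(generator, z_T, cond, mask, t_prime)
    z0 = generator(z_t, cond, mask, t_prime)            # single-step z_{t'} -> z_0 (with grad)
    x0 = decode(z0)                                     # decoded image
    score = detector(x0, mask)                          # foreground-extension score in [0, 1]
    loss = -F.logsigmoid(1.0 - score)                   # -log sigmoid(1 - S_theta)
    loss = torch.where(score < thresh, torch.zeros_like(loss), loss)
    return loss.mean()


# Smoke test with stand-in components (shapes and callables are schematic).
toy = dict(
    generator=lambda z, c, m, t: 0.9 * z,
    detector=lambda img, m: torch.sigmoid(img.mean(dim=(1, 2, 3))),
    decode=lambda z: z,
    sample_to_t=lambda g, z, c, m, t: z,
)
batch = (torch.randn(2, 3, 256, 256), None, torch.ones(2, 1, 256, 256))
print(reward_step(**toy, batch=batch, z_shape=(2, 16, 32, 32)))
```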
Our total training loss is defined as:

$$
\mathcal{L}_{\text{total}} = \mathcal{L}_{\text{denoise}} + \lambda \mathcal{L}_{\text{reward}}, \tag{5}
$$

where $\lambda$ is a hyperparameter that balances the reward loss against the denoising loss.

# 3.5. Training Strategy

To train PosterMaker efficiently, this paper introduces a two-stage training strategy, as shown in Fig. 7, aimed at decoupling the learning of text rendering and background image generation. Specifically, in the first stage, the training task is local text editing. We freeze SceneGenNet and optimize only TextRenderNet and the adapter. Since we initialize SceneGenNet with the pre-trained weights of an inpainting ControlNet [7], it can already fill in the local background well, so TextRenderNet can focus on learning text generation. In the second stage, the training task is subject-based text-to-image generation. Here we freeze TextRenderNet and train only SceneGenNet, which then focuses on learning poster scenes and creative design from the training data. Notably, Stage 1 learns local text editing/inpainting while Stage 2 learns background inpainting, so the input images indicating the area to inpaint differ between the two stages (see Fig. 7). With this two-stage strategy, TextRenderNet and SceneGenNet can be optimized efficiently since each can focus on its specific task.

# 4. Experiments

# 4.1. Experimental Setup

Dataset. We crawl product posters from online e-commerce platforms to construct our training set. The training data mainly consists of Chinese posters. We first employ the PPOCRv4 model [34] to extract the text content and bounding boxes from the images as coarse annotations, and then ask annotators to refine the bounding boxes and correct the text content to improve annotation quality, resulting in a dataset of 160k images. We generate image captions with GPT-4o [32] and extract foreground subject masks with $\mathrm{U}^2$-Net [37] and ViTMatte [50]. We randomly select 302 images for evaluation and leave the rest for training. To better evaluate the performance of our method, we use an LLM [10] to generate additional background prompts and text layouts as evaluation samples; after manually checking and removing unreasonable ones, we obtain another 198 evaluation samples, forming a final evaluation set named PosterBenchmark with 500 samples.

Evaluation Metrics. We follow AnyText [42] and evaluate text rendering accuracy using two metrics: sentence accuracy (Sen. Acc) and normalized edit distance (NED). Specifically, we crop the text line from the generated image according to the provided bounding box and utilize an OCR model [31] to predict the content $s_{\mathrm{pred}}$ of the generated text line. We denote the ground-truth text content as $s_{\mathrm{gt}}$. A text line is considered correctly generated if $s_{\mathrm{pred}} = s_{\mathrm{gt}}$; this condition is used to calculate Sen. Acc. Additionally, we compute the normalized edit distance (NED) between $s_{\mathrm{pred}}$ and $s_{\mathrm{gt}}$ to measure their similarity. We further calculate FID [15] to measure visual quality and the CLIP-T [40] metric to evaluate text-image alignment.

![](images/feef9c7b9f80eb27c54a517336ee0da7210d0f42e9a0a3a0f5135fc86b1da784.jpg)
Figure 9. Qualitative comparison using various text features. It is obvious that the character-level OCR features we used (PPOCR Char) are the most effective at maintaining character accuracy.
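For concreteness, here is a small self-contained sketch of the two text metrics just described. The OCR recognition itself is assumed to be done elsewhere; NED is computed here as a similarity score (higher is better), consistent with how it is reported in the tables.

```python
# Self-contained sketch of sentence accuracy and normalized edit distance.
def edit_distance(a: str, b: str) -> int:
    """Standard dynamic-programming Levenshtein distance."""
    dp = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev, dp[0] = dp[0], i
        for j, cb in enumerate(b, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (ca != cb))
    return dp[len(b)]


def text_metrics(preds, gts):
    sen_acc = sum(p == g for p, g in zip(preds, gts)) / len(gts)
    ned = sum(1.0 - edit_distance(p, g) / max(len(p), len(g), 1)
              for p, g in zip(preds, gts)) / len(gts)
    return sen_acc, ned


print(text_metrics(["新品上市", "限时5折"], ["新品上市", "限时8折"]))  # (0.5, 0.875)
```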
Implementation Details. Our SceneGenNet is initialized from the pre-trained SD3 Inpainting-ControlNet [7], and TextRenderNet is initialized from the SD3 [13] weights with the same configuration as in [8]. For subject fidelity feedback learning, we follow existing work [47] and uniformly sample $t'$ from [1, 10]; within this range, the one-step inference result $x_0$ obtained from $t'$ is close to the full inference result. The weight coefficient $\lambda$ is set to 0.0005. The learning rate is set to 1e-4 and the batch size to 192. We train our framework for 26k and 29.5k steps in training stage 1 and stage 2, respectively. In total, PosterMaker was trained on 32 A100 GPUs for 3 days. During sampling, based on dataset statistics, at most 7 text lines and 16 characters per line are selected from each image for rendering, a setting that covers most cases in the dataset.

# 4.2. Comparison with Prior Works

Baseline methods. We carefully designed the following baseline approaches based on existing open-sourced techniques for comparative analysis. SD3_inpaint_byt5: we encode the text content into prompt embeddings using ByT5 [48] and employ an adapter to map these embeddings to the original prompt embedding space of SD3 before feeding them into the ControlNet, which enables the ControlNet to render multilingual text. SD3_canny&inpaint: we first render the text into a white-background image and extract its Canny edges as the control, then finetune a pre-trained SD3 Canny ControlNet together with an inpainting ControlNet to achieve multilingual text rendering. AnyText: the SOTA open-sourced T2I method that supports multilingual text rendering; its text editing mode supports text inpainting [42], so we directly finetune it on our data using its text editing training pipeline.
| Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑ | FG Ext. Ratio ↓ |
| --- | --- | --- | --- | --- | --- |
| SD3_inpaint_AnyText | 52.78% | 75.27% | 100.87 | 26.90 | 14.82% |
| SD3_inpaint_byt5 | 52.28% | 86.57% | 65.45 | 26.71 | 14.60% |
| AnyText | 63.90% | 82.81% | 71.27 | 26.69 | 19.25% |
| Glyph-ByT5-v2 | 69.54% | 87.65% | 79.23 | 26.60 | 18.91% |
| SD3_canny&inpaint | 80.75% | 92.75% | 67.19 | 27.03 | 14.38% |
| GlyphDraw2 | 86.14% | 96.78% | 72.49 | 26.72 | 16.52% |
| GT (w/ SD1.5 Rec.) | 76.95% | 89.91% | - | - | - |
| GT (w/ SD3 Rec.) | 98.09% | 99.36% | - | - | - |
| GT | 98.53% | 99.59% | - | - | - |
| Ours (SD1.5) | 72.12% | 88.01% | 68.17 | 26.93 | - |
| Ours | 93.36% | 98.39% | 65.35 | 27.04 | 11.57% |

Table 1. Comparison with baseline methods.
SD3_inpaint_Anytext: we first generate the background with the SD3 inpainting ControlNet, and then render the text on the corresponding region using AnyText. Glyph-ByT5-v2 and GlyphDraw2: both are SOTA T2I methods that support multilingual text rendering [26, 28]. However, they do not have open-sourced pre-trained weights, so we reproduced them on our dataset, and we added an inpainting ControlNet to them to support subject-preserved generation.

Quantitative Comparison. We trained all baseline models on the same dataset and then quantitatively compared all methods on the PosterBenchmark, as shown in Tab. 1. SD3 is used as the base model by default; however, since we observed that the SD1.5 VAE leads to significant reconstruction error, we also implemented an SD1.5 version of PosterMaker with the same experimental setup as AnyText (an SD1.5 architecture) to enable a more equitable comparison. As the VAEs, especially SD1.5, introduce some reconstruction error and the OCR model may misrecognize some characters, we also report the metrics on ground-truth images as an upper bound.
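As an illustration of how such a reconstruction upper bound can be obtained, the sketch below round-trips a ground-truth poster through the SD3 VAE via diffusers and returns the reconstruction, which would then be cropped and passed to the OCR model. The repository id, access requirements, and preprocessing (image sides as multiples of 8) are our assumptions.

```python
# Sketch (ours) of the "GT (w/ SD3 Rec.)" upper bound: encode and decode the
# ground-truth poster with the SD3 VAE, then run OCR on the reconstruction.
import torch
from diffusers import AutoencoderKL
from torchvision.transforms.functional import to_pil_image, to_tensor

vae = AutoencoderKL.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", subfolder="vae"
)


@torch.no_grad()
def vae_roundtrip(pil_image):
    x = to_tensor(pil_image).unsqueeze(0) * 2 - 1            # (1, 3, H, W) in [-1, 1]
    latents = vae.encode(x).latent_dist.sample()
    recon = vae.decode(latents).sample.clamp(-1, 1)
    return to_pil_image((recon[0] + 1) / 2)

# recon = vae_roundtrip(gt_poster)   # then crop text lines from `recon` and feed them to the OCR model
```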
| Text Feature | Type | Sen. ACC | NED |
| --- | --- | --- | --- |
| ByT5 | textual feat. | 33.48% | 54.50% |
| Canny | img | 81.50% | 92.72% |
| TrOCR Line | visual feat. | 26.58% | 49.46% |
| TrOCR Char | visual feat. | 94.27% | 98.54% |
| PPOCR Line | visual feat. | 38.91% | 53.86% |
| PPOCR Char (Ours) | visual feat. | 95.15% | 98.75% |
| GT (w/o Rec.) | - | 98.53% | 99.59% |
| GT (w/ SD3 Rec.) | - | 98.09% | 99.36% |

Table 2. Quantitative comparison using various text features.
| Method | FG Ext. Ratio ↓ | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑ |
| --- | --- | --- | --- | --- | --- |
| Ours | 11.57% | 93.36% | 98.39% | 65.35 | 27.04 |
| Ours w/o $\mathcal{L}_{reward}$ | 15.05% | 93.11% | 98.21% | 65.10 | 27.04 |
Table 3. Evaluation on the subject fidelity feedback learning.

As shown in Tab. 1, our method achieves the best performance on all metrics. Notably, on the text rendering metrics Sen. ACC and NED, our model outperforms the baselines by an impressive margin and is already close to the upper bound. These promising results demonstrate the effectiveness of the proposed PosterMaker.

Qualitative Comparison. The results are shown in Fig. 8. Compared to the baselines, our PosterMaker generates more readable and accurate poster images with texts, particularly for smaller texts. Notably, as an end-to-end generation method, PosterMaker automatically creates underlays that enhance the contrast between text and background, effectively highlighting the text. This feature is crucial in product poster design for capturing viewers' attention. These findings demonstrate that PosterMaker successfully learns the distribution of posters created by human designers.

# 4.3. Ablation Study and Analysis

How to achieve high text rendering accuracy? We conduct experiments to explore the effectiveness of different control conditions for visual text rendering. Because text rendering accuracy is primarily determined by the first training stage, we skip the second training stage in this experiment to save computational resources. The results are summarized in Tab. 2. We observe several valuable findings: 1) Char-level features significantly outperform previous line-level features, benefiting from their finer-grained representation. This explains why previous methods [4, 28, 42] achieve inferior performance (PPOCR Line is used in [28, 42], TrOCR Line in [4]); recent concurrent works [29, 46] report similar findings. 2) Char-level feature representations are superior to low-level image features such as Canny. 3) PPOCR outperforms TrOCR, which we attribute to PPOCR being a multilingual OCR model, while TrOCR is an English-only model. 4) Even though TrOCR has not been trained on multilingual text data, it still achieves decent results, likely because it extracts universal visual structural features. 5) ByT5 extracts char-level features, but its performance is inferior to OCR features, because it extracts semantic features rather than character structural features, while the text rendering capability of T2I models relies more on character structural features. We present visualization results in Fig. 9. We observe that when using line-level features as the control, the generated text occasionally becomes completely unrecognizable, suggesting that line-level features are insufficient for precise text rendering. Additionally, Canny control always introduces stroke artifacts, particularly in smaller texts (as seen in row 3 of Fig. 9), which further demonstrates that Canny control is also not an ideal condition for text rendering. In summary, the char-level feature extracted by PPOCR performs best and its accuracy is already close to the upper bound, indicating that a discriminative char-level visual feature is the key to achieving high text rendering accuracy.

![](images/c241b8495f20c789e14637a4732870b94dd037c3ecd9e058762ced2a4aab2709.jpg)

![](images/213ffce2dd7c950b4a730fb1079c8eecdbe8ab5edf2fd70d17dc6203496226e0.jpg)
Figure 10. Visual examples showing the effect of $\mathcal{L}_{reward}$.

Effectiveness of subject fidelity feedback learning.
We calculate the foreground extension ratio (termed as FG Ext. Ratio) by asking human annotators to manually check each generated image whether the foreground subject is incorrectly extended. As demonstrated in Tab. 3, training our model with $\mathcal{L}_{reward}$ effectively reduces FG Ext. Ratio by $3.4\%$ , while maintaining subtle variations in other performance metrics. Representative visual examples are presented in Fig. 10. Besides, our model outperforms baseline methods in FG Ext. Ratio (see Tab. 1). These results show the efficacy of our proposed subject fidelity feedback learning approach in mitigating foreground extension artifacts. + +# 5. Conclusion + +The application of image generation in poster creation is often impeded by subpar text rendering and inconsistent subjects. To address these challenges, this paper introduces a novel framework, PosterMaker, which synthesizes aesthetically pleasing product posters with accurate and harmonious texts and contents. Moreover, we reveal that the key underlying successful multilingual text rendering is the construction of robust character-level visual text representations. Additionally, we propose subject fidelity feedback learning to mitigate inconsistencies in subjects. Through extensive experiments, our method demonstrates a significant improvement in both high-precision text generation and subject fidelity. These findings not only advance poster generation but also inspire future research on T2I models. + +# Acknowledgments + +This work was supported by the National Nature Science Foundation of China (62425114, 62121002, U23B2028, 62232006, 62272436) and Alibaba Group (Alibaba Research Intern Program). + +# References + +[1] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, et al. ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 3 +[2] Tingfeng Cao, Junsheng Kong, Xue Zhao, Wenqing Yao, Junwei Ding, Jinhui Zhu, and Jiandong Zhang. Product2img: Prompt-free e-commerce product background generation with diffusion model and self-improved LMM. In Proceedings of the 32nd ACM International Conference on Multimedia, MM 2024, Melbourne, VIC, Australia, 28 October 2024 - 1 November 2024, pages 10774-10783. ACM, 2024. 2, 4 +[3] Kelvin C. K. Chan, Yang Zhao, Xuhui Jia, Ming-Hsuan Yang, and Huisheng Wang. Improving subject-driven image synthesis with subject-agnostic guidance. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2024, Seattle, WA, USA, June 16-22, 2024, pages 6733-6742. IEEE, 2024. 3 +[4] Haoxing Chen, Zhuoer Xu, Zhangxuan Gu, Jun Lan, Xing Zheng, Yaohui Li, Changhua Meng, Huijia Zhu, and Weiqiang Wang. Diffuse: Universal text editing diffusion model. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 2, 3, 8 +[5] Ruidong Chen, Lanjun Wang, Weizhi Nie, Yongdong Zhang, and An-An Liu. Anyscene: Customized image synthesis with composited foreground. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8724-8733, 2024. 2 +[6] Wenhu Chen, Hexiang Hu, Yandong Li, Nataniel Ruiz, Xuhui Jia, Ming-Wei Chang, and William W. Cohen. Subject-driven text-to-image generation via apprenticeship learning. 
In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 3 +[7] Alimama Creative. Sd3-controlnet-inpainting. https://huggingface.co/alamama-creative/SD3-Controlnet-Inpainting, 2024.6,7,2,4 +[8] Alimama Creative. Sd3-controlnet-softedge. https://huggingface.co/alamama-creative/SD3-Controlnet-Softedge, 2024.7, 2 +[9] Alimama Creative. Ecomxl-controlnet-inpaint. https://huggingface.co/alimama-creative/EcomXL_controlnet_inpaint, 2024.2 +[10] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Xilin Wei, Songyang Zhang, Haodong + +Duan, Maosong Cao, Wenwei Zhang, Yining Li, Hang Yan, Yang Gao, Xinyue Zhang, Wei Li, Jingwen Li, Kai Chen, Conghui He, Xingcheng Zhang, Yu Qiao, Dahua Lin, and Jiaqi Wang. Internlm-xcomposer2: Mastering free-form text-image composition and comprehension in vision-language large model. arXiv preprint arXiv:2401.16420, 2024. 6, 1 +[11] Zhenbang Du, Wei Feng, Haohan Wang, Yaoyu Li, Jingsen Wang, Jian Li, Zheng Zhang, Jingjing Lv, Xin Zhu, Junsheng Jin, et al. Towards reliable advertising image generation using human feedback. In European Conference on Computer Vision, pages 399-415. Springer, 2024. 2, 4, 3 +[12] Amir Erfan Eshratifar, Joao V.B. Soares, Kapil Thadani, Shaunak Mishra, Mikhail Kuznetsov, Yueh-Ning Ku, and Paloma De Juan. Salient object-aware background generation using text-guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 7489-7499, 2024. 2, 4 +[13] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 2, 3, 4, 7, 1 +[14] Yifan Gao, Jinpeng Lin, Min Zhou, Chuanbin Liu, Hongtao Xie, Tiezheng Ge, and Yuning Jiang. Textpainter: Multimodal text image generation with visual-harmony and text-comprehension for poster design. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7236-7246, 2023. 2 +[15] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 6626-6637, 2017. 6 +[16] Peidong Jia, Chenxuan Li, Yuhui Yuan, Zeyu Liu, Yichao Shen, Bohan Chen, Xingru Chen, Yinglin Zheng, Dong Chen, Ji Li, Xiaodong Xie, Shanghang Zhang, and Baining Guo. Cole: A hierarchical generation framework for multilayered and editable graphic design, 2024. 3 +[17] Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. Segment anything in high quality. In Advances in Neural Information Processing Systems, pages 29914–29934. Curran Associates, Inc., 2023. 5 +[18] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollar, and Ross Girshick. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4015-4026, 2023. 5, 3 +[19] Chao Li, Chen Jiang, Xiaolong Liu, Jun Zhao, and Guoxin Wang. 
Joytype: A robust design for multilingual visual text creation. arXiv preprint arXiv:2409.17524, 2024. 3 +[20] Dongxu Li, Junnan Li, and Steven C. H. Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to + +image generation and editing. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 3 +[21] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 5 +[22] Zhaochen Li, Fengheng Li, Wei Feng, Honghe Zhu, An Liu, Yaoyu Li, Zheng Zhang, Jingjing Lv, Xin Zhu, Junjie Shen, et al. Planning and rendering: Towards end-to-end product poster generation. arXiv preprint arXiv:2312.08822, 2023. 2 +[23] Jinpeng Lin, Min Zhou, Ye Ma, Yifan Gao, Chenxi Fei, Yangjian Chen, Zhang Yu, and Tiezheng Ge. Autoposter: A highly automatic and content-aware design system for advertising poster generation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 1250–1260, 2023. 2, 3 +[24] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 1 +[25] Zeyu Liu, Weicong Liang, Zhanhao Liang, Chong Luo, Ji Li, Gao Huang, and Yuhui Yuan. Glyph-byt5: A customized text encoder for accurate visual text rendering. In European Conference on Computer Vision, pages 361-377. Springer, 2024. 2, 3 +[26] Zeyu Liu, Weicong Liang, Yiming Zhao, Bohan Chen, Ji Li, and Yuhui Yuan. Glyph-byt5-v2: A strong aesthetic baseline for accurate multilingual visual text rendering. arXiv preprint arXiv:2406.10208, 2024. 2, 3, 7 +[27] Zhiying Lu, Chuanbin Liu, Xiaojun Chang, Yongdong Zhang, and Hongtao Xie. Dhvt: Dynamic hybrid vision transformer for small dataset recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 3 +[28] Jian Ma, Yonglin Deng, Chen Chen, Haonan Lu, and Zhenyu Yang. Glyphdraw2: Automatic generation of complex glyph posters with diffusion models and large language models. arXiv preprint arXiv:2407.02252, 2024. 2, 3, 5, 7, 8 +[29] Lichen Ma, Tiezhu Yue, Pei Fu, Yujie Zhong, Kai Zhou, Xiaoming Wei, and Jie Hu. Chargen: High accurate character-level visual text generation model with multimodal encoder. arXiv preprint arXiv:2412.17225, 2024. 8 +[30] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. SDEdit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2022. 4 +[31] ModelScope. https://modelscope.cn/models/damo/cv_convnextTinyOCR-recognition-general_damo/summary,2023.6 +[32] OpenAI. https://openai.com/index/hello-gpt-4o/, 2024.6 +[33] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. + +Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 3 +[34] PaddlePaddle. https://github.com/PaddlePaddle/PaddleOCR, 2023.6,2,3 +[35] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: improving latent diffusion models for high-resolution image synthesis. 
In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 2, 3 +[36] Tianhao Qi, Shancheng Fang, Yanze Wu, Hongtao Xie, Jiawei Liu, Lang Chen, Qian He, and Yongdong Zhang. Deadiff: An efficient stylization diffusion model with disentangled representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8693-8702, 2024. 3 +[37] Xuebin Qin, Zichen Zhang, Chenyang Huang, Masood Dehghan, Osmar Zaiane, and Martin Jagersand. U2-net: Going deeper with nested u-structure for salient object detection. page 107404, 2020. 6 +[38] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and PeterJ. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv: Learning, arXiv: Learning, 2019. 3 +[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 10674-10685. IEEE, 2022. 2 +[40] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 2, 3, 6 +[41] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022. 3 +[42] Yuxiang Tuo, Wangmeng Xiang, Jun-Yan He, Yifeng Geng, and Xuansong Xie. Anytext: Multilingual visual text generation and editing. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 2, 3, 4, 5, 6, 7, 8 +[43] Haohan Wang, Wei Feng, Yaoyu Li, Zheng Zhang, Jingjing Lv, Junjie Shen, Zhangang Lin, and Jingping Shao. Generate e-commerce product background by integrating category commonality and personalized style. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2025. 4 +[44] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, and Anthony Chen. Instantid: Zero-shot identity-preserving gener + +ation in seconds. arXiv preprint arXiv:2401.07519, 2024. 2 +[45] Shaodong Wang, Yunyang Ge, Liuhan Chen, Haiyang Zhou, Qian Wang, Xinhua Cheng, and Li Yuan. Prompt2poster: Automatically artistic chinese poster creation from prompt only. In Proceedings of the 32nd ACM International Conference on Multimedia, MM 2024, Melbourne, VIC, Australia, 28 October 2024 - 1 November 2024, pages 10716-10724. ACM, 2024. 3 +[46] Tong Wang, Xiaochao Qu, and Ting Liu. Textmastero: Mastering high-quality scene text editing in diverse languages and styles. arXiv preprint arXiv:2408.10623, 2024. 8 +[47] Jiazheng Xu, Xiao Liu, Yuchen Wu, Yuxuan Tong, Qinkai Li, Ming Ding, Jie Tang, and Yuxiao Dong. Imagereward: Learning and evaluating human preferences for text-to-image generation. In Advances in Neural Information Processing Systems, pages 15903-15935. Curran Associates, Inc., 2023. 
5, 7, 1 +[48] Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, and Colin Raffel. ByT5: Towards a token-free future with pre-trained byte-to-byte models. Transactions of the Association for Computational Linguistics, 10:291-306, 2022. 7, 3 +[49] Yukang Yang, Dongnan Gui, Yuhui Yuan, Weicong Liang, Haisong Ding, Han Hu, and Kai Chen. Glyphcontrol: Glyph conditional control for visual text generation. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 2, 3 +[50] Jingfeng Yao, Xinggang Wang, Shusheng Yang, and Baoyuan Wang. Vitmatte: Boosting image matting with pretrained plain vision transformers. Information Fusion, 103: 102091, 2024. 6 +[51] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 2 +[52] Boqiang Zhang, Zuan Gao, Yadong Qu, and Hongtao Xie. How control information influences multilingual text image generation and editing? arXiv preprint arXiv:2407.11502, 2024. 3, 4 +[53] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 2, 3, 4 +[54] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In Computer Vision and Pattern Recognition, 2016. 4 +[55] Yuanzhi Zhu, Jiawei Liu, Feiyu Gao, Wenyu Liu, Xinggang Wang, Peng Wang, Fei Huang, Cong Yao, and Zhibo Yang. Visual text generation in the wild. In European Conference on Computer Vision, pages 89-106. Springer, 2024. 3 + +# PosterMaker: Towards High-Quality Product Poster Generation with Accurate Text Rendering + +Supplementary Material + +Due to space limitations, we were unable to present all experimental results in the main text. In this supplementary material, we will give more details about our experiments and present additional results. + +# 6. Implementation Details + +Training and Inference. We fully follow the settings of SD3 [13]. During training, the denoise loss $\mathcal{L}_{\mathrm{denoise}}$ uses simplified flow matching, also known as 0-rectified flow matching loss [24]. In inference, we also use the inference method of flow matching, with 28 inference steps. + +TextRenderNet and SceneGenNet. TextRenderNet and SceneGenNet have an architecture similar to SD3 [13], composed of multiple MM-DiT Blocks. In our implementation, TextRenderNet consists of 12 layers of MM-DiT Blocks, while SceneGenNet consists of 23 layers of MM-DiT Blocks. The output of the $N_{i}$ -th block of SceneGenNet is first added with the output of the $\left\lceil \frac{N_i}{2} \right\rceil$ -th block of TextRenderNet, and then add to the $N_{i}$ -th SD3 block. + +Classifier-Free Guidance. We use CFG during inference, with a CFG scale of 5. Additionally, since the "prompt" inputted to TextRenderNet is not a caption but a text representation, the negative one for CFG is set to a zero vector. During training, we randomly drop the text representation to a zero vector with $10\%$ probability. + +The Setting of $t_1$ in Reward Loss. We follow [47] to train the reward loss at the last 10 inference steps, i.e., we set $t_1$ to 10. 
Within the range of $t' \sim [1, t_1]$, the image $x_0$ obtained by one-step inference is close to the result of complete inference.

Details about Metric Calculation. Our evaluation benchmark contains samples generated by an LLM [10], so there is no ground truth for these samples. Therefore, we exclude the LLM-generated samples when calculating metrics that depend on ground-truth images, i.e., the FID metric for all experiments, the text accuracy metrics for GT (with and without VAE reconstruction), and the results of the ablation on different text features.

About ground truth for training the Foreground Extension Detector. We treat the detection of foreground extension as a binary classification problem and ask annotators to manually label the ground truth.

# 7. Baseline Details

We carefully designed 6 baseline approaches based on existing techniques for comparative analysis. The details are shown in Fig. 11. For 1) SD3_inpaint_byt5, 2) SD3_canny&inpaint, and 4) AnyText, we fine-tune them on our 160K dataset for the poster generation task. Meanwhile, 3) SD3_inpaint_Anytext is a two-stage inference method: in the first stage, the pre-trained Inpainting ControlNet generates the background, and in the second stage, AnyText performs the text editing task, with AnyText also fine-tuned on the 160K dataset specifically for text editing. The Inpainting ControlNet is initialized from the pre-trained SD3 Inpainting-ControlNet [7] and the Canny ControlNet is initialized from [8]. 5) GlyphDraw2 [28] and 6) Glyph-ByT5-v2 [26] are both SOTA T2I methods that support multilingual text rendering. However, they neither have open-source pre-trained weights nor support subject input, so we reproduced them on our dataset and added the pre-trained inpainting ControlNet [9] to support the subject input.

![](images/d6744c5d9caa6bc83f69bb57ecb1d9daafd4f9a7d94f1863a5c86d9575225be2.jpg)

![](images/d83b09b24b46dfeee4c8c54a89e5dbf00003f3dc7e316e42c12abbfdcf799593.jpg)

![](images/d7751a41607c78a3558de3d5bbf74fffe5b47dff69d5f6c0fb1aec1026926f20.jpg)

![](images/efadff1d49693fc289e772cae52adb6dc2a92ba704dca7a62cfbaedaf7a7cadf.jpg)

![](images/879af9b19d382fa98252e55347ac57b0349226e8c666c4bc696de805ba1b0522.jpg)

![](images/235f3cdda602d017551430d3d9e0d84d734901b0f8a30523be01b46932a2c2dd.jpg)
Figure 11. Detailed illustration of the implementation of the different baseline methods.

# 8. Scalable Training for Text Rendering

Our proposed two-stage training strategy allows the model to learn two different capabilities (i.e., text rendering and scene generation) separately, enabling more flexibility in using distinct datasets for each phase. Recent text rendering methods [4, 25, 26, 42] typically train their models on datasets containing millions of samples. To verify the potential of further improving our performance with more training data, we build a large dataset of 1 million samples and obtain the text annotations directly with PPOCRv4 [34], without manual annotation. We use this dataset for the first stage of text rendering training and the same 160k data for the second stage of scene generation learning. Compared to using the 160k data in both stages, the text sentence accuracy improves significantly by 4.48% (as shown in Tab. 4), demonstrating that the multi-stage training strategy is flexible and scalable.
However, in the main experiments, we choose to report the performance of our model trained only on the 160k data, for a fair comparison with the baselines.
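As a sketch of the automatic annotation used to scale the stage-1 data, the snippet below runs PaddleOCR over a poster and keeps the detected boxes and transcriptions as coarse labels. The constructor arguments and result layout follow the classic PaddleOCR interface and may differ across versions; treat it as an assumption-laden illustration, not the exact pipeline.

```python
# Sketch of coarse, fully automatic text annotation for the 1M stage-1 set.
from paddleocr import PaddleOCR

ocr = PaddleOCR(lang="ch")   # PP-OCR detection + recognition, Chinese models


def annotate(image_path):
    lines = []
    result = ocr.ocr(image_path)
    for box, (text, confidence) in result[0]:
        lines.append({"bbox": box, "text": text, "conf": confidence})
    return lines

# annotations = {p: annotate(p) for p in poster_paths}   # poster_paths: crawled poster images (ours)
```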
| Data Size (St. 1 & St. 2) | Sen. ACC | NED |
| --- | --- | --- |
| 160k & 160k | 93.11% | 98.21% |
| 1M & 160k | 97.59% | 99.38% |

Table 4. Quantitative comparison with different data sizes for text rendering training.
# 9. Discussion on advantages of end-to-end over two-stage methods.

The main weakness of two-stage methods (first inpaint the background, then render the text) is their inability to consistently provide a clean background for texts (see Fig. 12), reducing text readability, especially with complex backgrounds. In contrast, one-stage methods generate texts and backgrounds simultaneously, enabling them to create a clean backdrop or underlays that enhance text visibility.

![](images/1d93b0891749cf4e584bd0d8c2a0366ce2a33c0bed4b72a2ba222d7d57d42ce0.jpg)
Figure 12. Showcases for end-to-end and two-stage methods.

![](images/7255cb2de3ecc45d951a2703e4df897c410c3a500d2cbea9e40d0630fb329c24.jpg)

![](images/70b4fc4b9ea93367b5f8c88d331426b2da2ceffe70c8e631ce18d9d27c1b4f4c.jpg)

# 10. Text Position Control

The position control of PosterMaker uses a very straightforward approach (as shown in Fig. 13), mapping the text bounding box to a cosine position encoding, which is then concatenated with the text features and used as the input to TextRenderNet.
| Method | mIoU | IoU@0.5 | IoU@0.7 |
| --- | --- | --- | --- |
| Ours | 84.65% | 97.18% | 93.94% |
Table 5. Evaluation on text location accuracy.

To demonstrate our method's effectiveness, we evaluate a bounding box IoU (Intersection over Union) metric as follows: 1) we employ an OCR model to extract texts from the generated image; 2) for each ground-truth text, we identify the best-matched OCR-detected text based on edit distance and then calculate the IoU between their corresponding bounding boxes. We average the IoU score over all samples to obtain the mean IoU (termed mIoU), and we also report IoU@R, the proportion of samples with IoU higher than $R$. As shown in Tab. 5, our method achieves a high mIoU of $84.65\%$, and $93.94\%$ of samples have an IoU score higher than 0.7. These promising results prove that our text position control method is simple yet effective.

![](images/b8c56939fa446816f17a9f9e6141f67f07ea783277973f441764a0c8d18d3221.jpg)
Figure 13. Detailed illustration of how we construct the position embedding for controlling the text position.

# 11. Comparison Between GlyphByT5 and PosterMaker

GlyphByT5 [25, 26] is a recently proposed line of visual text rendering methods that achieves high text rendering accuracy. We discuss some differences and internal connections between our PosterMaker and GlyphByT5 regarding how text rendering is controlled.

- Text position control: GlyphByT5 achieves text position control by modifying the original cross-attention module with its proposed region-wise multi-head cross-attention. In contrast, our PosterMaker encodes the text location directly into the character-level text representation to accomplish text position control. As discussed in Sec. 10, our approach is both simple and effective for precise text location control.

- Text content control: both GlyphByT5 and our PosterMaker control the generation of text content by constructing a suitable text representation. Specifically, in this work, we claim that the key to accurate text rendering is to extract character-level visual features as the control condition and to carefully construct a robust text representation based on an off-the-shelf OCR model [34]. In GlyphByT5, the authors also extract character-level text features, but with a textual encoder, ByT5 [48]. They then propose glyph-alignment pre-training to align these textual features with the pre-trained visual encoder DINOv2 [33]. Additionally, they employ box-level contrastive learning with complex augmentations and a hard-mining strategy to enhance character-level discriminativeness. We hypothesize that the primary reason both our method and GlyphByT5 achieve high text rendering accuracy is the shared goal of constructing a robust character-level visual representation. In fact, the ability of GlyphByT5's character-level visual representation is distilled from the pre-trained visual encoder DINOv2, rather than inherited from the pre-trained textual encoder ByT5 itself. To verify this hypothesis, we adopt a more direct approach and replace the PPOCR encoder in PosterMaker with DINOv2 (a sketch of this variant is given below). As shown in Tab. 6, simply extracting character-wise visual features with DINOv2 can also achieve precise text rendering. This result further verifies our claim: the key to precise text rendering is to extract character-level visual features as the control condition.
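For reference, here is our sketch of such a DINOv2 character encoder: each fixed-font glyph image is encoded independently and its CLS embedding replaces the PPOCR feature in Eq. (2). The preprocessing (resize to 224, ImageNet normalization) and the `render_char` helper are our assumptions, not the exact configuration.

```python
# Sketch (ours) of a DINOv2 character encoder for the "DINOv2 Char" variant.
import torch
from torchvision import transforms

dinov2 = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14").eval()

preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])


@torch.no_grad()
def char_feature(glyph_image):
    x = preprocess(glyph_image.convert("RGB")).unsqueeze(0)   # (1, 3, 224, 224)
    return dinov2(x).squeeze(0)                               # (384,) CLS embedding

# R_c = torch.stack([char_feature(render_char(c)) for c in text])  # render_char: fixed-font rasterizer (ours)
```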
| Text Feature | Type | Sen. ACC | NED |
| --- | --- | --- | --- |
| PPOCR Line | visual feat. | 38.91% | 53.86% |
| PPOCR Char | visual feat. | 95.15% | 98.75% |
| DINOv2 Line | visual feat. | 4.25% | 20.59% |
| DINOv2 Char | visual feat. | 94.92% | 98.66% |
| GT (w/o Rec.) | - | 98.53% | 99.59% |
| GT (w/ SD3 Rec.) | - | 98.09% | 99.36% |

Table 6. Quantitative comparison using various text features.
# 12. Visualization of Training Samples

We present example training images from our dataset in Fig. 17. The dataset predominantly consists of Chinese text, with a small portion of English text. Additionally, it includes challenging cases with small-sized text elements.

# 13. The Generalization of Text Representation.

PosterMaker is trained primarily on common Chinese data, with only a minimal amount of English data. Despite this, it demonstrates a notable level of generalization, enabling it to generate English, Japanese, and uncommon Chinese characters that were not included in the training set (as shown in Fig. 16). To quantitatively evaluate the generalization capability of PosterMaker, we compare the accuracy of different text representations on uncommon characters using a randomly sampled uncommon-character benchmark (see Tab. 7). The results show that our method can also generalize well to some characters that are unseen in the training set. Our performance is inferior to the Canny baseline, likely because the Canny baseline has been pre-trained on large-scale image data.
| Text Feature | Type | Sen. ACC | NED |
| --- | --- | --- | --- |
| ByT5 | textual feat. | 2.01% | 10.27% |
| Canny | img | 65.12% | 74.56% |
| PPOCR Line | visual feat. | 8.34% | 15.84% |
| PPOCR Char | visual feat. | 61.54% | 70.38% |

Table 7. Quantitative comparison of the rendering results of different text features on uncommon characters.
# 14. Ablation about Foreground Extension Detector

We collected 20k manually annotated images to train the foreground extension detector. We randomly selected $10\%$ of the samples as a validation set and used the remaining $90\%$ for model training. We conduct ablation experiments on different architecture designs of the detector to verify the effectiveness of the proposed architecture. We implement two baselines: 1) RFNet [11]: we reimplemented RFNet based on the description in the paper [11]; since we could not access their depth and saliency detection models, we modified our implementation to use only the product image and generated image as input, excluding the depth and saliency maps. 2) RFNet (SAM): in this baseline, we replace the image encoder used in RFNet with the same SAM [18] image encoder used in our method.
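For concreteness, below is a rough sketch of the classification head of our foreground extension detector (Sec. 3.4, Fig. 6), which is the architecture being ablated here: SAM image features are concatenated with $M_s$, $M_i$, and $M_s - M_i$, then reduced by convolutions and an MLP to a single extension score. Channel sizes and layer counts are our own guesses, not the exact configuration.

```python
# Rough sketch (ours) of the foreground-extension detector's classification head.
import torch
import torch.nn as nn
import torch.nn.functional as F


class ExtensionHead(nn.Module):
    def __init__(self, feat_ch=256, hidden=128):
        super().__init__()
        self.convs = nn.Sequential(
            nn.Conv2d(feat_ch + 3, hidden, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(hidden, hidden, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.mlp = nn.Sequential(nn.Flatten(), nn.Linear(hidden, hidden), nn.ReLU(),
                                 nn.Linear(hidden, 1))

    def forward(self, sam_feats, m_s, m_i):
        size = sam_feats.shape[-2:]                      # resize masks to feature resolution
        m_s = F.interpolate(m_s, size=size, mode="nearest")
        m_i = F.interpolate(m_i, size=size, mode="nearest")
        x = torch.cat([sam_feats, m_s, m_i, m_s - m_i], dim=1)
        return torch.sigmoid(self.mlp(self.convs(x)))    # (B, 1) foreground-extension score


head = ExtensionHead()
score = head(torch.randn(2, 256, 64, 64),
             torch.rand(2, 1, 1024, 1024), torch.rand(2, 1, 1024, 1024))
print(score.shape)  # torch.Size([2, 1])
```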
| Method | Precision | Recall | F1 Score |
| --- | --- | --- | --- |
| RFNet (our impl.) | 76.52% | 75.52% | 76.02% |
| RFNet (SAM) | 81.35% | 80.99% | 81.17% |
| Ours | 83.52% | 84.81% | 84.16% |
Table 8. Evaluation on different architectures of the foreground extension detector.

![](images/acced411c7664b213472fb0ffbfb1a259736f4feed475108678b21a9429b879f.jpg)
Subject

![](images/bd46d644d9015e5b410203395e465ed73688ec000cef69633c7ab447a9200c8f.jpg)
Generated Image

![](images/ffedaf24c35862b032dc99b4a85857797a2e5b2a367e7bd1415005d7a390fe76.jpg)
Activation Map

![](images/19a2fb38e515248ee4b45a3ab38d79f63e2f74ea44d6f7b97387045908418125.jpg)

![](images/af6d3635434938a34b71ba93e460bad374edc1de60303ed9f7c30405bf9390ec.jpg)
Figure 14. Class activation map of the foreground extension detector.

![](images/8c9d4514fd6a4a9d1ec9f5ff58b54853662ebc113a4a253a827240e56b0b06da.jpg)

As summarized in Tab. 8, our proposed foreground extension detector outperforms the baselines by a considerable margin, which demonstrates its effectiveness.

In Fig. 14, we visualize the class activation map [54] of our foreground extension detector. We observe notably higher activation scores in the extended foreground regions than in other areas. This demonstrates that the detector has effectively learned to discern foreground extension cases, so it can serve as a robust reward model for fine-tuning PosterMaker to mitigate the foreground extension problem.

# 15. Ablation about SceneGenNet

SceneGenNet enables our model to perform background inpainting while preserving the subject, so we cannot simply remove it; instead, we replace it with SDEdit [30] to achieve inpainting. As shown in Tab. 9, replacing it results in a significant drop in performance.
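For reference, when the dedicated inpainting branch is removed, subject preservation has to be enforced outside the network. One simple option (a sketch of the general idea, not necessarily how this ablation was run) is to composite the original subject back with the mask after generation:

```python
# Toy pixel-space sketch of mask compositing: keep subject pixels from the
# original image and take everything else from the generated image.
import torch

def composite(subject, generated, mask):
    """subject, generated: (B, 3, H, W); mask: (B, 1, H, W) with 1 = subject region."""
    return mask * subject + (1.0 - mask) * generated
```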
| Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑ |
| --- | --- | --- | --- | --- |
| Ours w/o SceneGenNet | 90.53% | 97.95% | 79.44 | 26.67 |
| Ours | 93.36% | 98.39% | 65.35 | 27.04 |

Table 9. Comparison between SceneGenNet and SDEdit.
# 16. Discussion on the impact of the test set size.

To ensure a fairer comparison between PosterMaker and the baseline methods, we expanded the test set to 5,000 samples (10x the previous PosterBenchmark). The results are shown in Tab. 10, and the experimental conclusions remain consistent with those on the previous test set. Due to the way the FID metric is calculated, increasing the test set size leads to a significant decrease in the FID scores of all methods, but the conclusion remains the same.
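As background for this observation, FID compares Gaussian statistics of Inception features between the generated and reference sets, so its value is sensitive to how many samples are used. A toy sketch of computing it with torchmetrics (which needs the optional torch-fidelity dependency) is shown below; the feature dimension and sample counts here are illustrative only, not the settings used for the reported numbers.

```python
# Toy FID computation with torchmetrics (requires torch-fidelity to be installed).
import torch
from torchmetrics.image.fid import FrechetInceptionDistance

fid = FrechetInceptionDistance(feature=64)
real = torch.randint(0, 256, (64, 3, 299, 299), dtype=torch.uint8)   # reference posters
fake = torch.randint(0, 256, (64, 3, 299, 299), dtype=torch.uint8)   # generated posters
fid.update(real, real=True)
fid.update(fake, real=False)
print(float(fid.compute()))
```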
| Model | Sen. ACC ↑ | NED ↑ | FID ↓ | CLIP-T ↑ |
| --- | --- | --- | --- | --- |
| Glyph-ByT5-v2 | 67.87% | 86.23% | 20.37 | 21.08 |
| SD3_canny&inpaint | 74.49% | 88.78% | 17.91 | 20.79 |
| GlyphDraw2 | 83.81% | 96.49% | 15.24 | 20.67 |
| Ours | 90.20% | 97.58% | 13.36 | 21.36 |
+ +Table 10. Comparison with baseline methods on 5,000 test samples. + +# 17. Discussion on the meaningless texts generated outside target position. + +In our early experimental attempts about text rendering in poster generation, we found that the trained model sometimes generates meaningless texts outside the target area of the text, which will seriously affect the aesthetics. We conjecture that the main reason is that the ground truth images sometimes contain text outside the specified position. To solve this problem, we masked out the extra text during training and it solved most cases. + +Specifically, SceneGenNet is initialized from pre-trained SD3 Inpainting-Controlnet [7]. In the second stage of training, we simultaneously mask out the regions of the untrained texts (usually those that are too small or just logos) both in the subject mask input to SceneGenNet and in the ground truth image used for loss calculation(as shown in Fig. 15). It is worth noting that although these small texts and logos are not included in the training, we have also annotated them to address the aforementioned issues. Finally, this technique makes the loss corresponding to the masked-out regions very close to zero so that the model will not learn these meaningless texts. + +![](images/ac56ce7a14fe06acfd1f9d373213ba03b55a7a52cc249287d4914e6f752251c1.jpg) +Figure 15. Example of our solution technique for meaningless texts and logos that generated outside target position. + +![](images/773f2b5721a7dfd43745f510e4aa2a780733a1f155cb049f71c66cfa0f62e74a.jpg) + +![](images/62ab0255279e0013a7a9e0ffe8d85575ce34f5e5f9e399dfd33d8dbdb10447d6.jpg) + +![](images/5e80a7606f80a52667546bcd4d7872be923bd45b4c53b620e658b4b3e64d671f.jpg) +Figure 16. Visualization results on texts in English, Japanese, and uncommon Chinese characters. + +![](images/74039fd0e173fe7d59038a7f229de2e88e5cbcf1d43d3440544e155caba17099.jpg) + +![](images/4a2e2c8c0a9d1d6a8fb03c52c4c0a557c6b5d165113dc13020b29cdf24ce825f.jpg) + +![](images/6260cbe319bf3452d748f869daf3b8586b2edf5abe2fe1e22740950281fb35db.jpg) + +![](images/5377a14a7ca31602338199f71cf74f38cce1d22b831db83a4f7e315ec63825c7.jpg) + +![](images/285d0c0eb47d0b7c3ec34802cd6ff44913974f5624cce623b3c1b4d5858a5444.jpg) + +![](images/5ec602cf40fe1938ef2768c493fab20a46c6a41971b9cf7f91249bab5e7ac082.jpg) + +![](images/484ea3a158871455f056f770a790db31f20761ccaec686d36278db567da63f36.jpg) + +![](images/5a04c3f6873570243f276bd503122b3e1e26e195d7e7a21ae5c52320d5f8336e.jpg) + +![](images/ca0895ab89b57898376a5b8d1f4963a9a9aea4e850914a42fd1b3fa9e73a9d7b.jpg) + +![](images/d19d64c86c9f00cf6a484d8797879b816195a46db8a3fb1ba3ee4edbd33c9c0c.jpg) + +![](images/f65aecae76910308dbf4fdfd3db0dfa49557ea379c1fb2f97b2a0d19123d85b8.jpg) + +![](images/c9aac91754c4899933bcf0e740e1bfc3817b1d9f38f73587ff56903fd9479333.jpg) + +![](images/d13c740981bc7e2e64e6ebf40a3f42a41a3a67a6706cdbdd1ae3d96c325cc0f1.jpg) + +![](images/f1edcbe6ebae9de62998064f33c802d0608f55c5748245719098ae0035f7178c.jpg) +Figure 17. Visualization of ground truth for some samples in the dataset. 
![](images/1f80e0402f89e1240e016e8362ce19565d42bf67373ccd3cffd4465e9d757ab9.jpg)

![](images/95f067b8f9adc2c7f64f770fe86a8a0c33167d2b1220013b4f0653e183b12bdd.jpg)

![](images/30723d6c96c7f0e3a399f301dfdc4277f4fd49d43e6db8e55aa926072df85f2c.jpg)
sha256:479f456c9b391d589b5e956481eb3cfe594ad814016dac1f5beaa2cc426a83ce +size 3575 diff --git a/data/2025/2504_06xxx/2504.06632/images/5e80a7606f80a52667546bcd4d7872be923bd45b4c53b620e658b4b3e64d671f.jpg b/data/2025/2504_06xxx/2504.06632/images/5e80a7606f80a52667546bcd4d7872be923bd45b4c53b620e658b4b3e64d671f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fc43d10660619964e899b15a607b40ff8a11928 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/5e80a7606f80a52667546bcd4d7872be923bd45b4c53b620e658b4b3e64d671f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da2e3de34a734abeaecd49fc620e22c3c8e76178bfc9f15ff7c43d87610a3337 +size 11931 diff --git a/data/2025/2504_06xxx/2504.06632/images/5ec602cf40fe1938ef2768c493fab20a46c6a41971b9cf7f91249bab5e7ac082.jpg b/data/2025/2504_06xxx/2504.06632/images/5ec602cf40fe1938ef2768c493fab20a46c6a41971b9cf7f91249bab5e7ac082.jpg new file mode 100644 index 0000000000000000000000000000000000000000..333fefff77ed1b1c424b3231bf1c999286d054ce --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/5ec602cf40fe1938ef2768c493fab20a46c6a41971b9cf7f91249bab5e7ac082.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8d226291fc7faf07f38059c128b6e2ce7fbe82c95d936d267a232418a5cd667 +size 19706 diff --git a/data/2025/2504_06xxx/2504.06632/images/6057f34d7b5bcdecb9710dc1ea7043989105c23d15c139875fc1f6224f4cc489.jpg b/data/2025/2504_06xxx/2504.06632/images/6057f34d7b5bcdecb9710dc1ea7043989105c23d15c139875fc1f6224f4cc489.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89b13b72f98f41dbd71798a0a4eec9f4f37fc6ab --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/6057f34d7b5bcdecb9710dc1ea7043989105c23d15c139875fc1f6224f4cc489.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:084ced80f31dd9921928b34410196fffc4a882b3fefef40c3ea5551233392f92 +size 13893 diff --git a/data/2025/2504_06xxx/2504.06632/images/6260cbe319bf3452d748f869daf3b8586b2edf5abe2fe1e22740950281fb35db.jpg b/data/2025/2504_06xxx/2504.06632/images/6260cbe319bf3452d748f869daf3b8586b2edf5abe2fe1e22740950281fb35db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d050bebe03a87a4ca24998ee2d15f2bd8555b46 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/6260cbe319bf3452d748f869daf3b8586b2edf5abe2fe1e22740950281fb35db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4c3808f166a14bfb87c3ba1df11eff8ed1240ea416d603878b874c5ff0a4dd5 +size 13648 diff --git a/data/2025/2504_06xxx/2504.06632/images/62ab0255279e0013a7a9e0ffe8d85575ce34f5e5f9e399dfd33d8dbdb10447d6.jpg b/data/2025/2504_06xxx/2504.06632/images/62ab0255279e0013a7a9e0ffe8d85575ce34f5e5f9e399dfd33d8dbdb10447d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc6238c16618e43dd082a28a9aaab443db60897c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/62ab0255279e0013a7a9e0ffe8d85575ce34f5e5f9e399dfd33d8dbdb10447d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e2cfff6b6f852a10ec9582b37243bb62ee0ffedada38c71b330783b4454fa89 +size 5854 diff --git a/data/2025/2504_06xxx/2504.06632/images/664f809e620ed5313bd043a4f137e29ea847a04403fad44ef5ab96a28af07f67.jpg b/data/2025/2504_06xxx/2504.06632/images/664f809e620ed5313bd043a4f137e29ea847a04403fad44ef5ab96a28af07f67.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9e33ef4ad09f471fa0f4b01bf0c804d8f741dee --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06632/images/664f809e620ed5313bd043a4f137e29ea847a04403fad44ef5ab96a28af07f67.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57e798e0a8757c4c2b5c8abecaf7d5c24f17212953e0b9bb34332ffc7b9f44d8 +size 12579 diff --git a/data/2025/2504_06xxx/2504.06632/images/6ba6efda80ae24712f5628d9c9a49d3b0c5a7601c44fea4221ad6127c7f0c711.jpg b/data/2025/2504_06xxx/2504.06632/images/6ba6efda80ae24712f5628d9c9a49d3b0c5a7601c44fea4221ad6127c7f0c711.jpg new file mode 100644 index 0000000000000000000000000000000000000000..613a5f69ce475019f6330386200bd24853ecf557 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/6ba6efda80ae24712f5628d9c9a49d3b0c5a7601c44fea4221ad6127c7f0c711.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf97cc8c6d96107581f03e40a814fdfd1e6d276b2389bb6482a6cefb5f623481 +size 10259 diff --git a/data/2025/2504_06xxx/2504.06632/images/6d387ff5c8a05c8017ac5a154369d50fe2bc44b6362e2366fba384dd7ded98aa.jpg b/data/2025/2504_06xxx/2504.06632/images/6d387ff5c8a05c8017ac5a154369d50fe2bc44b6362e2366fba384dd7ded98aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f0b76cd49747c5e61ec6c36ed497c0021fbce37 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/6d387ff5c8a05c8017ac5a154369d50fe2bc44b6362e2366fba384dd7ded98aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d58a4342a97ddcaf0922ad75d2258503fec3406e71ec4a3cd1645d5dbc57b2f +size 32188 diff --git a/data/2025/2504_06xxx/2504.06632/images/70b4fc4b9ea93367b5f8c88d331426b2da2ceffe70c8e631ce18d9d27c1b4f4c.jpg b/data/2025/2504_06xxx/2504.06632/images/70b4fc4b9ea93367b5f8c88d331426b2da2ceffe70c8e631ce18d9d27c1b4f4c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5793293c4e3c0bc70a16e365272699a6b00960c9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/70b4fc4b9ea93367b5f8c88d331426b2da2ceffe70c8e631ce18d9d27c1b4f4c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59cb52db275fd9e3ff748790e2732a9979b40b0695cabe8a1d864bafdefd8d39 +size 17855 diff --git a/data/2025/2504_06xxx/2504.06632/images/7255cb2de3ecc45d951a2703e4df897c410c3a500d2cbea9e40d0630fb329c24.jpg b/data/2025/2504_06xxx/2504.06632/images/7255cb2de3ecc45d951a2703e4df897c410c3a500d2cbea9e40d0630fb329c24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10cabf22266a23bed05cedb63825a91bdf2a9447 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/7255cb2de3ecc45d951a2703e4df897c410c3a500d2cbea9e40d0630fb329c24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b25b9ed3ff344e20df409eb3d0ddbe60c0b75cbc07d682a9e686f2917785e02 +size 17593 diff --git a/data/2025/2504_06xxx/2504.06632/images/74039fd0e173fe7d59038a7f229de2e88e5cbcf1d43d3440544e155caba17099.jpg b/data/2025/2504_06xxx/2504.06632/images/74039fd0e173fe7d59038a7f229de2e88e5cbcf1d43d3440544e155caba17099.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4803098aaffe953a23a5c4afa45329f7b5b8ef9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/74039fd0e173fe7d59038a7f229de2e88e5cbcf1d43d3440544e155caba17099.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3a10e5a6355b730ece892ec19974a9f58828f3e06b8d62285253f936d9d86a3 +size 14659 diff --git a/data/2025/2504_06xxx/2504.06632/images/76fb2ebcebf1e1c35721cea4542b4bfcb7234470893d55ce6c3bf73c0f43ce28.jpg 
b/data/2025/2504_06xxx/2504.06632/images/76fb2ebcebf1e1c35721cea4542b4bfcb7234470893d55ce6c3bf73c0f43ce28.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0893467e47efd019386c31c6d7da5dffe8f3abda --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/76fb2ebcebf1e1c35721cea4542b4bfcb7234470893d55ce6c3bf73c0f43ce28.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92a41bdda49a9ce8c3b1029f8946fa2cb78dc5208691fb908cb622a27ce9b958 +size 10737 diff --git a/data/2025/2504_06xxx/2504.06632/images/773f2b5721a7dfd43745f510e4aa2a780733a1f155cb049f71c66cfa0f62e74a.jpg b/data/2025/2504_06xxx/2504.06632/images/773f2b5721a7dfd43745f510e4aa2a780733a1f155cb049f71c66cfa0f62e74a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e28d433642de3761ce6fb2612cdb751badd22e11 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/773f2b5721a7dfd43745f510e4aa2a780733a1f155cb049f71c66cfa0f62e74a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b3878a2e72e610646d33b99e6ac8dae7feeae1226bd32018f2367d782edc2a6 +size 15064 diff --git a/data/2025/2504_06xxx/2504.06632/images/7a184ebc870702926c0d50fabb66d44c3e79daadc427cc87722d2cdb992e8db9.jpg b/data/2025/2504_06xxx/2504.06632/images/7a184ebc870702926c0d50fabb66d44c3e79daadc427cc87722d2cdb992e8db9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27aa12d6b489d891cf969c8e1a63f49189293134 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/7a184ebc870702926c0d50fabb66d44c3e79daadc427cc87722d2cdb992e8db9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f762999239f0b214cd508efb1e3d61a0933594543c1429859e2c52bb02ac2e50 +size 8993 diff --git a/data/2025/2504_06xxx/2504.06632/images/7b1ab2581a2637e361192dadf19b149350be83c2f5e81ad624f504a04833ae50.jpg b/data/2025/2504_06xxx/2504.06632/images/7b1ab2581a2637e361192dadf19b149350be83c2f5e81ad624f504a04833ae50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..721d5357d2d0461c37b4c676ed91265f6319f2d6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/7b1ab2581a2637e361192dadf19b149350be83c2f5e81ad624f504a04833ae50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7078069d4e9bd0c4c280034052ea9c9eb0e1846f83359fe291687e487c162bbb +size 24692 diff --git a/data/2025/2504_06xxx/2504.06632/images/84cb058d0e8dc1bbdb90f7e73f2e5a9985e9375393a90b68108134dc28db47f5.jpg b/data/2025/2504_06xxx/2504.06632/images/84cb058d0e8dc1bbdb90f7e73f2e5a9985e9375393a90b68108134dc28db47f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aeef0f810082a1b18f944112abb098ca05cadc52 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/84cb058d0e8dc1bbdb90f7e73f2e5a9985e9375393a90b68108134dc28db47f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f22c9c56d2ecda6faa85e751d8d3ccbac2c47c98ee58faab47034e163aa1fe2b +size 39701 diff --git a/data/2025/2504_06xxx/2504.06632/images/879af9b19d382fa98252e55347ac57b0349226e8c666c4bc696de805ba1b0522.jpg b/data/2025/2504_06xxx/2504.06632/images/879af9b19d382fa98252e55347ac57b0349226e8c666c4bc696de805ba1b0522.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d4a17a56a7bb8918e57ec59f0b999f0a1bd0bf7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/879af9b19d382fa98252e55347ac57b0349226e8c666c4bc696de805ba1b0522.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ce6fbd9d3609a0cf1cc523866e09d395ba9d04f3ff7027905d1424d3950f10cc +size 13929 diff --git a/data/2025/2504_06xxx/2504.06632/images/88324587a54ed90f04e0beebb66bcd3330c40f445ac07add6ffd62a1ee4555da.jpg b/data/2025/2504_06xxx/2504.06632/images/88324587a54ed90f04e0beebb66bcd3330c40f445ac07add6ffd62a1ee4555da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b400937c7efa7b31a6bb2000a6b173eaa9b2cee --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/88324587a54ed90f04e0beebb66bcd3330c40f445ac07add6ffd62a1ee4555da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14767354db07b1d745f014b50273215644fafcfc5273f0f6101d8ce5ad6a2516 +size 11054 diff --git a/data/2025/2504_06xxx/2504.06632/images/8860ac0793d8245add147225b1af9376c5b05f22070e3ad2315d5b0d55f1fb48.jpg b/data/2025/2504_06xxx/2504.06632/images/8860ac0793d8245add147225b1af9376c5b05f22070e3ad2315d5b0d55f1fb48.jpg new file mode 100644 index 0000000000000000000000000000000000000000..826f19403e63002135cdd5c5f2c5ec73b9f0fa31 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/8860ac0793d8245add147225b1af9376c5b05f22070e3ad2315d5b0d55f1fb48.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:153d738456b27c1c94ec7f18dc83692dda04d2d8bb3cdcf8c5c6cea98039103b +size 11597 diff --git a/data/2025/2504_06xxx/2504.06632/images/8b46bbeabdcde0195c3c0ba74ad57a536fc5d0d9fa001be5a351e582462f2f21.jpg b/data/2025/2504_06xxx/2504.06632/images/8b46bbeabdcde0195c3c0ba74ad57a536fc5d0d9fa001be5a351e582462f2f21.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5e89fe68e2ae15a590f14995c11ab803618b967 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/8b46bbeabdcde0195c3c0ba74ad57a536fc5d0d9fa001be5a351e582462f2f21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54581d4f259a9e7e7452b017e17090f4aac1bb8c77e54e26332ba02952d22e11 +size 78834 diff --git a/data/2025/2504_06xxx/2504.06632/images/8c9d4514fd6a4a9d1ec9f5ff58b54853662ebc113a4a253a827240e56b0b06da.jpg b/data/2025/2504_06xxx/2504.06632/images/8c9d4514fd6a4a9d1ec9f5ff58b54853662ebc113a4a253a827240e56b0b06da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e5749590948a1aab425e8b0f769f16efce9e8e1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/8c9d4514fd6a4a9d1ec9f5ff58b54853662ebc113a4a253a827240e56b0b06da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b01caeca5c09709585cf3271910869879d8c13b4626b7f6f9be00bb143813e1c +size 8894 diff --git a/data/2025/2504_06xxx/2504.06632/images/94f522890ac89db5cbced1996353dcdd9ce9d6d4ee56253dc26bff58a3b35ab3.jpg b/data/2025/2504_06xxx/2504.06632/images/94f522890ac89db5cbced1996353dcdd9ce9d6d4ee56253dc26bff58a3b35ab3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9a97b779ef5767136117f9a68adfa9bc5b4e5186 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/94f522890ac89db5cbced1996353dcdd9ce9d6d4ee56253dc26bff58a3b35ab3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7fb82fa3ee180fb10c362b14b408c7c4f2fc8ea0dd99e0ca37251a0eb354b46 +size 20487 diff --git a/data/2025/2504_06xxx/2504.06632/images/95f067b8f9adc2c7f64f770fe86a8a0c33167d2b1220013b4f0653e183b12bdd.jpg b/data/2025/2504_06xxx/2504.06632/images/95f067b8f9adc2c7f64f770fe86a8a0c33167d2b1220013b4f0653e183b12bdd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b929dcaef4fc7497b1ced0f4c00327224f7c88dc --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06632/images/95f067b8f9adc2c7f64f770fe86a8a0c33167d2b1220013b4f0653e183b12bdd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0789f16d9bece8e12e99f6fd8e66a8ca58453019643397ed128c8d737ae30237 +size 21388 diff --git a/data/2025/2504_06xxx/2504.06632/images/a37a51ba7e3c0388312024f2864b9a532c649ad3b79c31574d200575e0d7adf1.jpg b/data/2025/2504_06xxx/2504.06632/images/a37a51ba7e3c0388312024f2864b9a532c649ad3b79c31574d200575e0d7adf1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fda45bbad33f33ee2852f910d6eae1dc4151b940 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/a37a51ba7e3c0388312024f2864b9a532c649ad3b79c31574d200575e0d7adf1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ad7dabfc7c6ab46523c5134c18746fc81a1f9aa517ccbc1b479a4aab79448aa +size 23207 diff --git a/data/2025/2504_06xxx/2504.06632/images/a58e688e8dedf31932fdfcb7201e6f920b0c0e8366cfc7da9d05f237165b5659.jpg b/data/2025/2504_06xxx/2504.06632/images/a58e688e8dedf31932fdfcb7201e6f920b0c0e8366cfc7da9d05f237165b5659.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe4073206619d9a565e4623cb4eaa2c95ac8274c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/a58e688e8dedf31932fdfcb7201e6f920b0c0e8366cfc7da9d05f237165b5659.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be28f2b099ff2d4213facc602c4e1949f74bb5c8b3c9d691f81b6edff7caae10 +size 3912 diff --git a/data/2025/2504_06xxx/2504.06632/images/ac56ce7a14fe06acfd1f9d373213ba03b55a7a52cc249287d4914e6f752251c1.jpg b/data/2025/2504_06xxx/2504.06632/images/ac56ce7a14fe06acfd1f9d373213ba03b55a7a52cc249287d4914e6f752251c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a40074633cd61fcb06ff0e1869a2450c68a66e8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/ac56ce7a14fe06acfd1f9d373213ba03b55a7a52cc249287d4914e6f752251c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28570741239ef7771fb37e141ad4c7a771d3076aa55af753a62640a0d87c94eb +size 14148 diff --git a/data/2025/2504_06xxx/2504.06632/images/ac592ba23bddb135c0915fc0128194478bb41293ec4e39dc475919d75340602d.jpg b/data/2025/2504_06xxx/2504.06632/images/ac592ba23bddb135c0915fc0128194478bb41293ec4e39dc475919d75340602d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37e2371a21832aec31d7df7034f870c1eeee7ea7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/ac592ba23bddb135c0915fc0128194478bb41293ec4e39dc475919d75340602d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b95bd1189a99340896da09b1377c5bfd49c1cd60bf3c45470d2aba1e04701f64 +size 16231 diff --git a/data/2025/2504_06xxx/2504.06632/images/acced411c7664b213472fb0ffbfb1a259736f4feed475108678b21a9429b879f.jpg b/data/2025/2504_06xxx/2504.06632/images/acced411c7664b213472fb0ffbfb1a259736f4feed475108678b21a9429b879f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1dd67aec17da74292ac1aeecd22c2d7047cb60f1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/acced411c7664b213472fb0ffbfb1a259736f4feed475108678b21a9429b879f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81ddf6c1db5487cbd754d0fc07bc0d3e0eda213afa4c04afccdd0d054cfb1fc5 +size 5990 diff --git a/data/2025/2504_06xxx/2504.06632/images/af6d3635434938a34b71ba93e460bad374edc1de60303ed9f7c30405bf9390ec.jpg 
b/data/2025/2504_06xxx/2504.06632/images/af6d3635434938a34b71ba93e460bad374edc1de60303ed9f7c30405bf9390ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..238acc1f6af2ed0dadfbf9ab54ac6718f1799c4f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/af6d3635434938a34b71ba93e460bad374edc1de60303ed9f7c30405bf9390ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dac7186b938438c8f44510eb507ba0d4067247af950cf5a89de39b7c5d1a1563 +size 6486 diff --git a/data/2025/2504_06xxx/2504.06632/images/b86755819730d39a871b328f8d3b51e66352c042590e7172bf79246b96c76c85.jpg b/data/2025/2504_06xxx/2504.06632/images/b86755819730d39a871b328f8d3b51e66352c042590e7172bf79246b96c76c85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2491bbc559e9e0c916fec31c15dfdc38acbfd27c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/b86755819730d39a871b328f8d3b51e66352c042590e7172bf79246b96c76c85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69351ad2491462b4c33bbe9360e136890f108784ec8e585121a756817637e67b +size 13742 diff --git a/data/2025/2504_06xxx/2504.06632/images/b8c56939fa446816f17a9f9e6141f67f07ea783277973f441764a0c8d18d3221.jpg b/data/2025/2504_06xxx/2504.06632/images/b8c56939fa446816f17a9f9e6141f67f07ea783277973f441764a0c8d18d3221.jpg new file mode 100644 index 0000000000000000000000000000000000000000..240096ccd1af29a01297cc47793fa085f41b38b8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/b8c56939fa446816f17a9f9e6141f67f07ea783277973f441764a0c8d18d3221.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2e74edf41c4d7f65f5a316e9abe2b60f912141cf52340d7eb334c34cdbf9431 +size 21929 diff --git a/data/2025/2504_06xxx/2504.06632/images/bd46d644d9015e5b410203395e465ed73688ec000cef69633c7ab447a9200c8f.jpg b/data/2025/2504_06xxx/2504.06632/images/bd46d644d9015e5b410203395e465ed73688ec000cef69633c7ab447a9200c8f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f11f1ece25b9481c6446c7e9802ae2cbe97496b6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/bd46d644d9015e5b410203395e465ed73688ec000cef69633c7ab447a9200c8f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7847f9899031000d7dbab88170d583df56abb02249f3c98cc7442644a36f8e7d +size 10956 diff --git a/data/2025/2504_06xxx/2504.06632/images/c0827822b7a1d65a2caaebda643136502f5b4bdb61348e9306f863b50c1f13e3.jpg b/data/2025/2504_06xxx/2504.06632/images/c0827822b7a1d65a2caaebda643136502f5b4bdb61348e9306f863b50c1f13e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..941c31f8d6e9b498781f120f2c58ec3c30b192e9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/c0827822b7a1d65a2caaebda643136502f5b4bdb61348e9306f863b50c1f13e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a9e48328b9e1bc75cbb4b8b4b0ef80c3e76685264fab1afe5ee33a99d74c259 +size 22504 diff --git a/data/2025/2504_06xxx/2504.06632/images/c202934d10fe0a130d64ae0ba56d2d6b00f5173e31d456711996a78dfc2d1cb4.jpg b/data/2025/2504_06xxx/2504.06632/images/c202934d10fe0a130d64ae0ba56d2d6b00f5173e31d456711996a78dfc2d1cb4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c37c429869db59c0bed82246e76924d037a0414 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/c202934d10fe0a130d64ae0ba56d2d6b00f5173e31d456711996a78dfc2d1cb4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3ba57bb0e65adb2e0224937b7a7a9109f90f4e10b8435e8483f0670f1a9da048 +size 32927 diff --git a/data/2025/2504_06xxx/2504.06632/images/c241b8495f20c789e14637a4732870b94dd037c3ecd9e058762ced2a4aab2709.jpg b/data/2025/2504_06xxx/2504.06632/images/c241b8495f20c789e14637a4732870b94dd037c3ecd9e058762ced2a4aab2709.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28400f516a44e5e2b9bdadd83076f1b4cc4bf75e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/c241b8495f20c789e14637a4732870b94dd037c3ecd9e058762ced2a4aab2709.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23aa0e34344575bd802bb92fe6614c89011560660b7449a584226be9d4505f83 +size 8311 diff --git a/data/2025/2504_06xxx/2504.06632/images/c9aac91754c4899933bcf0e740e1bfc3817b1d9f38f73587ff56903fd9479333.jpg b/data/2025/2504_06xxx/2504.06632/images/c9aac91754c4899933bcf0e740e1bfc3817b1d9f38f73587ff56903fd9479333.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f7b5aa33c7e82712c2796df7229ccb76d28c24b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/c9aac91754c4899933bcf0e740e1bfc3817b1d9f38f73587ff56903fd9479333.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ceef4012999407101328395a70d02503142aa5e1fc7e184283cf5429be45495 +size 17466 diff --git a/data/2025/2504_06xxx/2504.06632/images/ca0895ab89b57898376a5b8d1f4963a9a9aea4e850914a42fd1b3fa9e73a9d7b.jpg b/data/2025/2504_06xxx/2504.06632/images/ca0895ab89b57898376a5b8d1f4963a9a9aea4e850914a42fd1b3fa9e73a9d7b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64d61a03e4803a853ab9317358253ec7b4ee590d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/ca0895ab89b57898376a5b8d1f4963a9a9aea4e850914a42fd1b3fa9e73a9d7b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:badf7295ad65a11b47fc447c7bb8fd11fbc29b3cd092494e50c2aa02d87a59f6 +size 16940 diff --git a/data/2025/2504_06xxx/2504.06632/images/cba135ab6536ff93c30644eef668d3f30af7317c5bf63b7a1b66f5d7419fe1cf.jpg b/data/2025/2504_06xxx/2504.06632/images/cba135ab6536ff93c30644eef668d3f30af7317c5bf63b7a1b66f5d7419fe1cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edb2b0de329f566089deececa08ee0706ddb6a5a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/cba135ab6536ff93c30644eef668d3f30af7317c5bf63b7a1b66f5d7419fe1cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3aee76ed4bdd2b3ffc8d00dfff85cb78c8632893462658cf48090f80d0a8e7df +size 14025 diff --git a/data/2025/2504_06xxx/2504.06632/images/cc26ae10209f45c45f172ef4c0e2705be1dbb550c1fbbfc2c45f08dbbb262431.jpg b/data/2025/2504_06xxx/2504.06632/images/cc26ae10209f45c45f172ef4c0e2705be1dbb550c1fbbfc2c45f08dbbb262431.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1798b942160f6f16732dd7815212e03d989a0303 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/cc26ae10209f45c45f172ef4c0e2705be1dbb550c1fbbfc2c45f08dbbb262431.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc2b788373256ada88d14860b206d94fd879cd5155c58bdecd5b015de0bc353d +size 6495 diff --git a/data/2025/2504_06xxx/2504.06632/images/cc4fa3e524b81f1d124f3a83bd3a2d6b24e7b0990c9c1eab0e4379791445aebf.jpg b/data/2025/2504_06xxx/2504.06632/images/cc4fa3e524b81f1d124f3a83bd3a2d6b24e7b0990c9c1eab0e4379791445aebf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3070180ef89d5cf5136cc4dfe4b8199f9d62270c --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06632/images/cc4fa3e524b81f1d124f3a83bd3a2d6b24e7b0990c9c1eab0e4379791445aebf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e70a2955e0dd405352c74a9af6909ed6b915afe7b74be0357d563e70b4cf31cc +size 5391 diff --git a/data/2025/2504_06xxx/2504.06632/images/d13c740981bc7e2e64e6ebf40a3f42a41a3a67a6706cdbdd1ae3d96c325cc0f1.jpg b/data/2025/2504_06xxx/2504.06632/images/d13c740981bc7e2e64e6ebf40a3f42a41a3a67a6706cdbdd1ae3d96c325cc0f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5413cd335bc5f1f484d1acf5c74ba75082dac3c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/d13c740981bc7e2e64e6ebf40a3f42a41a3a67a6706cdbdd1ae3d96c325cc0f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c64925f0ed1d292727e667a4fa5ed03e0a696d627855707be56d4baf3a6da2ef +size 23249 diff --git a/data/2025/2504_06xxx/2504.06632/images/d19d64c86c9f00cf6a484d8797879b816195a46db8a3fb1ba3ee4edbd33c9c0c.jpg b/data/2025/2504_06xxx/2504.06632/images/d19d64c86c9f00cf6a484d8797879b816195a46db8a3fb1ba3ee4edbd33c9c0c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50410fb0ca8d48d46583dca69072fd71b072156e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/d19d64c86c9f00cf6a484d8797879b816195a46db8a3fb1ba3ee4edbd33c9c0c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e181e8cb6d68d9cd2e3b923a2c1d0efe7a6535366abf8c7df9cde0ecd9e36323 +size 20206 diff --git a/data/2025/2504_06xxx/2504.06632/images/d55101ab862e7083e8673064786bd8a14af343d42b954cda7f36b0d7bc51d0df.jpg b/data/2025/2504_06xxx/2504.06632/images/d55101ab862e7083e8673064786bd8a14af343d42b954cda7f36b0d7bc51d0df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2147a4ff2c0deda25de104a103c0983c3e0ca4a0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/d55101ab862e7083e8673064786bd8a14af343d42b954cda7f36b0d7bc51d0df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e2c51eab207e81896fc71ebf9a6ef67c1b7eff00c6f147e96bb2f984a3818eb +size 3737 diff --git a/data/2025/2504_06xxx/2504.06632/images/d57e8b3cd2f71d6889bca5f2899b15a010f1cd79ba9d11b597c7b10a57738729.jpg b/data/2025/2504_06xxx/2504.06632/images/d57e8b3cd2f71d6889bca5f2899b15a010f1cd79ba9d11b597c7b10a57738729.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06ad87bfd8e49639a567379700e4990948ddabd3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/d57e8b3cd2f71d6889bca5f2899b15a010f1cd79ba9d11b597c7b10a57738729.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cb2fa162ac85ebce978a2c021de0462c06a32cdbbb8b434cfd931c750d7d2e4 +size 9412 diff --git a/data/2025/2504_06xxx/2504.06632/images/d6744c5d9caa6bc83f69bb57ecb1d9daafd4f9a7d94f1863a5c86d9575225be2.jpg b/data/2025/2504_06xxx/2504.06632/images/d6744c5d9caa6bc83f69bb57ecb1d9daafd4f9a7d94f1863a5c86d9575225be2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ede813c73ddf6c598184604e2f36e685a37e6489 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/d6744c5d9caa6bc83f69bb57ecb1d9daafd4f9a7d94f1863a5c86d9575225be2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05a0d07b31580a624277cc03b3ac1d7e45e00a5606d22c14c6db6d3975cfb35b +size 18750 diff --git a/data/2025/2504_06xxx/2504.06632/images/d7751a41607c78a3558de3d5bbf74fffe5b47dff69d5f6c0fb1aec1026926f20.jpg 
b/data/2025/2504_06xxx/2504.06632/images/d7751a41607c78a3558de3d5bbf74fffe5b47dff69d5f6c0fb1aec1026926f20.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b58273e60ef53eedd25fb165147d1acf41f6db8f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/d7751a41607c78a3558de3d5bbf74fffe5b47dff69d5f6c0fb1aec1026926f20.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee60c4b2d9f1bd9e725bae0e1c4ce68059c119d9085ab42ad796affdcba87d93 +size 21933 diff --git a/data/2025/2504_06xxx/2504.06632/images/d83b09b24b46dfeee4c8c54a89e5dbf00003f3dc7e316e42c12abbfdcf799593.jpg b/data/2025/2504_06xxx/2504.06632/images/d83b09b24b46dfeee4c8c54a89e5dbf00003f3dc7e316e42c12abbfdcf799593.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40a8fe534586fdba95ee2b4973cb52b8d0de83e5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/d83b09b24b46dfeee4c8c54a89e5dbf00003f3dc7e316e42c12abbfdcf799593.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ef7eec6d8e0b8691f49ba6d0ba0603b2b86fa2f42f278d2661c92689fb8fed9 +size 15040 diff --git a/data/2025/2504_06xxx/2504.06632/images/dfbca06eace34b9928322d5af9014b4a18479b600dfa3a56f5147f507be663cf.jpg b/data/2025/2504_06xxx/2504.06632/images/dfbca06eace34b9928322d5af9014b4a18479b600dfa3a56f5147f507be663cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a726f6319fa43b186973b5705543c52f6bd1ec03 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/dfbca06eace34b9928322d5af9014b4a18479b600dfa3a56f5147f507be663cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abed94f9b54ee48e706495517e92a1dd007ffead3908596a23999b98a044cc4e +size 8333 diff --git a/data/2025/2504_06xxx/2504.06632/images/e14fa0fb52af6019d2f7fd749129efe78e4f649029e674de79c8f0fa553d9312.jpg b/data/2025/2504_06xxx/2504.06632/images/e14fa0fb52af6019d2f7fd749129efe78e4f649029e674de79c8f0fa553d9312.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0dfd9c8f93e9a522164b048392c2cba5c08721e1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/e14fa0fb52af6019d2f7fd749129efe78e4f649029e674de79c8f0fa553d9312.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e2deafd191a86b3fd2620a3ea12d8756276b315537ade35fd1510003a9429e3 +size 8688 diff --git a/data/2025/2504_06xxx/2504.06632/images/e20e8244690f5e01dcd53e455d92f3a60769ab100ea63c696af31a4b42c9de1c.jpg b/data/2025/2504_06xxx/2504.06632/images/e20e8244690f5e01dcd53e455d92f3a60769ab100ea63c696af31a4b42c9de1c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f24a09b5107a2bec56b023ff006d7c24808376e2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/e20e8244690f5e01dcd53e455d92f3a60769ab100ea63c696af31a4b42c9de1c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5c155955b1031315c1bb3cfb0b310ccb16f0b395872a4def1ffd16de97feb68 +size 13969 diff --git a/data/2025/2504_06xxx/2504.06632/images/e6705e40e73fa87f1df270a6d2380df0e9a7efa0f5d05b1e5a0f2a835d460f14.jpg b/data/2025/2504_06xxx/2504.06632/images/e6705e40e73fa87f1df270a6d2380df0e9a7efa0f5d05b1e5a0f2a835d460f14.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41e18412e8026f61d061f8e8c981512a6c49922e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/e6705e40e73fa87f1df270a6d2380df0e9a7efa0f5d05b1e5a0f2a835d460f14.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c7bb12cacab4e73701c86eab4708599bdfb082edcf7e71f25fdbdd794fcf9574 +size 15990 diff --git a/data/2025/2504_06xxx/2504.06632/images/efadff1d49693fc289e772cae52adb6dc2a92ba704dca7a62cfbaedaf7a7cadf.jpg b/data/2025/2504_06xxx/2504.06632/images/efadff1d49693fc289e772cae52adb6dc2a92ba704dca7a62cfbaedaf7a7cadf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc2f216432ceb449d55530bc6fef9a74add61ea0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/efadff1d49693fc289e772cae52adb6dc2a92ba704dca7a62cfbaedaf7a7cadf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b269467fec845fdc36b30dc28727af0075b50b940ed93b07b9586a2c26b4935 +size 11982 diff --git a/data/2025/2504_06xxx/2504.06632/images/f1edcbe6ebae9de62998064f33c802d0608f55c5748245719098ae0035f7178c.jpg b/data/2025/2504_06xxx/2504.06632/images/f1edcbe6ebae9de62998064f33c802d0608f55c5748245719098ae0035f7178c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afb4404b344ee35dfa94964feeeddea08b68f5c7 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/f1edcbe6ebae9de62998064f33c802d0608f55c5748245719098ae0035f7178c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1db10310c693fa73cc75410ab2f600dd36b1f9021f5644647463432b0ddf9c77 +size 18345 diff --git a/data/2025/2504_06xxx/2504.06632/images/f4521cddeceba886319b3866a3f1acd34aea14164e3e0df48fbea79cc2442101.jpg b/data/2025/2504_06xxx/2504.06632/images/f4521cddeceba886319b3866a3f1acd34aea14164e3e0df48fbea79cc2442101.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e30282fa94dc7807700ad7b66dcc3e9e7fd2c99 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/f4521cddeceba886319b3866a3f1acd34aea14164e3e0df48fbea79cc2442101.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:899c65480680549878ba1f551fc9c608885c28d0a1170b529972e8ef8858b9da +size 5681 diff --git a/data/2025/2504_06xxx/2504.06632/images/f65aecae76910308dbf4fdfd3db0dfa49557ea379c1fb2f97b2a0d19123d85b8.jpg b/data/2025/2504_06xxx/2504.06632/images/f65aecae76910308dbf4fdfd3db0dfa49557ea379c1fb2f97b2a0d19123d85b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11a0ed6f83dd77fe73c7143ebf5aa5e239af04d9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/f65aecae76910308dbf4fdfd3db0dfa49557ea379c1fb2f97b2a0d19123d85b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65e36fa0e6aee99729ab8d8cd97f7c1c6441167718fbe0e115e31a76847a3ab3 +size 23732 diff --git a/data/2025/2504_06xxx/2504.06632/images/f8b790dd38d0e52b2ec90ca9820adac388e4331906f3563714af637d4a26fe5d.jpg b/data/2025/2504_06xxx/2504.06632/images/f8b790dd38d0e52b2ec90ca9820adac388e4331906f3563714af637d4a26fe5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ecd918d66d75e4c5dc8ffe4673ccd2451b5215e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/f8b790dd38d0e52b2ec90ca9820adac388e4331906f3563714af637d4a26fe5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdc15d6641b1e9f1887d10500bf3ebcc9999ddaa34745bfb1569de98ebac531d +size 41088 diff --git a/data/2025/2504_06xxx/2504.06632/images/feef9c7b9f80eb27c54a517336ee0da7210d0f42e9a0a3a0f5135fc86b1da784.jpg b/data/2025/2504_06xxx/2504.06632/images/feef9c7b9f80eb27c54a517336ee0da7210d0f42e9a0a3a0f5135fc86b1da784.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e453bf18ea19b60be7ac84b54ffdeb4b4b24e11a --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06632/images/feef9c7b9f80eb27c54a517336ee0da7210d0f42e9a0a3a0f5135fc86b1da784.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3c7d0c1edb9a9117e47a805efbb83bd9e59dfe98484221bcf80569270a8c5d0 +size 166637 diff --git a/data/2025/2504_06xxx/2504.06632/images/ffedaf24c35862b032dc99b4a85857797a2e5b2a367e7bd1415005d7a390fe76.jpg b/data/2025/2504_06xxx/2504.06632/images/ffedaf24c35862b032dc99b4a85857797a2e5b2a367e7bd1415005d7a390fe76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d45c75eae705b95d0198da4f5ca21bb9f04540a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/images/ffedaf24c35862b032dc99b4a85857797a2e5b2a367e7bd1415005d7a390fe76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d8a5bd9fd12c268d7514859c8c630e8950af83c8ba95f0e2e9de0e6ecee95a9 +size 20432 diff --git a/data/2025/2504_06xxx/2504.06632/layout.json b/data/2025/2504_06xxx/2504.06632/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ace968b39e7913fc8118446c92a01ef0f880c91a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06632/layout.json @@ -0,0 +1,13627 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 107, + 103, + 504, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 103, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 107, + 103, + 504, + 140 + ], + "type": "text", + "content": "PosterMaker: Towards High-Quality Product Poster Generation with Accurate Text Rendering" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "spans": [ + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "text", + "content": "Yifan " + }, + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "inline_equation", + "content": "\\mathrm{Gao}^{1,2*^{\\dagger}}" + }, + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "text", + "content": ", Zihang Lin" + }, + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "text", + "content": ", Chuanbin Liu" + }, + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "inline_equation", + "content": "^{1\\ddagger}" + }, + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "text", + "content": ", Min Zhou" + }, + { + "bbox": [ + 160, + 160, + 449, + 175 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 201, + 175, + 408, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 175, + 408, + 190 + ], + "spans": [ + { + "bbox": [ + 201, + 175, + 408, + 190 + ], + "type": "text", + "content": "Tiezheng Ge², Bo Zheng², Hongtao Xie¹" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 100, + 190, + 511, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 190, + 511, + 204 + ], + "spans": [ + { + "bbox": [ + 100, + 190, + 511, + 204 + ], + "type": "text", + "content": "1University of Science and Technology of China 2Taubao & Tmall Group of Alibaba" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 168, + 205, + 442, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 205, + 442, + 217 + ], + "spans": [ + { + "bbox": [ + 168, + 205, + 442, + 217 + ], + "type": "text", + "content": "eafn@mail.ustc.edu.cn {liucb92, htxie}@ustc.edu.cn" + } + ] + } + 
], + "index": 5 + }, + { + "bbox": [ + 133, + 220, + 478, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 220, + 478, + 232 + ], + "spans": [ + { + "bbox": [ + 133, + 220, + 478, + 232 + ], + "type": "text", + "content": "{linzihang.lzh, yunqi.zm, tiezheng.gtz, bozheng}@alibaba-inc.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 181, + 234, + 426, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 234, + 426, + 245 + ], + "spans": [ + { + "bbox": [ + 181, + 234, + 426, + 245 + ], + "type": "text", + "content": "Project page: https://poster-maker.github.io" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 100, + 247, + 134, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 247, + 134, + 258 + ], + "spans": [ + { + "bbox": [ + 100, + 247, + 134, + 258 + ], + "type": "text", + "content": "Prompt" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 71, + 266, + 157, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 266, + 157, + 312 + ], + "spans": [ + { + "bbox": [ + 71, + 266, + 157, + 312 + ], + "type": "text", + "content": "The box of fish oil supplements is placed on a wooden table, with a background of a serene ocean and clear sky, symbolizing purity and the natural source of the product" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 71, + 339, + 157, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 339, + 157, + 395 + ], + "spans": [ + { + "bbox": [ + 71, + 339, + 157, + 395 + ], + "type": "text", + "content": "The subject rests on a smooth, dark wooden table, surrounded by a few scattered leaves and delicate flowers, with a serene garden scene complete with blooming flowers and lush greenery in the background." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 159, + 247, + 203, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 247, + 203, + 258 + ], + "spans": [ + { + "bbox": [ + 159, + 247, + 203, + 258 + ], + "type": "text", + "content": "Subject" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 160, + 253, + 203, + 315 + ], + "blocks": [ + { + "bbox": [ + 160, + 253, + 203, + 315 + ], + "lines": [ + { + "bbox": [ + 160, + 253, + 203, + 315 + ], + "spans": [ + { + "bbox": [ + 160, + 253, + 203, + 315 + ], + "type": "image", + "image_path": "1e079d3f112cc12b9f993bc37690dfe7939ba2bbd0baf1a45063ce19c15ac543.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 160, + 353, + 213, + 388 + ], + "blocks": [ + { + "bbox": [ + 160, + 353, + 213, + 388 + ], + "lines": [ + { + "bbox": [ + 160, + 353, + 213, + 388 + ], + "spans": [ + { + "bbox": [ + 160, + 353, + 213, + 388 + ], + "type": "image", + "image_path": "a58e688e8dedf31932fdfcb7201e6f920b0c0e8366cfc7da9d05f237165b5659.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 235, + 247, + 258, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 247, + 258, + 257 + ], + "spans": [ + { + "bbox": [ + 235, + 247, + 258, + 257 + ], + "type": "text", + "content": "Text" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 214, + 257, + 282, + 326 + ], + "blocks": [ + { + "bbox": [ + 214, + 257, + 282, + 326 + ], + "lines": [ + { + "bbox": [ + 214, + 257, + 282, + 326 + ], + "spans": [ + { + "bbox": [ + 214, + 257, + 282, + 326 + ], + "type": "image", + "image_path": "17f2983698b127a00d4e62597e010fa56f7291a6f66ec063e7f2f40531d73f5f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 214, + 327, + 282, + 417 + ], + "blocks": [ + { + "bbox": [ + 214, + 327, + 282, + 417 + ], + "lines": [ + { + "bbox": [ + 214, + 327, + 282, + 417 + ], + "spans": [ + { + "bbox": [ + 214, + 327, + 282, + 417 + ], + "type": "image", + "image_path": "f4521cddeceba886319b3866a3f1acd34aea14164e3e0df48fbea79cc2442101.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 299, + 247, + 328, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 247, + 328, + 257 + ], + "spans": [ + { + "bbox": [ + 299, + 247, + 328, + 257 + ], + "type": "text", + "content": "Poster" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 283, + 257, + 352, + 326 + ], + "blocks": [ + { + "bbox": [ + 283, + 257, + 352, + 326 + ], + "lines": [ + { + "bbox": [ + 283, + 257, + 352, + 326 + ], + "spans": [ + { + "bbox": [ + 283, + 257, + 352, + 326 + ], + "type": "image", + "image_path": "6ba6efda80ae24712f5628d9c9a49d3b0c5a7601c44fea4221ad6127c7f0c711.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 283, + 327, + 352, + 417 + ], + "blocks": [ + { + "bbox": [ + 283, + 327, + 352, + 417 + ], + "lines": [ + { + "bbox": [ + 283, + 327, + 352, + 417 + ], + "spans": [ + { + "bbox": [ + 283, + 327, + 352, + 417 + ], + "type": "image", + "image_path": "664f809e620ed5313bd043a4f137e29ea847a04403fad44ef5ab96a28af07f67.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 398, + 247, + 487, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 398, + 247, + 487, + 258 + ], + "spans": [ + { + "bbox": [ + 398, + 247, + 487, + 258 + ], + "type": "text", + "content": "Previous: two stage" + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 356, + 258, + 473, + 324 + ], + "blocks": [ + { + "bbox": [ + 356, + 258, + 473, + 324 + ], + "lines": [ + { + "bbox": [ + 356, + 258, + 473, + 324 + ], + "spans": [ + { + "bbox": [ + 356, + 258, + 473, + 324 + ], + "type": "image", + "image_path": "3d43dd864821885fae7c2db8123d1c1ed34fd14792cb07a03aaaf5258e5c537b.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 475, + 258, + 531, + 324 + ], + "blocks": [ + { + "bbox": [ + 475, + 258, + 531, + 324 + ], + "lines": [ + { + "bbox": [ + 475, + 258, + 531, + 324 + ], + "spans": [ + { + "bbox": [ + 475, + 258, + 531, + 324 + ], + "type": "image", + "image_path": "0acb3c0d5fed3a3f8193e9127851af5894725354a094709aea933134f3e3b7b0.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 403, + 332, + 477, + 343 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 403, + 332, + 477, + 343 + ], + "spans": [ + { + "bbox": [ + 403, + 332, + 477, + 343 + ], + "type": "text", + "content": "Ours: end to end" + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 356, + 361, + 462, + 411 + ], + "blocks": [ + { + "bbox": [ + 356, + 361, + 462, + 411 + ], + "lines": [ + { + "bbox": [ + 356, + 361, + 462, + 411 + ], + "spans": [ + { + "bbox": [ + 356, + 361, + 462, + 411 + ], + "type": "image", + "image_path": "5aafc7be6f249efe6e065066c893a2da912a408f5a9da7a7962c20d68194f624.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 474, + 346, + 531, + 411 + ], + "blocks": [ + { + "bbox": [ + 474, + 346, + 531, + 411 + ], + "lines": [ + { + "bbox": [ + 474, + 346, + 531, + 411 + ], + "spans": [ + { + "bbox": [ + 474, + 346, + 531, + 411 + ], + "type": "image", + "image_path": "0881d676bb6c6f70842f5ed1ac298edb0c67e1c6d95af2b034679feb230f99be.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 70, + 418, + 179, + 582 + ], + "blocks": [ + { + "bbox": [ + 70, + 418, + 179, + 582 + ], + "lines": [ + { + "bbox": [ + 70, + 418, + 179, + 582 + ], + "spans": [ + { + "bbox": [ + 70, + 418, + 179, + 582 + ], + "type": "image", + "image_path": "84cb058d0e8dc1bbdb90f7e73f2e5a9985e9375393a90b68108134dc28db47f5.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 588, + 555, + 632 + ], + "lines": [ + { + "bbox": [ + 55, + 588, + 555, + 632 + ], + "spans": [ + { + "bbox": [ + 55, + 588, + 555, + 632 + ], + "type": "text", + "content": "Figure 1. (a) Definition of the advertising product poster generation task. The input includes the prompt, subject image, and the texts to be rendered with their layouts. The output is the poster image. (b) The comparison of our method with the previous method. PosterMaker generates posters end-to-end, while previous methods first generate poster backgrounds and then render texts. 
(c) Visualization results demonstrate that PosterMaker can generate harmonious and aesthetically pleasing posters with accurate texts and maintain subject fidelity." + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 181, + 419, + 262, + 501 + ], + "blocks": [ + { + "bbox": [ + 197, + 407, + 207, + 418 + ], + "lines": [ + { + "bbox": [ + 197, + 407, + 207, + 418 + ], + "spans": [ + { + "bbox": [ + 197, + 407, + 207, + 418 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 181, + 419, + 262, + 501 + ], + "lines": [ + { + "bbox": [ + 181, + 419, + 262, + 501 + ], + "spans": [ + { + "bbox": [ + 181, + 419, + 262, + 501 + ], + "type": "image", + "image_path": "e6705e40e73fa87f1df270a6d2380df0e9a7efa0f5d05b1e5a0f2a835d460f14.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 181, + 501, + 261, + 582 + ], + "blocks": [ + { + "bbox": [ + 181, + 501, + 261, + 582 + ], + "lines": [ + { + "bbox": [ + 181, + 501, + 261, + 582 + ], + "spans": [ + { + "bbox": [ + 181, + 501, + 261, + 582 + ], + "type": "image", + "image_path": "dfbca06eace34b9928322d5af9014b4a18479b600dfa3a56f5147f507be663cf.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 263, + 419, + 343, + 501 + ], + "blocks": [ + { + "bbox": [ + 263, + 419, + 343, + 501 + ], + "lines": [ + { + "bbox": [ + 263, + 419, + 343, + 501 + ], + "spans": [ + { + "bbox": [ + 263, + 419, + 343, + 501 + ], + "type": "image", + "image_path": "8860ac0793d8245add147225b1af9376c5b05f22070e3ad2315d5b0d55f1fb48.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 263, + 503, + 343, + 582 + ], + "blocks": [ + { + "bbox": [ + 263, + 503, + 343, + 582 + ], + "lines": [ + { + "bbox": [ + 263, + 503, + 343, + 582 + ], + "spans": [ + { + "bbox": [ + 263, + 503, + 343, + 582 + ], + "type": "image", + "image_path": "d57e8b3cd2f71d6889bca5f2899b15a010f1cd79ba9d11b597c7b10a57738729.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 582, + 308, + 590 + ], + "lines": [ + { + "bbox": [ + 299, + 582, + 308, + 590 + ], + "spans": [ + { + "bbox": [ + 299, + 582, + 308, + 590 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 343, + 419, + 426, + 501 + ], + "blocks": [ + { + "bbox": [ + 343, + 419, + 426, + 501 + ], + "lines": [ + { + "bbox": [ + 343, + 419, + 426, + 501 + ], + "spans": [ + { + "bbox": [ + 343, + 419, + 426, + 501 + ], + "type": "image", + "image_path": "76fb2ebcebf1e1c35721cea4542b4bfcb7234470893d55ce6c3bf73c0f43ce28.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 343, + 503, + 426, + 582 + ], + "blocks": [ + { + "bbox": [ + 343, + 503, + 426, + 582 + ], + "lines": [ + { + "bbox": [ + 343, + 503, + 426, + 582 + ], + "spans": [ + { + "bbox": [ + 343, + 503, + 426, + 582 + ], + "type": "image", + "image_path": "b86755819730d39a871b328f8d3b51e66352c042590e7172bf79246b96c76c85.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + 
}, + { + "type": "image", + "bbox": [ + 427, + 419, + 536, + 582 + ], + "blocks": [ + { + "bbox": [ + 427, + 419, + 536, + 582 + ], + "lines": [ + { + "bbox": [ + 427, + 419, + 536, + 582 + ], + "spans": [ + { + "bbox": [ + 427, + 419, + 536, + 582 + ], + "type": "image", + "image_path": "a37a51ba7e3c0388312024f2864b9a532c649ad3b79c31574d200575e0d7adf1.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "bbox": [ + 152, + 639, + 200, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 639, + 200, + 651 + ], + "spans": [ + { + "bbox": [ + 152, + 639, + 200, + 651 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 55, + 660, + 295, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 660, + 295, + 684 + ], + "spans": [ + { + "bbox": [ + 55, + 660, + 295, + 684 + ], + "type": "text", + "content": "Product posters, which integrate subject, scene, and text, are crucial promotional tools for attracting customers. Cre" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 313, + 643, + 555, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 643, + 555, + 703 + ], + "spans": [ + { + "bbox": [ + 313, + 643, + 555, + 703 + ], + "type": "text", + "content": "ating such posters using modern image generation methods is valuable, while the main challenge lies in accurately rendering text, especially for complex writing systems like Chinese, which contains over 10,000 individual characters. In this work, we identify the key to precise text rendering" + } + ] + } + ], + "index": 39 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.06632v1 [cs.CV] 9 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 693, + 222, + 704 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 693, + 222, + 704 + ], + "spans": [ + { + "bbox": [ + 69, + 693, + 222, + 704 + ], + "type": "text", + "content": "* Equal contribution. ‡ Corresponding author." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 69, + 704, + 241, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 704, + 241, + 713 + ], + "spans": [ + { + "bbox": [ + 69, + 704, + 241, + 713 + ], + "type": "text", + "content": "† Work done during the internship at Alibaba Group." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 43 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 262 + ], + "type": "text", + "content": "as constructing a character-discriminative visual feature as a control signal. 
Based on this insight, we propose a robust character-wise representation as control and we develop TextRenderNet, which achieves a high text rendering accuracy of over " + }, + { + "bbox": [ + 55, + 72, + 294, + 262 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 55, + 72, + 294, + 262 + ], + "type": "text", + "content": ". Another challenge in poster generation is maintaining the fidelity of user-specific products. We address this by introducing SceneGenNet, an inpainting-based model, and propose subject fidelity feedback learning to further enhance fidelity. Based on TextRenderNet and SceneGenNet, we present PosterMaker, an end-to-end generation framework. To optimize PosterMaker efficiently, we implement a two-stage training strategy that decouples text rendering and background generation learning. Experimental results show that PosterMaker outperforms existing baselines by a remarkable margin, which demonstrates its effectiveness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 274, + 136, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 274, + 136, + 285 + ], + "spans": [ + { + "bbox": [ + 56, + 274, + 136, + 285 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 293, + 295, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 293, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 293, + 295, + 521 + ], + "type": "text", + "content": "Product posters, which showcase items for sale within well-chosen background scenes and include descriptive text, play a vital role in e-commerce advertising by capturing customers' attention and boosting sales. Creating such posters necessitates photographing the product in carefully selected environments that highlight its features, as well as thoughtfully choosing text colors and fonts to ensure that the text is appealing, legible, and harmonious with the background. This process can be quite expensive. With the significant advancements in large-scale text-to-image (T2I) models [13, 35, 39], synthesizing such product posters with image generation models attracts increasing attention. In this paper, we focus on the product poster generation task. Specifically, given a prompt describing the background scene, the foreground image of the user-specified subject and some texts together with their layouts, we aim to develop a model to generate the subject into the desired scene background and accurately render the text in an end-to-end manner (as shown in Fig. 1 (a))." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 522, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 522, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 522, + 295, + 677 + ], + "type": "text", + "content": "A straightforward solution for this task is to first generate the subject into the desired scene [2, 11, 40], and then predict the text attributes (such as color and font) [14, 23] and render them on the image. However, such two-stage approach suffers from disharmony between the text and the poster background(as shown in Fig. 2 (b)). And collecting training data is also challenging since the text attributes, especially the text font, are difficult to extract from the poster. Another solution is learning to generate the poster using a per-pixel synthesis approach, which can benefit from directly learning the distribution of professionally designed posters. 
We focus on such one-stage solution. The main challenge is how to ensure the text rendering accuracy." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 713 + ], + "type": "text", + "content": "Many recent works [13, 25, 42, 49] have been proposed to improve the text rendering accuracy for large diffusion models. Great progress has been made and some" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 553, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 216 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 216 + ], + "type": "text", + "content": "recent work can achieve high rendering accuracy for English. However, for non-Latin languages like Chinese, one of the most widely spoken languages, achieving high rendering accuracy remains challenging. This difficulty stems from the existence of over 10,000 characters, with Chinese characters characterized by complex and diverse stroke patterns, making it extremely difficult to train a model to memorize the rendering of each individual character. Recent studies [4, 28, 42] have focused on extracting visual features of text as control signals. Typically, these approaches render text lines into glyph images and extract line-level text visual features to guide generation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 218, + 553, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 218, + 553, + 434 + ], + "spans": [ + { + "bbox": [ + 313, + 218, + 553, + 434 + ], + "type": "text", + "content": "Nevertheless, line-level visual features often lack the discriminative power to capture character-level visual nuances. To address this limitation, GlyphByT5 [25, 26] introduced a box-level contrastive loss with sophisticated glyph augmentation strategies to enhance character-level discriminativeness, achieving promising results. In this paper, we point out that the key to high-accuracy text rendering lies in constructing character-discriminative visual features as control signals. Specifically, we render each character as a glyph image and extract visual features via a visual encoder. These features are then concatenated with positional embeddings to form a character-level representation. Then we propose TextRenderNet, an SD3 [13] controlnet-like [53] architecture that takes the character-level representation as the control signal to render visual text. Our experiments demonstrate that the proposed character-level representation is effectively capable of achieving accurate text rendering." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 436, + 553, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 436, + 553, + 663 + ], + "spans": [ + { + "bbox": [ + 313, + 436, + 553, + 663 + ], + "type": "text", + "content": "In the task of poster generation, another important thing is to generate the user-specific subject into a desired scene while keeping high subject fidelity. Recent subject-driven controllable generation methods [40, 44, 51] can synthesize such images, but they still cannot ensure that the user-specified subject is completely consistent in the generated details (e.g., the logo on the product may be inaccurately generated), which could potentially mislead customers. 
Therefore, we follow poster generation methods [5, 11, 22] to address this task via introducing an inpainting-based module named SceneGenNet. However, we found that even using inpainting methods, subject consistency is not always achieved as the inpainting model sometimes extends the subject shape (as shown in Fig. 2 (a)). Similar phenomenon is also observed in [11, 12]. To address this issue, we elaboratively develop a detector to detect the foreground extension cases. Then we employ the detector as a reward model to train the SceneGenNet via feedback learning for further improving subject fidelity." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 665, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 553, + 713 + ], + "type": "text", + "content": "Combining the proposed TextRenderNet and SceneGenNet, we develop a framework named PosterMaker that can synthesize the product poster in an end-to-end manner. To efficiently optimize PosterMaker, we introduce a two-stage" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 71, + 154, + 144 + ], + "blocks": [ + { + "bbox": [ + 82, + 71, + 154, + 144 + ], + "lines": [ + { + "bbox": [ + 82, + 71, + 154, + 144 + ], + "spans": [ + { + "bbox": [ + 82, + 71, + 154, + 144 + ], + "type": "image", + "image_path": "cc4fa3e524b81f1d124f3a83bd3a2d6b24e7b0990c9c1eab0e4379791445aebf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 92, + 144, + 141, + 150 + ], + "lines": [ + { + "bbox": [ + 92, + 144, + 141, + 150 + ], + "spans": [ + { + "bbox": [ + 92, + 144, + 141, + 150 + ], + "type": "text", + "content": "User-Specified Texts" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 79, + 152, + 529, + 163 + ], + "lines": [ + { + "bbox": [ + 79, + 152, + 529, + 163 + ], + "spans": [ + { + "bbox": [ + 79, + 152, + 529, + 163 + ], + "type": "text", + "content": "Figure 2. The illustration of the three challenges faced by poster generation, which seriously hinder the practical application." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 156, + 72, + 228, + 144 + ], + "blocks": [ + { + "bbox": [ + 156, + 72, + 228, + 144 + ], + "lines": [ + { + "bbox": [ + 156, + 72, + 228, + 144 + ], + "spans": [ + { + "bbox": [ + 156, + 72, + 228, + 144 + ], + "type": "image", + "image_path": "cc26ae10209f45c45f172ef4c0e2705be1dbb550c1fbbfc2c45f08dbbb262431.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 162, + 144, + 214, + 151 + ], + "lines": [ + { + "bbox": [ + 162, + 144, + 214, + 151 + ], + "spans": [ + { + "bbox": [ + 162, + 144, + 214, + 151 + ], + "type": "text", + "content": "User-Specified Subject" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 230, + 72, + 304, + 144 + ], + "blocks": [ + { + "bbox": [ + 230, + 72, + 304, + 144 + ], + "lines": [ + { + "bbox": [ + 230, + 72, + 304, + 144 + ], + "spans": [ + { + "bbox": [ + 230, + 72, + 304, + 144 + ], + "type": "image", + "image_path": "e14fa0fb52af6019d2f7fd749129efe78e4f649029e674de79c8f0fa553d9312.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 144, + 298, + 151 + ], + "lines": [ + { + "bbox": [ + 239, + 144, + 298, + 151 + ], + "spans": [ + { + "bbox": [ + 239, + 144, + 298, + 151 + ], + "type": "text", + "content": "(a) Foreground Extension" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 304, + 72, + 378, + 144 + ], + "blocks": [ + { + "bbox": [ + 304, + 72, + 378, + 144 + ], + "lines": [ + { + "bbox": [ + 304, + 72, + 378, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 378, + 144 + ], + "type": "image", + "image_path": "25b6e838f24b2f49dde5dbd24af262e442a4c225a7be8405132847f005181a21.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 144, + 373, + 151 + ], + "lines": [ + { + "bbox": [ + 310, + 144, + 373, + 151 + ], + "spans": [ + { + "bbox": [ + 310, + 144, + 373, + 151 + ], + "type": "text", + "content": "(b)Text-Scene Disharmony" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 379, + 72, + 452, + 144 + ], + "blocks": [ + { + "bbox": [ + 379, + 72, + 452, + 144 + ], + "lines": [ + { + "bbox": [ + 379, + 72, + 452, + 144 + ], + "spans": [ + { + "bbox": [ + 379, + 72, + 452, + 144 + ], + "type": "image", + "image_path": "26a7703ae2cdb9597f122eb7e7605316ed457d62788d2b67698dee17ecbe5f74.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 387, + 144, + 443, + 151 + ], + "lines": [ + { + "bbox": [ + 387, + 144, + 443, + 151 + ], + "spans": [ + { + "bbox": [ + 387, + 144, + 443, + 151 + ], + "type": "text", + "content": "(c) Poor Text Rendering" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 454, + 72, + 528, + 144 + ], + "blocks": [ + { + "bbox": [ + 454, + 72, + 528, + 144 + ], + "lines": [ + { + "bbox": [ + 454, + 72, + 528, + 144 + ], + "spans": [ + { + "bbox": [ + 454, + 72, + 528, + 144 + ], + "type": "image", + "image_path": "88324587a54ed90f04e0beebb66bcd3330c40f445ac07add6ffd62a1ee4555da.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 477, 
+ 144, + 503, + 150 + ], + "lines": [ + { + "bbox": [ + 477, + 144, + 503, + 150 + ], + "spans": [ + { + "bbox": [ + 477, + 144, + 503, + 150 + ], + "type": "text", + "content": "Our Result" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 171, + 296, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 171, + 296, + 255 + ], + "spans": [ + { + "bbox": [ + 55, + 171, + 296, + 255 + ], + "type": "text", + "content": "training strategy to separately train TextRenderNet and SceneGenNet. This training strategy decouples the learning of text rendering and background image generation, thus TextRenderNet and SceneGenNet can focus on their specific tasks. Qualitative results (as shown in Fig. 1 (c)) demonstrate our training strategy is effective for training PosterMaker and it achieves promising poster generation results." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 256, + 261, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 256, + 261, + 266 + ], + "spans": [ + { + "bbox": [ + 67, + 256, + 261, + 266 + ], + "type": "text", + "content": "To summarize, our contributions are as follows:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 269, + 296, + 425 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 55, + 269, + 295, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 269, + 295, + 328 + ], + "spans": [ + { + "bbox": [ + 55, + 269, + 295, + 328 + ], + "type": "text", + "content": "- We proposed a novel framework named PosterMaker, which mainly consists of a TextRenderNet and a SceneGenNet. With a two-stage training strategy, PosterMaker can synthesis aesthetically product posters with texts accurately and harmoniously rendered on it." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 329, + 295, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 329, + 295, + 388 + ], + "spans": [ + { + "bbox": [ + 55, + 329, + 295, + 388 + ], + "type": "text", + "content": "- We reveal the core of achieving accurate Chinese text rendering is to construct a robust character-level text representation as the control condition. These findings can inspire future research on improving the text rendering abilities of T2I models." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 388, + 296, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 296, + 425 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 296, + 425 + ], + "type": "text", + "content": "- We improve the subject fidelity via subject fidelity feedback learning, which is shown effective in addressing the subject inconsistency issue." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 434, + 142, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 434, + 142, + 445 + ], + "spans": [ + { + "bbox": [ + 55, + 434, + 142, + 445 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 55, + 455, + 164, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 455, + 164, + 466 + ], + "spans": [ + { + "bbox": [ + 55, + 455, + 164, + 466 + ], + "type": "text", + "content": "2.1. 
Poster Generation" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 55, + 474, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 474, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 474, + 296, + 713 + ], + "type": "text", + "content": "Generating posters involves combining various elements like a subject image, a background scene image, and text to ensure the subject and text are prominently and accurately displayed while maintaining an appealing look. Automating this process is quite complex and challenging. Methods like AutoPoster [23], Prompt2Poster [45], and COLE [16] break it down into stages: creating images and layout, predicting the visual properties of text, and rendering the poster. These approaches have several steps and often struggle to precisely obtain all the necessary visual attributes like font and color gradients. With the emergence of more advanced generative models [35], methods like JoyType [19], Glyphbyt5 [25], and GlyphDraw2 [28] can directly generate the image and text together at the pixel level based on the poster prompt, text content, and layout. This more streamlined approach can leverage more readily available poster pixel data for training, but there is still room for improvement in terms of the overall poster cohesion and text accuracy. Our method is also a one-stage, direct pixel-level generation approach that simultaneously creates the image and" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 171, + 555, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 171, + 555, + 232 + ], + "spans": [ + { + "bbox": [ + 313, + 171, + 555, + 232 + ], + "type": "text", + "content": "text. However, our focus is on generating posters for a given product subject, where the input includes the subject image, prompt, text content, and layout. In addition to considering text rendering accuracy and overall poster harmony, we also need to maintain the fidelity of the product." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 238, + 441, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 238, + 441, + 251 + ], + "spans": [ + { + "bbox": [ + 313, + 238, + 441, + 251 + ], + "type": "text", + "content": "2.2. Visual Text Rendering" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 256, + 555, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 256, + 555, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 256, + 555, + 604 + ], + "type": "text", + "content": "Recently, text-to-image (T2I) models [1, 13, 41] have made significant strides in enhancing English text rendering by introducing stronger text encoders, such as T5 [38]. However, multilingual text image generation still faces significant challenges due to the large number of non-Latin characters and complex stroke structures. Early work [49] has explored the ControlNet-based method [53], using low-level visual images such as glyph images as the control signal for text image generation. However, glyph images are easily affected by text size and shape, especially complex stroke details. Besides, some recent works [4, 27, 28, 42, 52, 55] utilize more robust visual features, such as line-level OCR features as control conditions to further improve the text accuracy. But the line-level visual features still perform poorly in representing stroke details for each character. 
To address this issue, GlyphByT5 [25, 26] proposes a method with box-level contrastive learning to align the text features extracted from the language model with the features extracted from the visual encoder. To effectively learn such alignment, GlyphByT5 relies on collecting massive amounts of data and developing complex data augmentation strategies for the alignment pre-training, which lacks flexibility. In contrast, in this paper, we reveal that the key to high-accuracy text rendering lies in constructing discriminative character-level visual features. Thus we propose a plug-and-play and robust character-level text representation derived from off-the-shelf OCR encoders, which can accurately represent the visual structure of the text without additional training and enable precise text rendering." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 611, + 507, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 611, + 507, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 611, + 507, + 624 + ], + "type": "text", + "content": "2.3. Subject-Preserved Scene Generation" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 630, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 554, + 713 + ], + "type": "text", + "content": "To create a scene image with a product subject while ensuring subject fidelity, two main methods are commonly used. One is the subject-driven method [3, 6, 20, 36, 40], which adjusts the position, angle and lighting of the subject based on the prompt to create a harmonious image. However, it often struggles to preserve the significant features of the subject. The other utilizes inpainting-based background com" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 73, + 550, + 205 + ], + "blocks": [ + { + "bbox": [ + 61, + 73, + 550, + 205 + ], + "lines": [ + { + "bbox": [ + 61, + 73, + 550, + 205 + ], + "spans": [ + { + "bbox": [ + 61, + 73, + 550, + 205 + ], + "type": "image", + "image_path": "8b46bbeabdcde0195c3c0ba74ad57a536fc5d0d9fa001be5a351e582462f2f21.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 209, + 553, + 232 + ], + "lines": [ + { + "bbox": [ + 55, + 209, + 553, + 232 + ], + "spans": [ + { + "bbox": [ + 55, + 209, + 553, + 232 + ], + "type": "text", + "content": "Figure 3. The framework of the PosterMaker, which is based on the SD3. To precisely generate multilingual texts and create aesthetically pleasing poster scenes, TextRenderNet and SenceGenNet are introduced, whose outputs are used as control conditions added to the SD3." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 239, + 296, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 239, + 296, + 334 + ], + "spans": [ + { + "bbox": [ + 55, + 239, + 296, + 334 + ], + "type": "text", + "content": "pletion techniques [2, 11, 43]. 
It only generates the non-subject areas of an image and naturally keeps consistency in the original subject area. But it sometimes extends the foreground subject [11, 12], such as adding an extra handle to a cup, which also reduces subject fidelity. To maximize subject fidelity, our method uses background completion and a reward model to determine whether the foreground extension occurred, thereby enhancing subject fidelity." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 338, + 111, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 338, + 111, + 350 + ], + "spans": [ + { + "bbox": [ + 55, + 338, + 111, + 350 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 357, + 180, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 357, + 180, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 357, + 180, + 369 + ], + "type": "text", + "content": "3.1. Problem Formulation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 376, + 296, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 376, + 296, + 448 + ], + "spans": [ + { + "bbox": [ + 55, + 376, + 296, + 448 + ], + "type": "text", + "content": "This paper focuses on the creation of product posters, which typically consist of multiple elements such as text, subjects, and scenes, as illustrated in Fig. 1 (a). The central challenge of this task is to generate these elements accurately and harmoniously, offering both research and practical applications. The task is defined as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 129, + 454, + 294, + 467 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 454, + 294, + 467 + ], + "spans": [ + { + "bbox": [ + 129, + 454, + 294, + 467 + ], + "type": "interline_equation", + "content": "I _ {g} = f \\left(I _ {s}, M _ {s}, T, P\\right), \\tag {1}", + "image_path": "32e835a8f32d7bf58c13f374fc44caccece61a2ac658239857e6cafe78078e9f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "spans": [ + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "inline_equation", + "content": "I_{g}" + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "text", + "content": " denotes the generated poster image, " + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "inline_equation", + "content": "I_{s}" + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "text", + "content": " represents the subject image, and " + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "inline_equation", + "content": "M_{s}" + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "text", + "content": " is the subject mask. The variable " + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "text", + "content": " signifies the content and the position of text and " + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 55, + 473, + 296, + 544 + ], + "type": "text", + "content": " is the prompt describing the background scene. 
Subsequent sections will detail the design of PosterMaker, and our proposed solution to this task." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 552, + 134, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 552, + 134, + 562 + ], + "spans": [ + { + "bbox": [ + 55, + 552, + 134, + 562 + ], + "type": "text", + "content": "3.2. Framework" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": "As shown in Fig. 3, PosterMaker is developed based on Stable Diffusion 3 (SD3) [13], which contains a strong VAE for reconstructing the image details like text stroke. And we propose two modules, i.e., TextRenderNet and SceneGenNet, to address the poster generation task. TextRenderNet is specifically designed to learn visual text rendering, taking character-level visual text representations as input to achieve precise and controllable text rendering. SceneGenNet, on the other hand, accepts a masked image (indicating which content should remain unchanged) and a prompt, learning to generate the foreground subject within the desired scene described by the prompt. Both TextRenderNet" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 325, + 236, + 547, + 361 + ], + "blocks": [ + { + "bbox": [ + 325, + 236, + 547, + 361 + ], + "lines": [ + { + "bbox": [ + 325, + 236, + 547, + 361 + ], + "spans": [ + { + "bbox": [ + 325, + 236, + 547, + 361 + ], + "type": "image", + "image_path": "f8b790dd38d0e52b2ec90ca9820adac388e4331906f3563714af637d4a26fe5d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 363, + 553, + 384 + ], + "lines": [ + { + "bbox": [ + 313, + 363, + 553, + 384 + ], + "spans": [ + { + "bbox": [ + 313, + 363, + 553, + 384 + ], + "type": "text", + "content": "Figure 4. The details of TextRenderNet and SceneGenNet, showcasing their model architectures and their interactions with SD3." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 392, + 555, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 392, + 555, + 632 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 555, + 632 + ], + "type": "text", + "content": "and SceneGenNet are grounded in a ControlNet-like [52] architecture derived from SD3 and their architectures are detailed in Fig. 4. They share the same internal structure, comprising several cascaded MM-DiT blocks [13], with weights copied from the base model for initialization. The output of each MM-DiT block is added to the corresponding block of the base model after passing through a zero convolution layer [53]. The key distinction between the two modules lies in their input configurations. SceneGenNet takes the prompt as input to the text condition branch, and for the visual branch, the input is derived by the latent feature at timestep " + }, + { + "bbox": [ + 313, + 392, + 555, + 632 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 392, + 555, + 632 + ], + "type": "text", + "content": ", the subject mask, and the masked latent to preserve the foreground area. In contrast, TextRenderNet receives text representations (detailed in the next section) in the text condition branch for text rendering. 
An adapter, consisting of a linear layer and layer normalization, adjusts the feature dimensions of these text representations before they are input to TextRenderNet. The outputs of each block in TextRenderNet and SceneGenNet are directly added to the corresponding block outputs of the SD3 base model." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 637, + 553, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 637, + 553, + 663 + ], + "spans": [ + { + "bbox": [ + 313, + 637, + 553, + 663 + ], + "type": "text", + "content": "3.3. Character-level Visual Representation for Precise Text Rendering" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "content": "Recently, some works have explored multilingual visual text generation. Among them, a promising approach is based on ControlNet-like methods [42], which utilize both glyph images and line-level OCR features as conditions." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 71, + 296, + 121 + ], + "blocks": [ + { + "bbox": [ + 58, + 71, + 296, + 121 + ], + "lines": [ + { + "bbox": [ + 58, + 71, + 296, + 121 + ], + "spans": [ + { + "bbox": [ + 58, + 71, + 296, + 121 + ], + "type": "image", + "image_path": "4d77d17c9b67ac938b6d26b556a4f1a609b222eff4de3596b373fda50aac6eab.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 121, + 295, + 156 + ], + "lines": [ + { + "bbox": [ + 55, + 121, + 295, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 121, + 295, + 156 + ], + "type": "text", + "content": "Figure 5. The distinction between the multilingual character-level text representation we proposed and the line-level methods of previous works like AnyText [42] and GlyphDraw2 [28]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 161, + 296, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 161, + 296, + 257 + ], + "spans": [ + { + "bbox": [ + 55, + 161, + 296, + 257 + ], + "type": "text", + "content": "However, this control information cannot accurately represent characters: 1) glyph images are easily affected by text size and shape, making them less robust. 2) line-level visual features lack fine-grained stroke features and are limited by the OCR model's poor capability to recognize long texts. To address these challenges, this paper proposes a plug-and-play and robust character-level text representation, where each character is precisely represented by one token." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "spans": [ + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "text", + "content": "Specifically, the text " + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "text", + "content": " has " + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "text", + "content": " characters. For each character " + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "text", + "content": ", its feature is separately extracted by a pre-trained OCR encoder " + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "inline_equation", + "content": "f_{v}" + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "text", + "content": " and then averaged and pooled to obtain a compact character representation vector " + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "inline_equation", + "content": "r_{c_i} \\in \\mathbb{R}^c" + }, + { + "bbox": [ + 55, + 257, + 296, + 317 + ], + "type": "text", + "content": ". Thus, the character-level text representation is defined as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 124, + 321, + 294, + 335 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 321, + 294, + 335 + ], + "spans": [ + { + "bbox": [ + 124, + 321, + 294, + 335 + ], + "type": "interline_equation", + "content": "r _ {c i} = \\operatorname {a v g p o o l} \\left(f _ {v} \\left(I _ {c i}\\right)\\right), \\tag {2}", + "image_path": "d55101ab862e7083e8673064786bd8a14af343d42b954cda7f36b0d7bc51d0df.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 335, + 294, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 335, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 130, + 335, + 294, + 350 + ], + "type": "interline_equation", + "content": "R _ {c} = \\left[ r _ {c _ {1}}, r _ {c _ {2}}, \\dots , r _ {c _ {n}} \\right], \\tag {3}", + "image_path": "12b078e8e2fd28b63d5c774cf616c066a0bd21c41973822071bbfa2e26b7278a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "spans": [ + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "type": "inline_equation", + "content": "I_{c_i}" + }, + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "type": "text", + "content": "-th character image rendered in a fixed font, and " + }, + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "type": "inline_equation", + "content": "R_{c} \\in \\mathbb{R}^{n \\times c}" + }, + { + "bbox": [ + 55, + 358, + 295, + 382 + ], + "type": "text", + "content": " is the char-level text representation." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 383, + 296, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 383, + 296, + 490 + ], + "spans": [ + { + "bbox": [ + 55, + 383, + 296, + 490 + ], + "type": "text", + "content": "As shown in Fig. 5, compared to previous methods, our key difference is extracting representations from character glyph images. This enables the model to perceive character stroke structures and achieve high text accuracy. Additionally, since the number of characters is fixed, we can pre-extract the representations of each character and store them in a dictionary, eliminating the need for online rendering and feature extraction. This significantly simplifies the training and inference pipeline." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "text", + "content": "Finally, this text representation lacks order and positional information. Thus, the character order encoding " + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "inline_equation", + "content": "P_{rank}" + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "text", + "content": " is introduced to represent the order of characters in the text, which is implemented through a sinusoidal position encoding of the char order. Besides, inspired by GLIGEN [21], the text position coordinates are mapped to sinusoidal position encoding " + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "inline_equation", + "content": "P_{bbox}" + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "text", + "content": " to control the position of the text. Then we concatenate " + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "inline_equation", + "content": "P_{rank}" + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "inline_equation", + "content": "P_{bbox}" + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "inline_equation", + "content": "R_c" + }, + { + "bbox": [ + 55, + 491, + 296, + 597 + ], + "type": "text", + "content": " along the feature dimension to construct the final text representation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 602, + 205, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 602, + 205, + 615 + ], + "spans": [ + { + "bbox": [ + 55, + 602, + 205, + 615 + ], + "type": "text", + "content": "3.4. Improving Subject Fidelity" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 713 + ], + "type": "text", + "content": "In the task of generating product posters, it is crucial to maintain subject fidelity, i.e., ensuring that the subject in the generated poster remains consistent with the user-specified subject. To achieve this goal, we employ SceneGenNet to perform background inpainting, which is trained to precisely preserve the foreground subject and only inpaint the background according to the prompt. 
However, inpainting-based models sometimes extend the foreground subject into" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 321, + 69, + 542, + 167 + ], + "blocks": [ + { + "bbox": [ + 321, + 69, + 542, + 167 + ], + "lines": [ + { + "bbox": [ + 321, + 69, + 542, + 167 + ], + "spans": [ + { + "bbox": [ + 321, + 69, + 542, + 167 + ], + "type": "image", + "image_path": "7b1ab2581a2637e361192dadf19b149350be83c2f5e81ad624f504a04833ae50.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 170, + 552, + 180 + ], + "lines": [ + { + "bbox": [ + 315, + 170, + 552, + 180 + ], + "spans": [ + { + "bbox": [ + 315, + 170, + 552, + 180 + ], + "type": "text", + "content": "Figure 6. The model details of the foreground extension detector." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 316, + 186, + 420, + 261 + ], + "blocks": [ + { + "bbox": [ + 316, + 186, + 420, + 261 + ], + "lines": [ + { + "bbox": [ + 316, + 186, + 420, + 261 + ], + "spans": [ + { + "bbox": [ + 316, + 186, + 420, + 261 + ], + "type": "image", + "image_path": "cba135ab6536ff93c30644eef668d3f30af7317c5bf63b7a1b66f5d7419fe1cf.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 428, + 186, + 550, + 263 + ], + "blocks": [ + { + "bbox": [ + 428, + 186, + 550, + 263 + ], + "lines": [ + { + "bbox": [ + 428, + 186, + 550, + 263 + ], + "spans": [ + { + "bbox": [ + 428, + 186, + 550, + 263 + ], + "type": "image", + "image_path": "ac592ba23bddb135c0915fc0128194478bb41293ec4e39dc475919d75340602d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 265, + 553, + 286 + ], + "lines": [ + { + "bbox": [ + 313, + 265, + 553, + 286 + ], + "spans": [ + { + "bbox": [ + 313, + 265, + 553, + 286 + ], + "type": "text", + "content": "Figure 7. The illustration of our two-stage training strategy for efficiently optimizing PosterMaker." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 295, + 554, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 295, + 554, + 355 + ], + "spans": [ + { + "bbox": [ + 313, + 295, + 554, + 355 + ], + "type": "text", + "content": "another subject (as shown in Fig. 2 (b)) thereby compromising subject fidelity. We refer to this as \"foreground extension\". To mitigate this issue, we develop a model to detect foreground extension and employ it as a reward model to fine-tune PosterMaker to improve subject fidelity." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "spans": [ + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": "Foreground Extension Detector. We develop the foreground extension detector " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "S_{\\theta}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": " based on HQ-SAM [17]. As shown in Fig. 6, we input the generated image " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "I_{g}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": " to SAM [18] image encoder. 
The subject mask " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "M_{s}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": " and box " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "B_{s}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": " are provided as mask prompt and box prompt, respectively, to the HQ-SAM decoder to obtain an intermediate mask " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": ". Next, we concatenate the image features extracted from SAM encoder with " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "M_{s}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "M_{s} - M_{i}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": " at the channel dimension. The concatenated features are processed through convolutional layers and MLP layers to predict whether the foreground has been extended in the generated image. We collected 20k manually annotated images to train the foreground extension detector " + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "inline_equation", + "content": "S_{\\theta}" + }, + { + "bbox": [ + 313, + 356, + 554, + 511 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "spans": [ + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": "Subject Fidelity Feedback Learning. The foreground extension detector " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "S_{\\theta}" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": ", after the offline training, is used as a reward model to supervise PosterMaker to improve subject fidelity. 
Specifically, assuming the reverse process has a total of " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "T'" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": " steps, we follow ReFL [47] to first sample " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "z_{T'} \\sim \\mathcal{N}(0,1)" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": " and after " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "T' - t'" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": " steps of inference " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "(z_{T'} \\rightarrow z_{T'-1} \\rightarrow \\dots \\rightarrow z_{t'})" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": ", we obtain " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "z_{t'}" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "t' \\sim [1, t_1]" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": ". Then, we directly perform a one-step inference " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "z_{t'} \\rightarrow z_0" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": " to accelerate the reverse process. Furthermore, " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "z_0" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": " is decoded to the generated image " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": ". The detector " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "S_{\\theta}" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": " predicts the foreground extension score for " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": ", and this score is used as the reward loss to optimize the generator " + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "inline_equation", + "content": "G_{\\phi}" + }, + { + "bbox": [ + 313, + 513, + 555, + 668 + ], + "type": "text", + "content": " (i.e., PostMaker). 
The reward loss is defined as follows:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 673, + 553, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 673, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 317, + 673, + 553, + 714 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\text {r e w a r d}} (\\phi) = - \\mathbb {E} _ {(x, c, m) \\sim \\mathcal {D} _ {\\text {t r a i n}}, t ^ {\\prime} \\sim [ 1, t _ {1} ], z _ {T ^ {\\prime}} \\sim \\mathcal {N} (0, 1)} \\\\ \\log \\sigma \\left(1 - S _ {\\theta} \\left(G _ {\\phi} \\left(z _ {T ^ {\\prime}}, x, c, m, t ^ {\\prime}\\right), m\\right)\\right), \\tag {4} \\\\ \\end{array}", + "image_path": "24ca81d60b370c4397f3be04cf42a59193824bccef0b0c63b48b29673a01efdc.jpg" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 71, + 553, + 297 + ], + "blocks": [ + { + "bbox": [ + 59, + 71, + 553, + 297 + ], + "lines": [ + { + "bbox": [ + 59, + 71, + 553, + 297 + ], + "spans": [ + { + "bbox": [ + 59, + 71, + 553, + 297 + ], + "type": "image", + "image_path": "4aaf4cadc0bd2e76d849fa02a6386076ba7a57a8b840bee0a63daad304225b64.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 297, + 555, + 319 + ], + "lines": [ + { + "bbox": [ + 55, + 297, + 555, + 319 + ], + "spans": [ + { + "bbox": [ + 55, + 297, + 555, + 319 + ], + "type": "text", + "content": "Figure 8. Qualitative comparison with different methods. Best viewed on Screen. To aid comprehension, Chinese text lines in the image are translated into English and annotated using corresponding colors." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 323, + 296, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 323, + 296, + 384 + ], + "spans": [ + { + "bbox": [ + 55, + 323, + 296, + 384 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 323, + 296, + 384 + ], + "type": "inline_equation", + "content": "x, c, m" + }, + { + "bbox": [ + 55, + 323, + 296, + 384 + ], + "type": "text", + "content": " sampled from the train data " + }, + { + "bbox": [ + 55, + 323, + 296, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{train}}" + }, + { + "bbox": [ + 55, + 323, + 296, + 384 + ], + "type": "text", + "content": ", represent the subject image, control conditions, and subject mask respectively. To avoid overfitting, we don't calculate reward loss for the cases where the foreground extension score is below 0.3. 
Our total training loss is defined as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 397, + 295, + 410 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 397, + 295, + 410 + ], + "spans": [ + { + "bbox": [ + 119, + 397, + 295, + 410 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {d e n o i s e}} + \\lambda \\mathcal {L} _ {\\text {r e w a r d}}, \\tag {5}", + "image_path": "5aec90ffde2450410d03c71d671e77e85a895c0442dbc0116e68b9d874370019.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 424, + 295, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 424, + 295, + 448 + ], + "spans": [ + { + "bbox": [ + 55, + 424, + 295, + 448 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 424, + 295, + 448 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 55, + 424, + 295, + 448 + ], + "type": "text", + "content": " is the hyperparameter to adjust the weight of reward loss and the denoise loss." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 468, + 162, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 468, + 162, + 482 + ], + "spans": [ + { + "bbox": [ + 55, + 468, + 162, + 482 + ], + "type": "text", + "content": "3.5. Training Strategy" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 489, + 296, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 489, + 296, + 717 + ], + "spans": [ + { + "bbox": [ + 55, + 489, + 296, + 717 + ], + "type": "text", + "content": "To efficiently train PosterMaker, this paper introduces a two-stage training strategy, as shown in Fig. 7, aimed at decoupling the learning for text rendering and background image generation. Specifically, in the first stage, the training task is local text editing. We freeze SceneGenNet and only the TextRenderNet and adapter are optimized. Since we initialize SceneGenNet with pre-trained weights of inpainting-controlnet [7], it can fill the local background well thus TextRenderNet can focus on learning text generation. In the second stage, the training task is subject-based text-to-image generation. Here we froze TextRenderNet and only train the SceneGenNet. In this stage, SceneGenNet focuses on learning poster scenes and creative design from the train data. Notably, Stage 1 learns local text editing/inpainting and Stage 2 learns background inpainting, thus the input images indicating the area to inpaint are different (See Fig. 7). With such a two-stage training strategy, TextRenderNet and SceneGenNet can be efficiently optimized since they can focus on their specific tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 321, + 395, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 321, + 395, + 335 + ], + "spans": [ + { + "bbox": [ + 313, + 321, + 395, + 335 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 338, + 430, + 351 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 338, + 430, + 351 + ], + "spans": [ + { + "bbox": [ + 313, + 338, + 430, + 351 + ], + "type": "text", + "content": "4.1. 
Experimental Setup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 354, + 555, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 354, + 555, + 557 + ], + "spans": [ + { + "bbox": [ + 313, + 354, + 555, + 557 + ], + "type": "text", + "content": "Dataset. We crawl product posters from online e-commerce platforms to construct our training set. Our training data mainly consists of Chinese posters, we first employ PPOCRv4 model [34] to extract the text content and their bounding boxes from the images as a coarse annotation. And we ask some annotators to further refine the bounding boxes and correct the text content to improve the annotation quality. Resulting in a dataset containing 160k images. We generate image captions with GPT4-o [32] and extract foreground subject masks with " + }, + { + "bbox": [ + 313, + 354, + 555, + 557 + ], + "type": "inline_equation", + "content": "\\mathrm{U}^2" + }, + { + "bbox": [ + 313, + 354, + 555, + 557 + ], + "type": "text", + "content": "-Net [37] and VitMatte [50]. We randomly select 302 images for evaluation and leave the rest for training. To better evaluate the performance of our method, we use LLM [10] to generate some background prompts and text layouts as evaluation samples, after manually checking and removing those irrational ones, we obtain another 198 evaluation samples to form a final evaluation set named PosterBenchmark containing 500 samples." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "text", + "content": "Evaluation Metrics. We follow Anytext [42] to evaluate text rendering accuracy using two metrics: sentence accuracy (Sen. Acc) and normalized edit distance (NED). Specifically, we crop the text line from the generated image according to the provided bounding box and utilize the OCR model [31] to predict the content " + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "inline_equation", + "content": "s_{\\mathrm{pred}}" + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "text", + "content": " of the generated text line. We denote the ground truth text content as " + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "inline_equation", + "content": "s_{\\mathrm{gt}}" + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "text", + "content": ". A text line is considered to be correctly generated if " + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "inline_equation", + "content": "s_{\\mathrm{pred}} = s_{\\mathrm{gt}}" + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "text", + "content": "; this condition is used to calculate Sen. Acc. Additionally, we compute the normalized edit distance (NED) between " + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "inline_equation", + "content": "s_{\\mathrm{pred}}" + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "inline_equation", + "content": "s_{\\mathrm{gt}}" + }, + { + "bbox": [ + 313, + 558, + 556, + 714 + ], + "type": "text", + "content": " to measure their similarity. We further calculate FID [15] to measure the visual quality and CLIP-T [40] metric for evaluating text-image alignment." 
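To make the two text-accuracy metrics concrete, here is a small self-contained Python sketch; the helper and argument names (`levenshtein`, `text_metrics`, `pred_texts`, `gt_texts`) are illustrative rather than the authors' code, and NED is assumed to be the similarity form 1 - dist / max(len), which matches the higher-is-better values reported in Tab. 1.

```python
def levenshtein(a: str, b: str) -> int:
    """Plain dynamic-programming edit distance between two strings."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                  # deletion
                           cur[j - 1] + 1,               # insertion
                           prev[j - 1] + (ca != cb)))    # substitution
        prev = cur
    return prev[-1]

def text_metrics(pred_texts, gt_texts):
    """Sentence accuracy (exact match) and mean NED over OCR'd text lines."""
    assert len(pred_texts) == len(gt_texts) and gt_texts
    sen_acc = sum(p == g for p, g in zip(pred_texts, gt_texts)) / len(gt_texts)
    ned = sum(1.0 - levenshtein(p, g) / max(len(p), len(g), 1)
              for p, g in zip(pred_texts, gt_texts)) / len(gt_texts)
    return sen_acc, ned
```

For example, with the hypothetical inputs text_metrics(["50% OFF", "New Arrival"], ["50% OFF", "New Arrivals"]), sentence accuracy is 0.5 and the mean NED is slightly below 1.0.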
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 71, + 553, + 294 + ], + "blocks": [ + { + "bbox": [ + 59, + 71, + 553, + 294 + ], + "lines": [ + { + "bbox": [ + 59, + 71, + 553, + 294 + ], + "spans": [ + { + "bbox": [ + 59, + 71, + 553, + 294 + ], + "type": "image", + "image_path": "feef9c7b9f80eb27c54a517336ee0da7210d0f42e9a0a3a0f5135fc86b1da784.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 294, + 555, + 316 + ], + "lines": [ + { + "bbox": [ + 55, + 294, + 555, + 316 + ], + "spans": [ + { + "bbox": [ + 55, + 294, + 555, + 316 + ], + "type": "text", + "content": "Figure 9. Qualitative comparison using various text features. It is obvious that the character-level OCR features we used (PPOCR Char) are the most effective at maintaining character accuracy." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "spans": [ + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "text", + "content": "Implementation Details. Our SceneGenNet is initialized from pre-trained SD3 Inpainting-Controlnet [7] and TextRenderNet is initialized from SD3 [13] weight with the same configuration as in [8]. For Subject Fidelity Feedback Learning, we follow existing work [47] to uniformly sample " + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "inline_equation", + "content": "t'" + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "text", + "content": " between [1, 10]. Within this range, the one-step inference result of image " + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "inline_equation", + "content": "t'" + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "text", + "content": " is close to the full inference result. The weight coefficient of " + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 54, + 323, + 297, + 515 + ], + "type": "text", + "content": " is set to 0.0005. The learning rate is set to 1e-4 and the batch size is set to 192. We train our framework for 26k and 29.5k steps for training stage1 and stage2, respectively. Finally, PosterMaker was trained on 32 A100 GPUs for 3 days. During the sampling process, based on the statistical information, a maximum of 7 lines of text and 16 characters per line of text are selected from each image to render onto the image, as this setting can cover most situations in the dataset." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 521, + 222, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 521, + 222, + 533 + ], + "spans": [ + { + "bbox": [ + 55, + 521, + 222, + 533 + ], + "type": "text", + "content": "4.2. 
Comparison with Prior Works" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 536, + 296, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 536, + 296, + 693 + ], + "spans": [ + { + "bbox": [ + 55, + 536, + 296, + 693 + ], + "type": "text", + "content": "Baseline methods. We carefully designed the following baseline approaches based on existing open-sourced techniques for comparative analysis. SD3_inpaint_byt5: We encode the text content into prompt embeddings using ByT5 [48] and employ an adapter to map these embeddings to the original prompt embedding space of SD3 before feeding them into the controlnet, which enables the controlnet to render multilingual text. SD3_canny&inpaint: First render the text into a white-background image and extract the canny edge from it as control. Then finetune a pre-trained SD3 canny controlnet together with an inpainting controlnet to achieve multilingual text rendering. Anytext: It is the SOTA open-sourced T2I method that supports multilin" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 317, + 320, + 550, + 423 + ], + "blocks": [ + { + "bbox": [ + 317, + 320, + 550, + 423 + ], + "lines": [ + { + "bbox": [ + 317, + 320, + 550, + 423 + ], + "spans": [ + { + "bbox": [ + 317, + 320, + 550, + 423 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Sen. ACC ↑</td><td>NED ↑</td><td>FID ↓</td><td>CLIP-T ↑</td><td>FG Ext. Ratio ↓</td></tr>
<tr><td>SD3 inpaint_AnyText</td><td>52.78%</td><td>75.27%</td><td>100.87</td><td>26.90</td><td>14.82%</td></tr>
<tr><td>SD3 inpaint_byt5</td><td>52.28%</td><td>86.57%</td><td>65.45</td><td>26.71</td><td>14.60%</td></tr>
<tr><td>AnyText</td><td>63.90%</td><td>82.81%</td><td>71.27</td><td>26.69</td><td>19.25%</td></tr>
<tr><td>Glyph-ByT5-v2</td><td>69.54%</td><td>87.65%</td><td>79.23</td><td>26.60</td><td>18.91%</td></tr>
<tr><td>SD3_canny&inpaint</td><td>80.75%</td><td>92.75%</td><td>67.19</td><td>27.03</td><td>14.38%</td></tr>
<tr><td>GlyphDraw2</td><td>86.14%</td><td>96.78%</td><td>72.49</td><td>26.72</td><td>16.52%</td></tr>
<tr><td>GT (w/ SD1.5 Rec.)</td><td>76.95%</td><td>89.91%</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>GT (w/ SD3 Rec.)</td><td>98.09%</td><td>99.36%</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>GT</td><td>98.53%</td><td>99.59%</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Ours (SD1.5)</td><td>72.12%</td><td>88.01%</td><td>68.17</td><td>26.93</td><td>-</td></tr>
<tr><td>Ours</td><td>93.36%</td><td>98.39%</td><td>65.35</td><td>27.04</td><td>11.57%</td></tr></table>
", + "image_path": "447d088256e1c87d6ae91a0c725ac663c352ccd8596de25616f41d878902395b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 352, + 424, + 515, + 434 + ], + "lines": [ + { + "bbox": [ + 352, + 424, + 515, + 434 + ], + "spans": [ + { + "bbox": [ + 352, + 424, + 515, + 434 + ], + "type": "text", + "content": "Table 1. Comparison with baseline methods." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 437, + 555, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 555, + 568 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 555, + 568 + ], + "type": "text", + "content": "gual text rendering and its text editing mode supports text inpainting [42]. So we directly finetune it on our data using its text editing training pipeline. SD3_inpaint_Anytext: First generate the background with SD3 inpainting control-net, then render the text on the corresponding region using Anytext. Glyph-ByT5-v2 and GlyphDraw2: They are both the SOTA T2I methods that support multilingual text rendering [26, 28]. However, they don't have open-sourced pre-trained weights, so we reproduced them on our dataset. And we added an inpainting controlnet for them to support subject-preserved generation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 570, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 556, + 714 + ], + "type": "text", + "content": "Quantitative Comparison. We trained all baseline models on the same dataset, and then quantitatively compared all methods on the PosterBenchmark, as shown in Tab. 1. It is worth noting that SD3 is used as the base model by default, but since we observed that the SD1.5 VAE leads to significant error in reconstruction, to enable a more equitable comparison between our method and AnyText (SD1.5 architecture), we also implemented an SD1.5 version of PosterMaker with the same experimental setup as AnyText. As the VAEs, especially SD1.5, introduce some reconstruction error and the OCR model may incorrectly recognize some characters, we also report the metrics on ground truth" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 66, + 702, + 194, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 702, + 194, + 713 + ], + "spans": [ + { + "bbox": [ + 66, + 702, + 194, + 713 + ], + "type": "text", + "content": "Details can be found in the Appendix." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 80, + 70, + 270, + 159 + ], + "blocks": [ + { + "bbox": [ + 80, + 70, + 270, + 159 + ], + "lines": [ + { + "bbox": [ + 80, + 70, + 270, + 159 + ], + "spans": [ + { + "bbox": [ + 80, + 70, + 270, + 159 + ], + "type": "table", + "html": "
<table><tr><td>Text Feature</td><td>Type</td><td>Sen. ACC</td><td>NED</td></tr>
<tr><td>ByT5</td><td>textual feat.</td><td>33.48%</td><td>54.50%</td></tr>
<tr><td>Canny</td><td>img</td><td>81.50%</td><td>92.72%</td></tr>
<tr><td>TrOCR Line</td><td>visual feat.</td><td>26.58%</td><td>49.46%</td></tr>
<tr><td>TrOCR Char</td><td>visual feat.</td><td>94.27%</td><td>98.54%</td></tr>
<tr><td>PPOCR Line</td><td>visual feat.</td><td>38.91%</td><td>53.86%</td></tr>
<tr><td>PPOCR Char (Ours)</td><td>visual feat.</td><td>95.15%</td><td>98.75%</td></tr>
<tr><td>GT (w/o Rec.)</td><td>-</td><td>98.53%</td><td>99.59%</td></tr>
<tr><td>GT (w/ SD3 Rec.)</td><td>-</td><td>98.09%</td><td>99.36%</td></tr></table>
", + "image_path": "c202934d10fe0a130d64ae0ba56d2d6b00f5173e31d456711996a78dfc2d1cb4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 59, + 173, + 291, + 201 + ], + "blocks": [ + { + "bbox": [ + 64, + 159, + 286, + 170 + ], + "lines": [ + { + "bbox": [ + 64, + 159, + 286, + 170 + ], + "spans": [ + { + "bbox": [ + 64, + 159, + 286, + 170 + ], + "type": "text", + "content": "Table 2. Quantitative comparison using various text features." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 173, + 291, + 201 + ], + "lines": [ + { + "bbox": [ + 59, + 173, + 291, + 201 + ], + "spans": [ + { + "bbox": [ + 59, + 173, + 291, + 201 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>FG Ext. Ratio ↓</td><td>Sen. ACC ↑</td><td>NED ↑</td><td>FID ↓</td><td>CLIP-T ↑</td></tr>
<tr><td>Ours</td><td>11.57%</td><td>93.36%</td><td>98.39%</td><td>65.35</td><td>27.04</td></tr>
<tr><td>Ours w/o L_reward</td><td>15.05%</td><td>93.11%</td><td>98.21%</td><td>65.10</td><td>27.04</td></tr></table>
", + "image_path": "e20e8244690f5e01dcd53e455d92f3a60769ab100ea63c696af31a4b42c9de1c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 64, + 201, + 285, + 213 + ], + "lines": [ + { + "bbox": [ + 64, + 201, + 285, + 213 + ], + "spans": [ + { + "bbox": [ + 64, + 201, + 285, + 213 + ], + "type": "text", + "content": "Table 3. Evaluation on the subject fidelity feedback learning." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 215, + 295, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 215, + 295, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 215, + 295, + 407 + ], + "type": "text", + "content": "images as an upper bound. As shown in Tab. 1, our method achieves the best performance on all metrics. Notably, on text rendering metrics Sen. ACC and NED, our model outperforms the baselines by an impressive margin and is already close to the upper bound. The promising results demonstrate the effectiveness of the proposed PosterMaker. Qualitative Comparison. The results are shown in Fig. 8. Compared to the baselines, our PosterMaker generates more readable and accurate poster images with texts, particularly for smaller texts. Notably, as an end-to-end generation method, PosterMaker automatically creates underlays to enhance the contrast between text and background, effectively highlighting the text. This feature is crucial in product poster design for capturing viewers' attention. These findings demonstrate that our PosterMaker successfully learns the distribution of posters created by human designers." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 411, + 212, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 411, + 212, + 423 + ], + "spans": [ + { + "bbox": [ + 55, + 411, + 212, + 423 + ], + "type": "text", + "content": "4.3. Ablation Study and Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 426, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 426, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 426, + 296, + 714 + ], + "type": "text", + "content": "How to achieve high text rendering accuracy? We conduct experiments to explore the effectiveness of different control conditions for visual text rendering. Due to the fact that text rendering accuracy is primarily determined by the first training stage, we discard the second training stage in this experiment to save computational resources. The results are summarized in Tab. 2. We observed several valuable experimental results: 1) The use of char-level features significantly outperforms previous line-level features, benefiting from finer-grained representation. This explains why previous methods [4, 28, 42], achieve inferior performance (PPOCR Line is used in [28, 42], TrOCR Line is used in [4]). Recent concurrent works [29, 46] have also found similar experimental findings as ours. 2) Char-level feature representation is superior to low-level image features such as Canny. 3) PPOCR outperforms TrOCR, which is attributed to PPOCR being a multi-language OCR model, while TrOCR is an English version model. 4) Even though TrOCR has not been trained on multi-language text data, it still achieves decent results, likely because it extracts universal visual structural features. 
5) ByT5 extracts char-level features but the performance is inferior to OCR features, because it extracts semantic features rather than character structural features, while T2I models' text rendering" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 320, + 70, + 386, + 120 + ], + "blocks": [ + { + "bbox": [ + 320, + 70, + 386, + 120 + ], + "lines": [ + { + "bbox": [ + 320, + 70, + 386, + 120 + ], + "spans": [ + { + "bbox": [ + 320, + 70, + 386, + 120 + ], + "type": "image", + "image_path": "c241b8495f20c789e14637a4732870b94dd037c3ecd9e058762ced2a4aab2709.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 388, + 70, + 552, + 187 + ], + "blocks": [ + { + "bbox": [ + 388, + 70, + 552, + 187 + ], + "lines": [ + { + "bbox": [ + 388, + 70, + 552, + 187 + ], + "spans": [ + { + "bbox": [ + 388, + 70, + 552, + 187 + ], + "type": "image", + "image_path": "213ffce2dd7c950b4a730fb1079c8eecdbe8ab5edf2fd70d17dc6203496226e0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 326, + 188, + 541, + 200 + ], + "lines": [ + { + "bbox": [ + 326, + 188, + 541, + 200 + ], + "spans": [ + { + "bbox": [ + 326, + 188, + 541, + 200 + ], + "type": "text", + "content": "Figure 10. Visual examples showing the effect of " + }, + { + "bbox": [ + 326, + 188, + 541, + 200 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reward}" + }, + { + "bbox": [ + 326, + 188, + 541, + 200 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 209, + 555, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 209, + 555, + 376 + ], + "spans": [ + { + "bbox": [ + 313, + 209, + 555, + 376 + ], + "type": "text", + "content": "capability relies more on character structural features. We present visualization results in Fig. 9. We observe that when using line-level features as a control, the generated text occasionally becomes completely unrecognizable. This suggests that line-level features are insufficient for achieving precise text rendering. Additionally, it is evident that using canny control always introduces stroke artifacts, particularly in smaller texts (as seen in row 3 of Fig. 9). This further demonstrates that canny control is also not an ideal condition for text rendering. In summary, the char-level feature extracted by PPOCR performs optimally and the accuracy is already close to the upper bound, indicating the discriminative char-level visual feature is the key to achieve high text rendering accuracy." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 376, + 556, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 556, + 521 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 556, + 521 + ], + "type": "text", + "content": "Effectiveness of subject fidelity feedback learning. We calculate the foreground extension ratio (termed as FG Ext. Ratio) by asking human annotators to manually check each generated image whether the foreground subject is incorrectly extended. As demonstrated in Tab. 3, training our model with " + }, + { + "bbox": [ + 313, + 376, + 556, + 521 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reward}" + }, + { + "bbox": [ + 313, + 376, + 556, + 521 + ], + "type": "text", + "content": " effectively reduces FG Ext. 
Ratio by " + }, + { + "bbox": [ + 313, + 376, + 556, + 521 + ], + "type": "inline_equation", + "content": "3.4\\%" + }, + { + "bbox": [ + 313, + 376, + 556, + 521 + ], + "type": "text", + "content": ", while maintaining subtle variations in other performance metrics. Representative visual examples are presented in Fig. 10. Besides, our model outperforms baseline methods in FG Ext. Ratio (see Tab. 1). These results show the efficacy of our proposed subject fidelity feedback learning approach in mitigating foreground extension artifacts." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 528, + 388, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 528, + 388, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 528, + 388, + 540 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "content": "The application of image generation in poster creation is often impeded by subpar text rendering and inconsistent subjects. To address these challenges, this paper introduces a novel framework, PosterMaker, which synthesizes aesthetically pleasing product posters with accurate and harmonious texts and contents. Moreover, we reveal that the key underlying successful multilingual text rendering is the construction of robust character-level visual text representations. Additionally, we propose subject fidelity feedback learning to mitigate inconsistencies in subjects. Through extensive experiments, our method demonstrates a significant improvement in both high-precision text generation and subject fidelity. These findings not only advance poster generation but also inspire future research on T2I models." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 297, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 297, + 140 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 297, + 140 + ], + "type": "text", + "content": "This work was supported by the National Nature Science Foundation of China (62425114, 62121002, U23B2028, 62232006, 62272436) and Alibaba Group (Alibaba Research Intern Program)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 149, + 115, + 162 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 149, + 115, + 162 + ], + "spans": [ + { + "bbox": [ + 56, + 149, + 115, + 162 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 169, + 296, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 169, + 296, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 169, + 296, + 224 + ], + "spans": [ + { + "bbox": [ + 61, + 169, + 296, + 224 + ], + "type": "text", + "content": "[1] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Ji-aming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, et al. ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 225, + 296, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 225, + 296, + 312 + ], + "spans": [ + { + "bbox": [ + 61, + 225, + 296, + 312 + ], + "type": "text", + "content": "[2] Tingfeng Cao, Junsheng Kong, Xue Zhao, Wenqing Yao, Junwei Ding, Jinhui Zhu, and Jiandong Zhang. Product2img: Prompt-free e-commerce product background generation with diffusion model and self-improved LMM. In Proceedings of the 32nd ACM International Conference on Multimedia, MM 2024, Melbourne, VIC, Australia, 28 October 2024 - 1 November 2024, pages 10774-10783. ACM, 2024. 2, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 313, + 296, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 313, + 296, + 378 + ], + "spans": [ + { + "bbox": [ + 61, + 313, + 296, + 378 + ], + "type": "text", + "content": "[3] Kelvin C. K. Chan, Yang Zhao, Xuhui Jia, Ming-Hsuan Yang, and Huisheng Wang. Improving subject-driven image synthesis with subject-agnostic guidance. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2024, Seattle, WA, USA, June 16-22, 2024, pages 6733-6742. IEEE, 2024. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 380, + 296, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 380, + 296, + 456 + ], + "spans": [ + { + "bbox": [ + 62, + 380, + 296, + 456 + ], + "type": "text", + "content": "[4] Haoxing Chen, Zhuoer Xu, Zhangxuan Gu, Jun Lan, Xing Zheng, Yaohui Li, Changhua Meng, Huijia Zhu, and Weiqiang Wang. Diffuse: Universal text editing diffusion model. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 2, 3, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 457, + 296, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 457, + 296, + 512 + ], + "spans": [ + { + "bbox": [ + 62, + 457, + 296, + 512 + ], + "type": "text", + "content": "[5] Ruidong Chen, Lanjun Wang, Weizhi Nie, Yongdong Zhang, and An-An Liu. Anyscene: Customized image synthesis with composited foreground. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8724-8733, 2024. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 514, + 296, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 514, + 296, + 589 + ], + "spans": [ + { + "bbox": [ + 62, + 514, + 296, + 589 + ], + "type": "text", + "content": "[6] Wenhu Chen, Hexiang Hu, Yandong Li, Nataniel Ruiz, Xuhui Jia, Ming-Wei Chang, and William W. Cohen. Subject-driven text-to-image generation via apprenticeship learning. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 590, + 294, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 590, + 294, + 623 + ], + "spans": [ + { + "bbox": [ + 62, + 590, + 294, + 623 + ], + "type": "text", + "content": "[7] Alimama Creative. Sd3-controlnet-inpainting. https://huggingface.co/alamama-creative/SD3-Controlnet-Inpainting, 2024.6,7,2,4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 624, + 294, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 624, + 294, + 657 + ], + "spans": [ + { + "bbox": [ + 62, + 624, + 294, + 657 + ], + "type": "text", + "content": "[8] Alimama Creative. Sd3-controlnet-softedge. https://huggingface.co/alamama-creative/SD3-Controlnet-Softedge, 2024.7, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 658, + 296, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 658, + 296, + 690 + ], + "spans": [ + { + "bbox": [ + 62, + 658, + 296, + 690 + ], + "type": "text", + "content": "[9] Alimama Creative. Ecomxl-controlnet-inpaint. https://huggingface.co/alimama-creative/EcomXL_controlnet_inpaint, 2024.2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 691, + 296, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 691, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 57, + 691, + 296, + 714 + ], + "type": "text", + "content": "[10] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Xilin Wei, Songyang Zhang, Haodong" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 556, + 713 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 333, + 73, + 555, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 555, + 139 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 555, + 139 + ], + "type": "text", + "content": "Duan, Maosong Cao, Wenwei Zhang, Yining Li, Hang Yan, Yang Gao, Xinyue Zhang, Wei Li, Jingwen Li, Kai Chen, Conghui He, Xingcheng Zhang, Yu Qiao, Dahua Lin, and Jiaqi Wang. Internlm-xcomposer2: Mastering free-form text-image composition and comprehension in vision-language large model. arXiv preprint arXiv:2401.16420, 2024. 6, 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 140, + 555, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 140, + 555, + 194 + ], + "spans": [ + { + "bbox": [ + 316, + 140, + 555, + 194 + ], + "type": "text", + "content": "[11] Zhenbang Du, Wei Feng, Haohan Wang, Yaoyu Li, Jingsen Wang, Jian Li, Zheng Zhang, Jingjing Lv, Xin Zhu, Junsheng Jin, et al. Towards reliable advertising image generation using human feedback. In European Conference on Computer Vision, pages 399-415. Springer, 2024. 
2, 4, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 195, + 556, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 195, + 556, + 270 + ], + "spans": [ + { + "bbox": [ + 316, + 195, + 556, + 270 + ], + "type": "text", + "content": "[12] Amir Erfan Eshratifar, Joao V.B. Soares, Kapil Thadani, Shaunak Mishra, Mikhail Kuznetsov, Yueh-Ning Ku, and Paloma De Juan. Salient object-aware background generation using text-guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 7489-7499, 2024. 2, 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 272, + 554, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 272, + 554, + 337 + ], + "spans": [ + { + "bbox": [ + 316, + 272, + 554, + 337 + ], + "type": "text", + "content": "[13] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. 2, 3, 4, 7, 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 338, + 555, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 338, + 555, + 403 + ], + "spans": [ + { + "bbox": [ + 316, + 338, + 555, + 403 + ], + "type": "text", + "content": "[14] Yifan Gao, Jinpeng Lin, Min Zhou, Chuanbin Liu, Hongtao Xie, Tiezheng Ge, and Yuning Jiang. Textpainter: Multimodal text image generation with visual-harmony and text-comprehension for poster design. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7236-7246, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 404, + 554, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 404, + 554, + 480 + ], + "spans": [ + { + "bbox": [ + 317, + 404, + 554, + 480 + ], + "type": "text", + "content": "[15] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 6626-6637, 2017. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 482, + 554, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 482, + 554, + 536 + ], + "spans": [ + { + "bbox": [ + 317, + 482, + 554, + 536 + ], + "type": "text", + "content": "[16] Peidong Jia, Chenxuan Li, Yuhui Yuan, Zeyu Liu, Yichao Shen, Bohan Chen, Xingru Chen, Yinglin Zheng, Dong Chen, Ji Li, Xiaodong Xie, Shanghang Zhang, and Baining Guo. Cole: A hierarchical generation framework for multilayered and editable graphic design, 2024. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 537, + 554, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 537, + 554, + 590 + ], + "spans": [ + { + "bbox": [ + 317, + 537, + 554, + 590 + ], + "type": "text", + "content": "[17] Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. Segment anything in high quality. In Advances in Neural Information Processing Systems, pages 29914–29934. Curran Associates, Inc., 2023. 
5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 592, + 554, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 592, + 554, + 657 + ], + "spans": [ + { + "bbox": [ + 316, + 592, + 554, + 657 + ], + "type": "text", + "content": "[18] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollar, and Ross Girshick. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4015-4026, 2023. 5, 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 658, + 554, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 554, + 690 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 554, + 690 + ], + "type": "text", + "content": "[19] Chao Li, Chen Jiang, Xiaolong Liu, Jun Zhao, and Guoxin Wang. Joytype: A robust design for multilingual visual text creation. arXiv preprint arXiv:2409.17524, 2024. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 692, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 692, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 692, + 554, + 713 + ], + "type": "text", + "content": "[20] Dongxu Li, Junnan Li, and Steven C. H. Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 76, + 72, + 296, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 296, + 117 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 296, + 117 + ], + "type": "text", + "content": "image generation and editing. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 296, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 296, + 173 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 296, + 173 + ], + "type": "text", + "content": "[21] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 174, + 295, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 174, + 295, + 228 + ], + "spans": [ + { + "bbox": [ + 56, + 174, + 295, + 228 + ], + "type": "text", + "content": "[22] Zhaochen Li, Fengheng Li, Wei Feng, Honghe Zhu, An Liu, Yaoyu Li, Zheng Zhang, Jingjing Lv, Xin Zhu, Junjie Shen, et al. Planning and rendering: Towards end-to-end product poster generation. arXiv preprint arXiv:2312.08822, 2023. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 230, + 295, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 230, + 295, + 295 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 295, + 295 + ], + "type": "text", + "content": "[23] Jinpeng Lin, Min Zhou, Ye Ma, Yifan Gao, Chenxi Fei, Yangjian Chen, Zhang Yu, and Tiezheng Ge. Autoposter: A highly automatic and content-aware design system for advertising poster generation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 1250–1260, 2023. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 297, + 295, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 297, + 295, + 330 + ], + "spans": [ + { + "bbox": [ + 56, + 297, + 295, + 330 + ], + "type": "text", + "content": "[24] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 331, + 295, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 331, + 295, + 385 + ], + "spans": [ + { + "bbox": [ + 56, + 331, + 295, + 385 + ], + "type": "text", + "content": "[25] Zeyu Liu, Weicong Liang, Zhanhao Liang, Chong Luo, Ji Li, Gao Huang, and Yuhui Yuan. Glyph-byt5: A customized text encoder for accurate visual text rendering. In European Conference on Computer Vision, pages 361-377. Springer, 2024. 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 387, + 295, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 387, + 295, + 431 + ], + "spans": [ + { + "bbox": [ + 56, + 387, + 295, + 431 + ], + "type": "text", + "content": "[26] Zeyu Liu, Weicong Liang, Yiming Zhao, Bohan Chen, Ji Li, and Yuhui Yuan. Glyph-byt5-v2: A strong aesthetic baseline for accurate multilingual visual text rendering. arXiv preprint arXiv:2406.10208, 2024. 2, 3, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 432, + 295, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 432, + 295, + 476 + ], + "spans": [ + { + "bbox": [ + 56, + 432, + 295, + 476 + ], + "type": "text", + "content": "[27] Zhiying Lu, Chuanbin Liu, Xiaojun Chang, Yongdong Zhang, and Hongtao Xie. Dhvt: Dynamic hybrid vision transformer for small dataset recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 477, + 295, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 477, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 56, + 477, + 295, + 521 + ], + "type": "text", + "content": "[28] Jian Ma, Yonglin Deng, Chen Chen, Haonan Lu, and Zhenyu Yang. Glyphdraw2: Automatic generation of complex glyph posters with diffusion models and large language models. arXiv preprint arXiv:2407.02252, 2024. 2, 3, 5, 7, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 522, + 295, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 522, + 295, + 566 + ], + "spans": [ + { + "bbox": [ + 56, + 522, + 295, + 566 + ], + "type": "text", + "content": "[29] Lichen Ma, Tiezhu Yue, Pei Fu, Yujie Zhong, Kai Zhou, Xiaoming Wei, and Jie Hu. Chargen: High accurate character-level visual text generation model with multimodal encoder. arXiv preprint arXiv:2412.17225, 2024. 
8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 567, + 295, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 567, + 295, + 621 + ], + "spans": [ + { + "bbox": [ + 56, + 567, + 295, + 621 + ], + "type": "text", + "content": "[30] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. SDEdit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2022. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 623, + 295, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 623, + 295, + 656 + ], + "spans": [ + { + "bbox": [ + 56, + 623, + 295, + 656 + ], + "type": "text", + "content": "[31] ModelScope. https://modelscope.cn/models/damo/cv_convnextTinyOCR-recognition-general_damo/summary,2023.6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 657, + 295, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 657, + 295, + 679 + ], + "spans": [ + { + "bbox": [ + 56, + 657, + 295, + 679 + ], + "type": "text", + "content": "[32] OpenAI. https://openai.com/index/hello-gpt-4o/, 2024.6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 680, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 680, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 680, + 295, + 713 + ], + "type": "text", + "content": "[33] Maxime Oquab, Timothee Darcet, Théo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 333, + 72, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 72, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 72, + 553, + 95 + ], + "type": "text", + "content": "Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 96, + 553, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 553, + 118 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 553, + 118 + ], + "type": "text", + "content": "[34] PaddlePaddle. https://github.com/PaddlePaddle/PaddleOCR, 2023.6,2,3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 119, + 553, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 119, + 553, + 195 + ], + "spans": [ + { + "bbox": [ + 316, + 119, + 553, + 195 + ], + "type": "text", + "content": "[35] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. SDXL: improving latent diffusion models for high-resolution image synthesis. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 198, + 553, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 553, + 262 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 553, + 262 + ], + "type": "text", + "content": "[36] Tianhao Qi, Shancheng Fang, Yanze Wu, Hongtao Xie, Jiawei Liu, Lang Chen, Qian He, and Yongdong Zhang. 
Deadiff: An efficient stylization diffusion model with disentangled representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8693-8702, 2024. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 264, + 553, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 264, + 553, + 308 + ], + "spans": [ + { + "bbox": [ + 316, + 264, + 553, + 308 + ], + "type": "text", + "content": "[37] Xuebin Qin, Zichen Zhang, Chenyang Huang, Masood Dehghan, Osmar Zaiane, and Martin Jagersand. U2-net: Going deeper with nested u-structure for salient object detection. page 107404, 2020. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 310, + 553, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 310, + 553, + 365 + ], + "spans": [ + { + "bbox": [ + 316, + 310, + 553, + 365 + ], + "type": "text", + "content": "[38] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and PeterJ. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv: Learning, arXiv: Learning, 2019. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 366, + 553, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 366, + 553, + 431 + ], + "spans": [ + { + "bbox": [ + 316, + 366, + 553, + 431 + ], + "type": "text", + "content": "[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 10674-10685. IEEE, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 434, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 434, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 434, + 553, + 498 + ], + "type": "text", + "content": "[40] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 2, 3, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 500, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 500, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 500, + 553, + 567 + ], + "type": "text", + "content": "[41] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 567, + 553, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 567, + 553, + 623 + ], + "spans": [ + { + "bbox": [ + 316, + 567, + 553, + 623 + ], + "type": "text", + "content": "[42] Yuxiang Tuo, Wangmeng Xiang, Jun-Yan He, Yifeng Geng, and Xuansong Xie. Anytext: Multilingual visual text generation and editing. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 
2, 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 624, + 553, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 624, + 553, + 689 + ], + "spans": [ + { + "bbox": [ + 316, + 624, + 553, + 689 + ], + "type": "text", + "content": "[43] Haohan Wang, Wei Feng, Yaoyu Li, Zheng Zhang, Jingjing Lv, Junjie Shen, Zhangang Lin, and Jingping Shao. Generate e-commerce product background by integrating category commonality and personalized style. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5. IEEE, 2025. 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "text", + "content": "[44] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, and Anthony Chen. Instantid: Zero-shot identity-preserving gener" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 676 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 76, + 72, + 294, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 294, + 94 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 294, + 94 + ], + "type": "text", + "content": "ation in seconds. arXiv preprint arXiv:2401.07519, 2024. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 172 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 172 + ], + "type": "text", + "content": "[45] Shaodong Wang, Yunyang Ge, Liuhan Chen, Haiyang Zhou, Qian Wang, Xinhua Cheng, and Li Yuan. Prompt2poster: Automatically artistic chinese poster creation from prompt only. In Proceedings of the 32nd ACM International Conference on Multimedia, MM 2024, Melbourne, VIC, Australia, 28 October 2024 - 1 November 2024, pages 10716-10724. ACM, 2024. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 174, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 174, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 174, + 294, + 206 + ], + "type": "text", + "content": "[46] Tong Wang, Xiaochao Qu, and Ting Liu. Textmastero: Mastering high-quality scene text editing in diverse languages and styles. arXiv preprint arXiv:2408.10623, 2024. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 208, + 294, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 208, + 294, + 272 + ], + "spans": [ + { + "bbox": [ + 56, + 208, + 294, + 272 + ], + "type": "text", + "content": "[47] Jiazheng Xu, Xiao Liu, Yuchen Wu, Yuxuan Tong, Qinkai Li, Ming Ding, Jie Tang, and Yuxiao Dong. Imagereward: Learning and evaluating human preferences for text-to-image generation. In Advances in Neural Information Processing Systems, pages 15903-15935. Curran Associates, Inc., 2023. 
5, 7, 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 274, + 294, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 274, + 294, + 328 + ], + "spans": [ + { + "bbox": [ + 56, + 274, + 294, + 328 + ], + "type": "text", + "content": "[48] Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, and Colin Raffel. ByT5: Towards a token-free future with pre-trained byte-to-byte models. Transactions of the Association for Computational Linguistics, 10:291-306, 2022. 7, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 331, + 294, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 331, + 294, + 406 + ], + "spans": [ + { + "bbox": [ + 56, + 331, + 294, + 406 + ], + "type": "text", + "content": "[49] Yukang Yang, Dongnan Gui, Yuhui Yuan, Weicong Liang, Haisong Ding, Han Hu, and Kai Chen. Glyphcontrol: Glyph conditional control for visual text generation. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 408, + 294, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 408, + 294, + 450 + ], + "spans": [ + { + "bbox": [ + 56, + 408, + 294, + 450 + ], + "type": "text", + "content": "[50] Jingfeng Yao, Xinggang Wang, Shusheng Yang, and Baoyuan Wang. Vitmatte: Boosting image matting with pretrained plain vision transformers. Information Fusion, 103: 102091, 2024. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 453, + 294, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 453, + 294, + 495 + ], + "spans": [ + { + "bbox": [ + 56, + 453, + 294, + 495 + ], + "type": "text", + "content": "[51] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 498, + 294, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 498, + 294, + 540 + ], + "spans": [ + { + "bbox": [ + 56, + 498, + 294, + 540 + ], + "type": "text", + "content": "[52] Boqiang Zhang, Zuan Gao, Yadong Qu, and Hongtao Xie. How control information influences multilingual text image generation and editing? arXiv preprint arXiv:2407.11502, 2024. 3, 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 543, + 294, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 543, + 294, + 586 + ], + "spans": [ + { + "bbox": [ + 56, + 543, + 294, + 586 + ], + "type": "text", + "content": "[53] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 2, 3, 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 588, + 294, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 588, + 294, + 630 + ], + "spans": [ + { + "bbox": [ + 56, + 588, + 294, + 630 + ], + "type": "text", + "content": "[54] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In Computer Vision and Pattern Recognition, 2016. 
4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 632, + 294, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 632, + 294, + 676 + ], + "spans": [ + { + "bbox": [ + 56, + 632, + 294, + 676 + ], + "type": "text", + "content": "[55] Yuanzhi Zhu, Jiawei Liu, Feiyu Gao, Wenyu Liu, Xinggang Wang, Peng Wang, Fei Huang, Cong Yao, and Zhibo Yang. Visual text generation in the wild. In European Conference on Computer Vision, pages 89-106. Springer, 2024. 3" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 68, + 504, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 68, + 504, + 103 + ], + "spans": [ + { + "bbox": [ + 107, + 68, + 504, + 103 + ], + "type": "text", + "content": "PosterMaker: Towards High-Quality Product Poster Generation with Accurate Text Rendering" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "spans": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 142, + 296, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 142, + 296, + 191 + ], + "spans": [ + { + "bbox": [ + 55, + 142, + 296, + 191 + ], + "type": "text", + "content": "Due to space limitations, we were unable to present all experimental results in the main text. In this supplementary material, we will give more details about our experiments and present additional results." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 203, + 191, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 203, + 191, + 217 + ], + "spans": [ + { + "bbox": [ + 55, + 203, + 191, + 217 + ], + "type": "text", + "content": "6. Implementation Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 223, + 296, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 223, + 296, + 282 + ], + "spans": [ + { + "bbox": [ + 55, + 223, + 296, + 282 + ], + "type": "text", + "content": "Training and Inference. We fully follow the settings of SD3 [13]. During training, the denoise loss " + }, + { + "bbox": [ + 55, + 223, + 296, + 282 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{denoise}}" + }, + { + "bbox": [ + 55, + 223, + 296, + 282 + ], + "type": "text", + "content": " uses simplified flow matching, also known as 0-rectified flow matching loss [24]. In inference, we also use the inference method of flow matching, with 28 inference steps." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "spans": [ + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "type": "text", + "content": "TextRenderNet and SceneGenNet. TextRenderNet and SceneGenNet have an architecture similar to SD3 [13], composed of multiple MM-DiT Blocks. 
In our implementation, TextRenderNet consists of 12 layers of MM-DiT Blocks, while SceneGenNet consists of 23 layers of MM-DiT Blocks. The output of the " + }, + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "type": "inline_equation", + "content": "N_{i}" + }, + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "type": "text", + "content": "-th block of SceneGenNet is first added with the output of the " + }, + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "type": "inline_equation", + "content": "\\left\\lceil \\frac{N_i}{2} \\right\\rceil" + }, + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "type": "text", + "content": "-th block of TextRenderNet, and then add to the " + }, + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "type": "inline_equation", + "content": "N_{i}" + }, + { + "bbox": [ + 55, + 283, + 296, + 379 + ], + "type": "text", + "content": "-th SD3 block." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 380, + 296, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 380, + 296, + 451 + ], + "spans": [ + { + "bbox": [ + 55, + 380, + 296, + 451 + ], + "type": "text", + "content": "Classifier-Free Guidance. We use CFG during inference, with a CFG scale of 5. Additionally, since the \"prompt\" inputted to TextRenderNet is not a caption but a text representation, the negative one for CFG is set to a zero vector. During training, we randomly drop the text representation to a zero vector with " + }, + { + "bbox": [ + 55, + 380, + 296, + 451 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 55, + 380, + 296, + 451 + ], + "type": "text", + "content": " probability." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "spans": [ + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "text", + "content": "The Setting of " + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "text", + "content": " in Reward Loss. We follow [47] to train the reward loss at the last 10 inference steps, i.e., we set " + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "text", + "content": " to 10. Within the range of " + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "inline_equation", + "content": "t' \\sim [1, t_1]" + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "text", + "content": ", the result of the image " + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 55, + 452, + 296, + 511 + ], + "type": "text", + "content": " obtained by one-step inference is close to the result of complete inference." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 512, + 296, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 512, + 296, + 607 + ], + "spans": [ + { + "bbox": [ + 55, + 512, + 296, + 607 + ], + "type": "text", + "content": "Details about Metric Calculation. Our evaluation benchmark contains samples generated by LLM [10] thus there is no ground truth for these samples. 
Therefore, we exclude these LLM-generated samples when calculating metrics that depend on ground truth images, i.e., FID metric for all experiments, text accuracy metrics for GT (with and without VAE reconstruction) and results for ablation on different text features." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 609, + 296, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 609, + 296, + 657 + ], + "spans": [ + { + "bbox": [ + 55, + 609, + 296, + 657 + ], + "type": "text", + "content": "About ground truth for training Foreground Extension Detector. We treat the task of detecting foreground extension as a binary classification problem and ask annotators to manually label the ground truth." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 669, + 152, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 669, + 152, + 681 + ], + "spans": [ + { + "bbox": [ + 55, + 669, + 152, + 681 + ], + "type": "text", + "content": "7. Baseline Details" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "content": "We carefully designed 6 baseline approaches based on existing techniques for comparative analysis. The de" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 318, + 140, + 566, + 207 + ], + "blocks": [ + { + "bbox": [ + 318, + 140, + 566, + 207 + ], + "lines": [ + { + "bbox": [ + 318, + 140, + 566, + 207 + ], + "spans": [ + { + "bbox": [ + 318, + 140, + 566, + 207 + ], + "type": "image", + "image_path": "d6744c5d9caa6bc83f69bb57ecb1d9daafd4f9a7d94f1863a5c86d9575225be2.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 318, + 208, + 499, + 290 + ], + "blocks": [ + { + "bbox": [ + 318, + 208, + 499, + 290 + ], + "lines": [ + { + "bbox": [ + 318, + 208, + 499, + 290 + ], + "spans": [ + { + "bbox": [ + 318, + 208, + 499, + 290 + ], + "type": "image", + "image_path": "d83b09b24b46dfeee4c8c54a89e5dbf00003f3dc7e316e42c12abbfdcf799593.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 318, + 293, + 541, + 381 + ], + "blocks": [ + { + "bbox": [ + 318, + 293, + 541, + 381 + ], + "lines": [ + { + "bbox": [ + 318, + 293, + 541, + 381 + ], + "spans": [ + { + "bbox": [ + 318, + 293, + 541, + 381 + ], + "type": "image", + "image_path": "d7751a41607c78a3558de3d5bbf74fffe5b47dff69d5f6c0fb1aec1026926f20.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 318, + 384, + 498, + 437 + ], + "blocks": [ + { + "bbox": [ + 318, + 384, + 498, + 437 + ], + "lines": [ + { + "bbox": [ + 318, + 384, + 498, + 437 + ], + "spans": [ + { + "bbox": [ + 318, + 384, + 498, + 437 + ], + "type": "image", + "image_path": "efadff1d49693fc289e772cae52adb6dc2a92ba704dca7a62cfbaedaf7a7cadf.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 318, + 438, + 496, + 510 + ], + "blocks": [ + { + "bbox": [ + 318, + 438, + 496, + 510 + ], + "lines": [ + { + "bbox": [ + 318, + 438, + 496, + 510 + ], + "spans": [ + { + "bbox": [ + 318, + 438, + 496, + 510 + ], + "type": "image", + "image_path": 
"879af9b19d382fa98252e55347ac57b0349226e8c666c4bc696de805ba1b0522.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 318, + 511, + 493, + 593 + ], + "blocks": [ + { + "bbox": [ + 318, + 511, + 493, + 593 + ], + "lines": [ + { + "bbox": [ + 318, + 511, + 493, + 593 + ], + "spans": [ + { + "bbox": [ + 318, + 511, + 493, + 593 + ], + "type": "image", + "image_path": "235f3cdda602d017551430d3d9e0d84d734901b0f8a30523be01b46932a2c2dd.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 609, + 555, + 630 + ], + "lines": [ + { + "bbox": [ + 313, + 609, + 555, + 630 + ], + "spans": [ + { + "bbox": [ + 313, + 609, + 555, + 630 + ], + "type": "text", + "content": "Figure 11. Detailed illustration of the implementation of the different baseline methods." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 653, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 555, + 713 + ], + "type": "text", + "content": "tails are shown in Fig. 11. For 1) SD3_inpaint_byt5, 2) SD3_canny&inpaint, and 4) AnyText, we fine-tune them on our 160K dataset for the poster generation task. Meanwhile, 3) SD3_inpaint_Anytext is a two-stage inference method. In the first stage, the pre-trained Inpaint ControlNet gener" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 204 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 204 + ], + "type": "text", + "content": "ates the background, and in the second stage, AnyText performs the text editing task, with AnyText also fine-tuned on the 160K dataset specifically for the text editing task. The Inpainting ControlNet is initialized from pre-trained SD3 Inpainting-ControlNet [7] and Canny ControlNet is initialized from [8]. For 5) GlyphDraw2 [28] and 6) Glyph-ByT5-v2 [26] are both the SOTA T2I methods that support multilingual text rendering. However, they neither have open-source pre-trained weights nor support subject input, so we reproduced them on our dataset by adding the pre-trained inpainting controlnet [9] to support the subject input." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 215, + 261, + 229 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 215, + 261, + 229 + ], + "spans": [ + { + "bbox": [ + 55, + 215, + 261, + 229 + ], + "type": "text", + "content": "8. 
Scalable Training for Text Rendering" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 236, + 296, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 236, + 296, + 462 + ], + "spans": [ + { + "bbox": [ + 56, + 236, + 296, + 462 + ], + "type": "text", + "content": "Our proposed two-stage training strategy allows the model to learn two different capabilities (i.e., text rendering and scene generation) separately, enabling more flexibility with distinct datasets for each phase. Recent text rendering methods [4, 25, 26, 42] typically train their models on datasets containing millions of samples. To verify the potential of further improving our performance with more training data, we build a large dataset with 1 million samples and we directly obtain the text annotations with PPOCRv4 [34] without manually annotating. And we use this dataset for the first stage of text rendering training and use the same 160k data for the second stage of scene generation learning. Compared to using 160k data in both of the previous stages, the text sentence accuracy significantly improved by " + }, + { + "bbox": [ + 56, + 236, + 296, + 462 + ], + "type": "inline_equation", + "content": "4.48\\%" + }, + { + "bbox": [ + 56, + 236, + 296, + 462 + ], + "type": "text", + "content": " (as shown in Tab. 4), demonstrating that the multistage training strategy is flexible and scalable. However, in the main experiments, we select to report the performance of our model training only on 160k data for fair comparison with the baselines." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 83, + 471, + 268, + 506 + ], + "blocks": [ + { + "bbox": [ + 83, + 471, + 268, + 506 + ], + "lines": [ + { + "bbox": [ + 83, + 471, + 268, + 506 + ], + "spans": [ + { + "bbox": [ + 83, + 471, + 268, + 506 + ], + "type": "table", + "html": "
<table><tr><td>Data Size (St.1 & St.2)</td><td>Sen. ACC</td><td>NED</td></tr>
<tr><td>160k & 160k</td><td>93.11%</td><td>98.21%</td></tr>
<tr><td>1M & 160k</td><td>97.59%</td><td>99.38%</td></tr></table>
", + "image_path": "164a4b0b551f6078712682cff0d2fad4136ca43265769cddadb2be6d386ef572.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 544, + 295, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 295, + 572 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 295, + 572 + ], + "type": "text", + "content": "9. Discussion on advantages of end-to-end over two-stage methods." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 579, + 295, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 579, + 295, + 664 + ], + "spans": [ + { + "bbox": [ + 55, + 579, + 295, + 664 + ], + "type": "text", + "content": "The main weakness of two-stage methods (first inpaint background, then render text) is their inability to consistently provide a clean background for texts (see Fig. 12, reducing text readability, especially with complex backgrounds. In contrast, one-stage methods generate texts and backgrounds simultaneously, enabling them to create a clean backdrop or underlays that enhance text visibility." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 669, + 186, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 669, + 186, + 681 + ], + "spans": [ + { + "bbox": [ + 56, + 669, + 186, + 681 + ], + "type": "text", + "content": "10. Text Position Control" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "The position control of PosterMaker uses a very straightforward approach (as shown in Fig. 13), mapping the text" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 316, + 66, + 392, + 188 + ], + "blocks": [ + { + "bbox": [ + 316, + 66, + 392, + 188 + ], + "lines": [ + { + "bbox": [ + 316, + 66, + 392, + 188 + ], + "spans": [ + { + "bbox": [ + 316, + 66, + 392, + 188 + ], + "type": "image", + "image_path": "1d93b0891749cf4e584bd0d8c2a0366ce2a33c0bed4b72a2ba222d7d57d42ce0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 322, + 198, + 545, + 209 + ], + "lines": [ + { + "bbox": [ + 322, + 198, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 322, + 198, + 545, + 209 + ], + "type": "text", + "content": "Figure 12. Showcases for end-to-end and two-stage methods." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 395, + 65, + 474, + 189 + ], + "blocks": [ + { + "bbox": [ + 395, + 65, + 474, + 189 + ], + "lines": [ + { + "bbox": [ + 395, + 65, + 474, + 189 + ], + "spans": [ + { + "bbox": [ + 395, + 65, + 474, + 189 + ], + "type": "image", + "image_path": "7255cb2de3ecc45d951a2703e4df897c410c3a500d2cbea9e40d0630fb329c24.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 476, + 65, + 554, + 189 + ], + "blocks": [ + { + "bbox": [ + 476, + 65, + 554, + 189 + ], + "lines": [ + { + "bbox": [ + 476, + 65, + 554, + 189 + ], + "spans": [ + { + "bbox": [ + 476, + 65, + 554, + 189 + ], + "type": "image", + "image_path": "70b4fc4b9ea93367b5f8c88d331426b2da2ceffe70c8e631ce18d9d27c1b4f4c.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 348, + 219, + 520, + 243 + ], + "blocks": [ + { + "bbox": [ + 55, + 514, + 295, + 537 + ], + "lines": [ + { + "bbox": [ + 55, + 514, + 295, + 537 + ], + "spans": [ + { + "bbox": [ + 55, + 514, + 295, + 537 + ], + "type": "text", + "content": "Table 4. Quantitative comparison with different data sizes for text rendering training." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 348, + 219, + 520, + 243 + ], + "lines": [ + { + "bbox": [ + 348, + 219, + 520, + 243 + ], + "spans": [ + { + "bbox": [ + 348, + 219, + 520, + 243 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>mIoU</td><td>IoU@0.5</td><td>IoU@0.7</td></tr>
<tr><td>Ours</td><td>84.65%</td><td>97.18%</td><td>93.94%</td></tr></table>
", + "image_path": "7a184ebc870702926c0d50fabb66d44c3e79daadc427cc87722d2cdb992e8db9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 351, + 251, + 517, + 262 + ], + "lines": [ + { + "bbox": [ + 351, + 251, + 517, + 262 + ], + "spans": [ + { + "bbox": [ + 351, + 251, + 517, + 262 + ], + "type": "text", + "content": "Table 5. Evaluation on text location accuracy." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "type": "text", + "content": "bounding box to cosine position encoding, which is then concatenated with text features and used as the input to TextRenderNet. To demonstrate our method's effectiveness, we evaluate the bounding box IoU (Intersection of Union) metric as follows: 1) we employ OCR model to extract texts from the generated image. 2) For each ground truth text, we identify the best-matched OCR-detected text based on edit distance and then calculate the IoU between their corresponding bounding boxes. We average the IoU score over all the samples to obtain mean IoU (termed mIoU). And we also report IoU@R which indicates the proportion of samples with IoU higher than " + }, + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "type": "text", + "content": ". As shown in Tab. 5, our method achieves a high mIoU of " + }, + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "type": "inline_equation", + "content": "84.65\\%" + }, + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "type": "inline_equation", + "content": "93.94\\%" + }, + { + "bbox": [ + 313, + 281, + 555, + 474 + ], + "type": "text", + "content": " samples have an IoU score higher than 0.7. These promising results prove that our text position control method is simple yet effective." + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 319, + 486, + 561, + 564 + ], + "blocks": [ + { + "bbox": [ + 319, + 486, + 561, + 564 + ], + "lines": [ + { + "bbox": [ + 319, + 486, + 561, + 564 + ], + "spans": [ + { + "bbox": [ + 319, + 486, + 561, + 564 + ], + "type": "image", + "image_path": "b8c56939fa446816f17a9f9e6141f67f07ea783277973f441764a0c8d18d3221.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 574, + 555, + 597 + ], + "lines": [ + { + "bbox": [ + 313, + 574, + 555, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 574, + 555, + 597 + ], + "type": "text", + "content": "Figure 13. Detailed illustration of how we construct the position embedding for controlling the text position." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 619, + 555, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 619, + 555, + 646 + ], + "spans": [ + { + "bbox": [ + 315, + 619, + 555, + 646 + ], + "type": "text", + "content": "11. 
Comparison Between GlyphByT5 and PosterMaker" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 653, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 554, + 713 + ], + "type": "text", + "content": "GlyphByT5 [25, 26] are recently proposed visual text rendering methods that achieve high text rendering accuracy. And we will discuss some differences and internal connections between our PosterMaker and GlyphByT5 on how to control text rendering." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "content": "- Text position control: GlyphByT5 achieve text position control by modifying the original cross-attention module with their proposed region-wise multi-head cross-attention. In contrast, our PosterMaker encodes the text location directly into the character-level text representation to accomplish text position control. As discussed in Sec. 10, our approach is both simple and effective for precise text location control." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 168, + 297, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 168, + 297, + 515 + ], + "spans": [ + { + "bbox": [ + 55, + 168, + 297, + 515 + ], + "type": "text", + "content": "- Text content control: both GlyphByT5 and our PosterMaker control the generation of text content by constructing suitable text representation. Specifically, in this work, we claim that the key to achieve accurate text rendering is to extract character-level visual features as the control condition and carefully construct a robust text representation based on off-the-shelf OCR model [34]. In GlyphByT5, the authors also extract character-level text features, but with a textual encoder named ByT5 [48]. Then they propose glyph-alignment pre-training to align these textual features with pre-trained visual encoders DINOv2 [33]. Additionally, they employ box-level contrastive learning with complex augmentations and a hard-mining strategy to enhance character-level discriminativeness. We hypothesize that the primary reason both our method and GlyphByT5 achieve high text rendering accuracy is our shared goal of constructing a robust character-level visual representation. In fact, the ability of GlyphByT5's character-level visual representation is distilled from the pre-trained visual encoder DINOv2, rather than inherited from the pre-trained textual encoder ByT5 itself. In order to verify our hypothesis and insights, we adopt a more direct approach to directly replace the PPOCR encoder in PosterMaker with DINOv2. As shown in Tab. 6, simply extracting character-wise visual features with DINOv2 can also achieve precise text rendering. This result further verifies our claim: the key to precise text rendering is to extract character-level visual features as the control condition." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 67, + 522, + 284, + 602 + ], + "blocks": [ + { + "bbox": [ + 67, + 522, + 284, + 602 + ], + "lines": [ + { + "bbox": [ + 67, + 522, + 284, + 602 + ], + "spans": [ + { + "bbox": [ + 67, + 522, + 284, + 602 + ], + "type": "table", + "html": "
<table><tr><td>Text Feature</td><td>Type</td><td>Sen. ACC</td><td>NED</td></tr>
<tr><td>PPOCR Line</td><td>visual feat.</td><td>38.91%</td><td>53.86%</td></tr>
<tr><td>PPOCR Char</td><td>visual feat.</td><td>95.15%</td><td>98.75%</td></tr>
<tr><td>DINOv2 Line</td><td>visual feat.</td><td>4.25%</td><td>20.59%</td></tr>
<tr><td>DINOv2 Char</td><td>visual feat.</td><td>94.92%</td><td>98.66%</td></tr>
<tr><td>GT (w/o Rec.)</td><td>-</td><td>98.53%</td><td>99.59%</td></tr>
<tr><td>GT (w/ SD3 Rec.)</td><td>-</td><td>98.09%</td><td>99.36%</td></tr></table>
", + "image_path": "6d387ff5c8a05c8017ac5a154369d50fe2bc44b6362e2366fba384dd7ded98aa.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 657, + 250, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 657, + 250, + 671 + ], + "spans": [ + { + "bbox": [ + 56, + 657, + 250, + 671 + ], + "type": "text", + "content": "12. Visualization of Training Samples" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "type": "text", + "content": "We present example training images from our dataset in Fig. 17. The dataset predominantly consists of Chinese text, with a small portion of English text. Additionally, it in" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 541, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 541, + 85 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 541, + 85 + ], + "type": "text", + "content": "cludes challenging cases with small-sized text elements." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 95, + 553, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 95, + 553, + 110 + ], + "spans": [ + { + "bbox": [ + 314, + 95, + 553, + 110 + ], + "type": "text", + "content": "13. The Generalization of Text Representation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 116, + 555, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 116, + 555, + 283 + ], + "spans": [ + { + "bbox": [ + 313, + 116, + 555, + 283 + ], + "type": "text", + "content": "PosterMaker is trained primarily on common Chinese data, with only a minimal amount of English data. Despite this, it demonstrates a notable level of generalization, enabling it to generate English, Japanese, and uncommon Chinese characters that were not included in the training set (as shown in Fig. 16). In order to quantitatively evaluate the generalization capability of PosterMaker, we compared the accuracy of different text representations on uncommon characters using a randomly sampled uncommon character benchmark. The results show that our method can also generalize well to some characters that are unseen in the training set. Our performance is inferior to the canny baseline, likely because the canny baseline has been pre-trained on large-scale image data." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 333, + 293, + 537, + 351 + ], + "blocks": [ + { + "bbox": [ + 64, + 609, + 286, + 621 + ], + "lines": [ + { + "bbox": [ + 64, + 609, + 286, + 621 + ], + "spans": [ + { + "bbox": [ + 64, + 609, + 286, + 621 + ], + "type": "text", + "content": "Table 6. Quantitative comparison using various text features." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 333, + 293, + 537, + 351 + ], + "lines": [ + { + "bbox": [ + 333, + 293, + 537, + 351 + ], + "spans": [ + { + "bbox": [ + 333, + 293, + 537, + 351 + ], + "type": "table", + "html": "
<table><tr><td>Text Feature</td><td>Type</td><td>Sen. ACC</td><td>NED</td></tr>
<tr><td>ByT5</td><td>textual feat.</td><td>2.01%</td><td>10.27%</td></tr>
<tr><td>Canny</td><td>img</td><td>65.12%</td><td>74.56%</td></tr>
<tr><td>PPOCR Line</td><td>visual feat.</td><td>8.34%</td><td>15.84%</td></tr>
<tr><td>PPOCR Char</td><td>visual feat.</td><td>61.54%</td><td>70.38%</td></tr></table>
", + "image_path": "94f522890ac89db5cbced1996353dcdd9ce9d6d4ee56253dc26bff58a3b35ab3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 415, + 553, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 415, + 553, + 442 + ], + "spans": [ + { + "bbox": [ + 314, + 415, + 553, + 442 + ], + "type": "text", + "content": "14. Ablation about Foreground Extension Detector" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "spans": [ + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "type": "text", + "content": "We collected " + }, + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "type": "inline_equation", + "content": "20\\mathrm{k}" + }, + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "type": "text", + "content": " manually annotated images to train the foreground extension detector. We randomly selected " + }, + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "type": "text", + "content": " samples as a validation set, while using the remaining " + }, + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 313, + 450, + 556, + 606 + ], + "type": "text", + "content": " for model training. We conduct ablation experiments on different architecture designs of the detector to verify the effectiveness of the proposed architecture. We implement 2 baselines: 1) RFNet [11]: we reimplemented RFNet based on the description in their paper [11]. Since we could not access their depth and saliency detection models, we modified our implementation to only use the product image and generated image as input, excluding the depth and saliency maps. 2) RFNet(SAM): in this baseline, we replace the image encoder used in RFNet with the same SAM[18] im" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 337, + 624, + 533, + 671 + ], + "blocks": [ + { + "bbox": [ + 313, + 358, + 554, + 381 + ], + "lines": [ + { + "bbox": [ + 313, + 358, + 554, + 381 + ], + "spans": [ + { + "bbox": [ + 313, + 358, + 554, + 381 + ], + "type": "text", + "content": "Table 7. Quantitative comparison of the rendering results of different text features on uncommon characters." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 337, + 624, + 533, + 671 + ], + "lines": [ + { + "bbox": [ + 337, + 624, + 533, + 671 + ], + "spans": [ + { + "bbox": [ + 337, + 624, + 533, + 671 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Precision</td><td>Recall</td><td>F1 Score</td></tr>
<tr><td>RFNet (our impl.)</td><td>76.52%</td><td>75.52%</td><td>76.02%</td></tr>
<tr><td>RFNet (SAM)</td><td>81.35%</td><td>80.99%</td><td>81.17%</td></tr>
<tr><td>Ours</td><td>83.52%</td><td>84.81%</td><td>84.16%</td></tr></table>
", + "image_path": "35f87e04b1510ca15f297ae621021c0892cb6465a983b42340d3dcacb4511d73.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 677, + 553, + 700 + ], + "lines": [ + { + "bbox": [ + 313, + 677, + 553, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 553, + 700 + ], + "type": "text", + "content": "Table 8. Evaluation on different architectures of foreground extension detector." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 108, + 131, + 150 + ], + "blocks": [ + { + "bbox": [ + 75, + 72, + 100, + 82 + ], + "lines": [ + { + "bbox": [ + 75, + 72, + 100, + 82 + ], + "spans": [ + { + "bbox": [ + 75, + 72, + 100, + 82 + ], + "type": "text", + "content": "Subject" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 108, + 131, + 150 + ], + "lines": [ + { + "bbox": [ + 72, + 108, + 131, + 150 + ], + "spans": [ + { + "bbox": [ + 72, + 108, + 131, + 150 + ], + "type": "image", + "image_path": "acced411c7664b213472fb0ffbfb1a259736f4feed475108678b21a9429b879f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 140, + 83, + 216, + 156 + ], + "blocks": [ + { + "bbox": [ + 149, + 73, + 202, + 81 + ], + "lines": [ + { + "bbox": [ + 149, + 73, + 202, + 81 + ], + "spans": [ + { + "bbox": [ + 149, + 73, + 202, + 81 + ], + "type": "text", + "content": "Generated Image" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 140, + 83, + 216, + 156 + ], + "lines": [ + { + "bbox": [ + 140, + 83, + 216, + 156 + ], + "spans": [ + { + "bbox": [ + 140, + 83, + 216, + 156 + ], + "type": "image", + "image_path": "bd46d644d9015e5b410203395e465ed73688ec000cef69633c7ab447a9200c8f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 217, + 83, + 293, + 233 + ], + "blocks": [ + { + "bbox": [ + 230, + 73, + 276, + 81 + ], + "lines": [ + { + "bbox": [ + 230, + 73, + 276, + 81 + ], + "spans": [ + { + "bbox": [ + 230, + 73, + 276, + 81 + ], + "type": "text", + "content": "Activation Map" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 217, + 83, + 293, + 233 + ], + "lines": [ + { + "bbox": [ + 217, + 83, + 293, + 233 + ], + "spans": [ + { + "bbox": [ + 217, + 83, + 293, + 233 + ], + "type": "image", + "image_path": "ffedaf24c35862b032dc99b4a85857797a2e5b2a367e7bd1415005d7a390fe76.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 61, + 189, + 136, + 228 + ], + "blocks": [ + { + "bbox": [ + 61, + 189, + 136, + 228 + ], + "lines": [ + { + "bbox": [ + 61, + 189, + 136, + 228 + ], + "spans": [ + { + "bbox": [ + 61, + 189, + 136, + 228 + ], + "type": "image", + "image_path": "19a2fb38e515248ee4b45a3ab38d79f63e2f74ea44d6f7b97387045908418125.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } 
+ ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 140, + 232, + 204, + 308 + ], + "blocks": [ + { + "bbox": [ + 140, + 232, + 204, + 308 + ], + "lines": [ + { + "bbox": [ + 140, + 232, + 204, + 308 + ], + "spans": [ + { + "bbox": [ + 140, + 232, + 204, + 308 + ], + "type": "image", + "image_path": "af6d3635434938a34b71ba93e460bad374edc1de60303ed9f7c30405bf9390ec.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 316, + 295, + 338 + ], + "lines": [ + { + "bbox": [ + 55, + 316, + 295, + 338 + ], + "spans": [ + { + "bbox": [ + 55, + 316, + 295, + 338 + ], + "type": "text", + "content": "Figure 14. Class activation map of the foreground extension detector." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 230, + 232, + 282, + 308 + ], + "blocks": [ + { + "bbox": [ + 230, + 232, + 282, + 308 + ], + "lines": [ + { + "bbox": [ + 230, + 232, + 282, + 308 + ], + "spans": [ + { + "bbox": [ + 230, + 232, + 282, + 308 + ], + "type": "image", + "image_path": "8c9d4514fd6a4a9d1ec9f5ff58b54853662ebc113a4a253a827240e56b0b06da.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 358, + 295, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 358, + 295, + 405 + ], + "spans": [ + { + "bbox": [ + 55, + 358, + 295, + 405 + ], + "type": "text", + "content": "age encoder used in our method. As summarized in Tab. 8, our proposed foreground extension detector outperforms the baselines by a considerable margin, which demonstrates its effectiveness." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 407, + 296, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 407, + 296, + 502 + ], + "spans": [ + { + "bbox": [ + 55, + 407, + 296, + 502 + ], + "type": "text", + "content": "In Fig. 14, we visualize the class activation map [54] of our proposed foreground extension detector. As shown, we can observe a notably higher activation score in the extended foreground regions compared to other areas. This compelling evidence demonstrates that our detector has effectively learned to discern foreground extension cases, thereby it can serve as a robust reward model for fine-tuning PosterMaker to mitigate the foreground extension problem." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 512, + 226, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 512, + 226, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 512, + 226, + 525 + ], + "type": "text", + "content": "15. Ablation about SceneGenNet" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 533, + 295, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 533, + 295, + 593 + ], + "spans": [ + { + "bbox": [ + 55, + 533, + 295, + 593 + ], + "type": "text", + "content": "SceneGenNet enables our model to perform background inpainting while preserve the subject so we cannot directly remove it. We replace it by SDEdit [30] to achieve inpainting. As the results shown in Sec. 15, replacing it results in a significant drop of performance." 
+ } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 58, + 601, + 294, + 632 + ], + "blocks": [ + { + "bbox": [ + 58, + 601, + 294, + 632 + ], + "lines": [ + { + "bbox": [ + 58, + 601, + 294, + 632 + ], + "spans": [ + { + "bbox": [ + 58, + 601, + 294, + 632 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Sen. ACC ↑</td><td>NED ↑</td><td>FID ↓</td><td>CLIP-T ↑</td></tr>
<tr><td>Ours w/o SceneGenNet</td><td>90.53%</td><td>97.95%</td><td>79.44</td><td>26.67</td></tr>
<tr><td>Ours</td><td>93.36%</td><td>98.39%</td><td>65.35</td><td>27.04</td></tr></table>
", + "image_path": "6057f34d7b5bcdecb9710dc1ea7043989105c23d15c139875fc1f6224f4cc489.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 669, + 295, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 669, + 295, + 683 + ], + "spans": [ + { + "bbox": [ + 56, + 669, + 295, + 683 + ], + "type": "text", + "content": "16. Discussion on the impact of the test set size." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "content": "To ensure a fairer comparison between PosterMaker and the baseline methods, we expanded the test set to 5,000 sam" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "type": "text", + "content": "plies(10x the previous PosterBenchmark). The results are shown in Tab. 10, and the experimental conclusions remain consistent with the previous test set. Due to the calculation principle of the FID metric, increasing the test size leads to a significant decrease in the FID scores for all methods, but still maintains the same conclusion." + } + ] + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 317, + 152, + 553, + 203 + ], + "blocks": [ + { + "bbox": [ + 72, + 635, + 279, + 647 + ], + "lines": [ + { + "bbox": [ + 72, + 635, + 279, + 647 + ], + "spans": [ + { + "bbox": [ + 72, + 635, + 279, + 647 + ], + "type": "text", + "content": "Table 9. Comparison between SceneGenNet and SDEdit" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 152, + 553, + 203 + ], + "lines": [ + { + "bbox": [ + 317, + 152, + 553, + 203 + ], + "spans": [ + { + "bbox": [ + 317, + 152, + 553, + 203 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Sen. ACC ↑</td><td>NED ↑</td><td>FID ↓</td><td>CLIP-T ↑</td></tr>
<tr><td>Glyph-ByT5-v2</td><td>67.87%</td><td>86.23%</td><td>20.37</td><td>21.08</td></tr>
<tr><td>SD3_canny&inpaint</td><td>74.49%</td><td>88.78%</td><td>17.91</td><td>20.79</td></tr>
<tr><td>GlyphDraw2</td><td>83.81%</td><td>96.49%</td><td>15.24</td><td>20.67</td></tr>
<tr><td>Ours</td><td>90.20%</td><td>97.58%</td><td>13.36</td><td>21.36</td></tr></table>
", + "image_path": "c0827822b7a1d65a2caaebda643136502f5b4bdb61348e9306f863b50c1f13e3.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 208, + 553, + 230 + ], + "lines": [ + { + "bbox": [ + 313, + 208, + 553, + 230 + ], + "spans": [ + { + "bbox": [ + 313, + 208, + 553, + 230 + ], + "type": "text", + "content": "Table 10. Comparison with baseline methods on 5,000 test samples." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 314, + 250, + 553, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 250, + 553, + 277 + ], + "spans": [ + { + "bbox": [ + 314, + 250, + 553, + 277 + ], + "type": "text", + "content": "17. Discussion on the meaningless texts generated outside target position." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 284, + 554, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 284, + 554, + 379 + ], + "spans": [ + { + "bbox": [ + 313, + 284, + 554, + 379 + ], + "type": "text", + "content": "In our early experimental attempts about text rendering in poster generation, we found that the trained model sometimes generates meaningless texts outside the target area of the text, which will seriously affect the aesthetics. We conjecture that the main reason is that the ground truth images sometimes contain text outside the specified position. To solve this problem, we masked out the extra text during training and it solved most cases." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 380, + 555, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 380, + 555, + 523 + ], + "spans": [ + { + "bbox": [ + 313, + 380, + 555, + 523 + ], + "type": "text", + "content": "Specifically, SceneGenNet is initialized from pre-trained SD3 Inpainting-Controlnet [7]. In the second stage of training, we simultaneously mask out the regions of the untrained texts (usually those that are too small or just logos) both in the subject mask input to SceneGenNet and in the ground truth image used for loss calculation(as shown in Fig. 15). It is worth noting that although these small texts and logos are not included in the training, we have also annotated them to address the aforementioned issues. Finally, this technique makes the loss corresponding to the masked-out regions very close to zero so that the model will not learn these meaningless texts." + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 328, + 536, + 398, + 634 + ], + "blocks": [ + { + "bbox": [ + 328, + 536, + 398, + 634 + ], + "lines": [ + { + "bbox": [ + 328, + 536, + 398, + 634 + ], + "spans": [ + { + "bbox": [ + 328, + 536, + 398, + 634 + ], + "type": "image", + "image_path": "ac56ce7a14fe06acfd1f9d373213ba03b55a7a52cc249287d4914e6f752251c1.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 642, + 554, + 665 + ], + "lines": [ + { + "bbox": [ + 313, + 642, + 554, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 554, + 665 + ], + "type": "text", + "content": "Figure 15. Example of our solution technique for meaningless texts and logos that generated outside target position." 
+ } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 399, + 536, + 469, + 634 + ], + "blocks": [ + { + "bbox": [ + 399, + 536, + 469, + 634 + ], + "lines": [ + { + "bbox": [ + 399, + 536, + 469, + 634 + ], + "spans": [ + { + "bbox": [ + 399, + 536, + 469, + 634 + ], + "type": "image", + "image_path": "773f2b5721a7dfd43745f510e4aa2a780733a1f155cb049f71c66cfa0f62e74a.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 471, + 536, + 542, + 634 + ], + "blocks": [ + { + "bbox": [ + 471, + 536, + 542, + 634 + ], + "lines": [ + { + "bbox": [ + 471, + 536, + 542, + 634 + ], + "spans": [ + { + "bbox": [ + 471, + 536, + 542, + 634 + ], + "type": "image", + "image_path": "62ab0255279e0013a7a9e0ffe8d85575ce34f5e5f9e399dfd33d8dbdb10447d6.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 69, + 154, + 168 + ], + "blocks": [ + { + "bbox": [ + 56, + 69, + 154, + 168 + ], + "lines": [ + { + "bbox": [ + 56, + 69, + 154, + 168 + ], + "spans": [ + { + "bbox": [ + 56, + 69, + 154, + 168 + ], + "type": "image", + "image_path": "5e80a7606f80a52667546bcd4d7872be923bd45b4c53b620e658b4b3e64d671f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 128, + 183, + 481, + 194 + ], + "lines": [ + { + "bbox": [ + 128, + 183, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 128, + 183, + 481, + 194 + ], + "type": "text", + "content": "Figure 16. Visualization results on texts in English, Japanese, and uncommon Chinese characters." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 154, + 70, + 253, + 168 + ], + "blocks": [ + { + "bbox": [ + 154, + 70, + 253, + 168 + ], + "lines": [ + { + "bbox": [ + 154, + 70, + 253, + 168 + ], + "spans": [ + { + "bbox": [ + 154, + 70, + 253, + 168 + ], + "type": "image", + "image_path": "74039fd0e173fe7d59038a7f229de2e88e5cbcf1d43d3440544e155caba17099.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 253, + 70, + 351, + 168 + ], + "blocks": [ + { + "bbox": [ + 253, + 70, + 351, + 168 + ], + "lines": [ + { + "bbox": [ + 253, + 70, + 351, + 168 + ], + "spans": [ + { + "bbox": [ + 253, + 70, + 351, + 168 + ], + "type": "image", + "image_path": "4a2e2c8c0a9d1d6a8fb03c52c4c0a557c6b5d165113dc13020b29cdf24ce825f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 351, + 70, + 419, + 168 + ], + "blocks": [ + { + "bbox": [ + 351, + 70, + 419, + 168 + ], + "lines": [ + { + "bbox": [ + 351, + 70, + 419, + 168 + ], + "spans": [ + { + "bbox": [ + 351, + 70, + 419, + 168 + ], + "type": "image", + "image_path": "6260cbe319bf3452d748f869daf3b8586b2edf5abe2fe1e22740950281fb35db.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 419, + 70, + 487, + 168 + ], + "blocks": [ + { + "bbox": [ + 419, + 70, + 487, + 168 + ], + "lines": [ + { + "bbox": [ + 419, + 70, + 487, + 168 + ], + "spans": [ + { + "bbox": [ + 419, + 70, + 487, + 168 + ], + "type": "image", + "image_path": "5377a14a7ca31602338199f71cf74f38cce1d22b831db83a4f7e315ec63825c7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 487, + 70, + 553, + 168 + ], + "blocks": [ + { + "bbox": [ + 487, + 70, + 553, + 168 + ], + "lines": [ + { + "bbox": [ + 487, + 70, + 553, + 168 + ], + "spans": [ + { + "bbox": [ + 487, + 70, + 553, + 168 + ], + "type": "image", + "image_path": "285d0c0eb47d0b7c3ec34802cd6ff44913974f5624cce623b3c1b4d5858a5444.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 70, + 204, + 187, + 322 + ], + "blocks": [ + { + "bbox": [ + 70, + 204, + 187, + 322 + ], + "lines": [ + { + "bbox": [ + 70, + 204, + 187, + 322 + ], + "spans": [ + { + "bbox": [ + 70, + 204, + 187, + 322 + ], + "type": "image", + "image_path": "5ec602cf40fe1938ef2768c493fab20a46c6a41971b9cf7f91249bab5e7ac082.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 189, + 208, + 304, + 322 + ], + "blocks": [ + { + "bbox": [ + 189, + 208, + 304, + 322 + ], + "lines": [ + { + "bbox": [ + 189, + 208, + 304, + 322 + ], + "spans": [ + { + "bbox": [ + 189, + 208, + 304, + 322 + ], + "type": "image", + "image_path": "484ea3a158871455f056f770a790db31f20761ccaec686d36278db567da63f36.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 308, + 208, + 421, + 322 + ], + "blocks": [ + { + "bbox": [ + 308, + 208, + 421, + 322 + ], + "lines": [ + { + "bbox": [ + 308, + 208, + 421, + 322 + ], + "spans": [ + { + "bbox": [ + 308, + 208, + 421, + 322 + ], + "type": "image", + "image_path": 
"5a04c3f6873570243f276bd503122b3e1e26e195d7e7a21ae5c52320d5f8336e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 425, + 207, + 539, + 322 + ], + "blocks": [ + { + "bbox": [ + 425, + 207, + 539, + 322 + ], + "lines": [ + { + "bbox": [ + 425, + 207, + 539, + 322 + ], + "spans": [ + { + "bbox": [ + 425, + 207, + 539, + 322 + ], + "type": "image", + "image_path": "ca0895ab89b57898376a5b8d1f4963a9a9aea4e850914a42fd1b3fa9e73a9d7b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 71, + 331, + 185, + 479 + ], + "blocks": [ + { + "bbox": [ + 71, + 331, + 185, + 479 + ], + "lines": [ + { + "bbox": [ + 71, + 331, + 185, + 479 + ], + "spans": [ + { + "bbox": [ + 71, + 331, + 185, + 479 + ], + "type": "image", + "image_path": "d19d64c86c9f00cf6a484d8797879b816195a46db8a3fb1ba3ee4edbd33c9c0c.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 189, + 331, + 302, + 491 + ], + "blocks": [ + { + "bbox": [ + 189, + 331, + 302, + 491 + ], + "lines": [ + { + "bbox": [ + 189, + 331, + 302, + 491 + ], + "spans": [ + { + "bbox": [ + 189, + 331, + 302, + 491 + ], + "type": "image", + "image_path": "f65aecae76910308dbf4fdfd3db0dfa49557ea379c1fb2f97b2a0d19123d85b8.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 308, + 331, + 421, + 495 + ], + "blocks": [ + { + "bbox": [ + 308, + 331, + 421, + 495 + ], + "lines": [ + { + "bbox": [ + 308, + 331, + 421, + 495 + ], + "spans": [ + { + "bbox": [ + 308, + 331, + 421, + 495 + ], + "type": "image", + "image_path": "c9aac91754c4899933bcf0e740e1bfc3817b1d9f38f73587ff56903fd9479333.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 425, + 331, + 538, + 497 + ], + "blocks": [ + { + "bbox": [ + 425, + 331, + 538, + 497 + ], + "lines": [ + { + "bbox": [ + 425, + 331, + 538, + 497 + ], + "spans": [ + { + "bbox": [ + 425, + 331, + 538, + 497 + ], + "type": "image", + "image_path": "d13c740981bc7e2e64e6ebf40a3f42a41a3a67a6706cdbdd1ae3d96c325cc0f1.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 70, + 503, + 187, + 652 + ], + "blocks": [ + { + "bbox": [ + 70, + 503, + 187, + 652 + ], + "lines": [ + { + "bbox": [ + 70, + 503, + 187, + 652 + ], + "spans": [ + { + "bbox": [ + 70, + 503, + 187, + 652 + ], + "type": "image", + "image_path": "f1edcbe6ebae9de62998064f33c802d0608f55c5748245719098ae0035f7178c.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 173, + 684, + 436, + 695 + ], + "lines": [ + { + "bbox": [ + 173, + 684, + 436, + 695 + ], + "spans": [ + { + "bbox": [ + 173, + 684, + 436, + 695 + ], + "type": "text", + "content": "Figure 17. Visualization of ground truth for some samples in the dataset." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 190, + 505, + 301, + 665 + ], + "blocks": [ + { + "bbox": [ + 190, + 505, + 301, + 665 + ], + "lines": [ + { + "bbox": [ + 190, + 505, + 301, + 665 + ], + "spans": [ + { + "bbox": [ + 190, + 505, + 301, + 665 + ], + "type": "image", + "image_path": "1f80e0402f89e1240e016e8362ce19565d42bf67373ccd3cffd4465e9d757ab9.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 309, + 504, + 419, + 668 + ], + "blocks": [ + { + "bbox": [ + 309, + 504, + 419, + 668 + ], + "lines": [ + { + "bbox": [ + 309, + 504, + 419, + 668 + ], + "spans": [ + { + "bbox": [ + 309, + 504, + 419, + 668 + ], + "type": "image", + "image_path": "95f067b8f9adc2c7f64f770fe86a8a0c33167d2b1220013b4f0653e183b12bdd.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 424, + 504, + 537, + 668 + ], + "blocks": [ + { + "bbox": [ + 424, + 504, + 537, + 668 + ], + "lines": [ + { + "bbox": [ + 424, + 504, + 537, + 668 + ], + "spans": [ + { + "bbox": [ + 424, + 504, + 537, + 668 + ], + "type": "image", + "image_path": "30723d6c96c7f0e3a399f301dfdc4277f4fd49d43e6db8e55aa926072df85f2c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file